diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index c1035f6b8a..f624963e6c 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -2179,6 +2179,11 @@ func golangBindings(t *testing.T, overload bool) {
if out, err := replacer.CombinedOutput(); err != nil {
t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)
}
+ replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/libevm@v0.0.0", "-replace", "github.com/ava-labs/libevm=github.com/ava-labs/libevm@v0.0.0-20241121221822-8486d85dbf1f")
+ replacer.Dir = pkg
+ if out, err := replacer.CombinedOutput(); err != nil {
+ t.Fatalf("failed to pin libevm dependency version: %v\n%s", err, out)
+ }
tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.22")
tidier.Dir = pkg
if out, err := tidier.CombinedOutput(); err != nil {
diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index a9038a0f7a..685b034d14 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -125,10 +125,15 @@ func TestWaitDeployedCornerCases(t *testing.T) {
// Create a transaction to an account.
code := "6060604052600a8060106000396000f360606040526008565b00"
tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
- tx, _ = types.SignTx(tx, types.LatestSigner(params.TestChainConfig), testKey)
+ tx, err := types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
+ if err != nil {
+ t.Fatalf("Failed to sign transaction: %s", err)
+ }
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- backend.Client().SendTransaction(ctx, tx)
+ if err := backend.Client().SendTransaction(ctx, tx); err != nil {
+ t.Fatalf("Failed to send transaction: %s", err)
+ }
backend.Commit(true)
notContractCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() {
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index dc00530e91..822ea8fe22 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -16,8 +16,8 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/core/block_validator.go b/core/block_validator.go
index a75eeb01a1..2c0fdceaf3 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -34,7 +34,7 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/libevm/trie"
)
// BlockValidator is responsible for validating block headers, uncles and
diff --git a/core/blockchain.go b/core/blockchain.go
index 30214c68ad..23599d608c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -43,21 +43,21 @@ import (
"github.com/ava-labs/coreth/consensus/misc/eip4844"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
- "github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/internal/version"
"github.com/ava-labs/coreth/metrics"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/lru"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/event"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
var (
@@ -177,18 +177,19 @@ type CacheConfig struct {
// triedbConfig derives the configures for trie database.
func (c *CacheConfig) triedbConfig() *triedb.Config {
config := &triedb.Config{Preimages: c.Preimages}
- if c.StateScheme == rawdb.HashScheme {
- config.HashDB = &hashdb.Config{
+ if c.StateScheme == rawdb.HashScheme || c.StateScheme == "" {
+ config.DBOverride = hashdb.Config{
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
StatsPrefix: trieCleanCacheStatsNamespace,
- }
+ ReferenceRoot: true, // Automatically reference root nodes when an update is made
+ }.BackendConstructor
}
if c.StateScheme == rawdb.PathScheme {
- config.PathDB = &pathdb.Config{
+ config.DBOverride = pathdb.Config{
StateHistory: c.StateHistory,
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
- }
+ }.BackendConstructor
}
return config
}
@@ -241,11 +242,11 @@ type BlockChain struct {
chainConfig *params.ChainConfig // Chain & network configuration
cacheConfig *CacheConfig // Cache configuration for pruning
- db ethdb.Database // Low level persistent database to store final content in
- snaps *snapshot.Tree // Snapshot tree for fast trie leaf access
- triedb *triedb.Database // The database handler for maintaining trie nodes.
- stateCache state.Database // State database to reuse between imports (contains state cache)
- txIndexer *txIndexer // Transaction indexer, might be nil if not enabled
+ db ethdb.Database // Low level persistent database to store final content in
+ snaps *ethsnapshot.Tree // Snapshot tree for fast trie leaf access
+ triedb *triedb.Database // The database handler for maintaining trie nodes.
+ stateCache state.Database // State database to reuse between imports (contains state cache)
+ txIndexer *txIndexer // Transaction indexer, might be nil if not enabled
stateManager TrieWriter
hc *HeaderChain
@@ -939,6 +940,14 @@ func (bc *BlockChain) Stop() {
// Ensure that the entirety of the state snapshot is journaled to disk.
if bc.snaps != nil {
+ _, err := bc.db.Has(nil)
+ dbOpen := err == nil
+ if dbOpen {
+ //if _, err = bc.snaps.Journal(bc.CurrentBlock().Root); err != nil {
+ // log.Error("Failed to journal state snapshot", "err", err)
+ //}
+ }
+ bc.snaps.AbortGeneration()
bc.snaps.Release()
}
if bc.triedb.Scheme() == rawdb.PathScheme {
@@ -1125,8 +1134,8 @@ func (bc *BlockChain) newTip(block *types.Block) bool {
// canonical chain.
// writeBlockAndSetHead expects to be the last verification step during InsertBlock
// since it creates a reference that will only be cleaned up by Accept/Reject.
-func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
- if err := bc.writeBlockWithState(block, receipts, state); err != nil {
+func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, parentRoot common.Hash, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
+ if err := bc.writeBlockWithState(block, parentRoot, receipts, state); err != nil {
return err
}
@@ -1143,7 +1152,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
-func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
+func (bc *BlockChain) writeBlockWithState(block *types.Block, parentRoot common.Hash, receipts []*types.Receipt, state *state.StateDB) error {
// Irrelevant of the canonical status, write the block itself to the database.
//
// Note all the components of block(hash->number map, header, body, receipts)
@@ -1157,14 +1166,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
// Commit all cached state changes into underlying memory database.
- // If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
- // diff layer for the block.
var err error
- if bc.snaps == nil {
- _, err = state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), true)
- } else {
- _, err = state.CommitWithSnap(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
- }
+ _, err = bc.commitWithSnap(block, parentRoot, state)
if err != nil {
return err
}
@@ -1367,7 +1370,7 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
// will be cleaned up in Accept/Reject so we need to ensure an error cannot occur
// later in verification, since that would cause the referenced root to never be dereferenced.
wstart := time.Now()
- if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil {
+ if err := bc.writeBlockAndSetHead(block, parent.Root, receipts, logs, statedb); err != nil {
return err
}
// Update the metrics touched during block commit
@@ -1667,7 +1670,7 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
if snap == nil {
return common.Hash{}, fmt.Errorf("failed to get snapshot for parent root: %s", parentRoot)
}
- statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, snap)
+ statedb, err = state.New(parentRoot, bc.stateCache, bc.snaps)
}
if err != nil {
return common.Hash{}, fmt.Errorf("could not fetch state for (%s: %d): %v", parent.Hash().Hex(), parent.NumberU64(), err)
@@ -1692,12 +1695,33 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
log.Debug("Processed block", "block", current.Hash(), "number", current.NumberU64())
// Commit all cached state changes into underlying memory database.
- // If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
- // diff layer for the block.
- if bc.snaps == nil {
- return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), false)
+ return bc.commitWithSnap(current, parentRoot, statedb)
+}
+
+func (bc *BlockChain) commitWithSnap(
+ current *types.Block, parentRoot common.Hash, statedb *state.StateDB,
+) (common.Hash, error) {
+ // If snapshots are enabled, WithBlockHashes must be called as snapshot layers
+ // are stored by block hash.
+ root, err := statedb.Commit(
+ current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()),
+ ethsnapshot.WithBlockHashes(current.Hash(), current.ParentHash()),
+ )
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Upstream does not perform a snapshot update if the root is the same as the
+ // parent root; however, here the snapshots are keyed by block hash, so
+ // this update is necessary.
+ if bc.snaps != nil && root == parentRoot {
+ if err := bc.snaps.Update(
+ root, parentRoot, nil, nil, nil,
+ ethsnapshot.WithBlockHashes(current.Hash(), current.ParentHash()),
+ ); err != nil {
+ return common.Hash{}, err
+ }
}
- return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
+ return root, nil
}
// initSnapshot instantiates a Snapshot instance and adds it to [bc]
@@ -1715,14 +1739,13 @@ func (bc *BlockChain) initSnapshot(b *types.Header) {
asyncBuild := !bc.cacheConfig.SnapshotWait && b.Number.Uint64() > 0
noBuild := bc.cacheConfig.SnapshotNoBuild && b.Number.Uint64() > 0
log.Info("Initializing snapshots", "async", asyncBuild, "rebuild", !noBuild, "headHash", b.Hash(), "headRoot", b.Root)
- snapconfig := snapshot.Config{
+ snapconfig := ethsnapshot.Config{
CacheSize: bc.cacheConfig.SnapshotLimit,
NoBuild: noBuild,
AsyncBuild: asyncBuild,
- SkipVerify: !bc.cacheConfig.SnapshotVerify,
}
var err error
- bc.snaps, err = snapshot.New(snapconfig, bc.db, bc.triedb, b.Hash(), b.Root)
+ bc.snaps, err = ethsnapshot.New(snapconfig, bc.db, bc.triedb, b.Root)
if err != nil {
log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root, "err", err, "async", asyncBuild)
}
@@ -1838,7 +1861,6 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error
// Flatten snapshot if initialized, holding a reference to the state root until the next block
// is processed.
if err := bc.flattenSnapshot(func() error {
- triedb.Reference(root, common.Hash{})
if previousRoot != (common.Hash{}) {
triedb.Dereference(previousRoot)
}
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 335ef2fcb8..7aa11ad75e 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -33,10 +33,10 @@ import (
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/event"
+ "github.com/ava-labs/libevm/triedb"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
@@ -264,10 +264,14 @@ func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
// Snapshots returns the blockchain snapshot tree.
-func (bc *BlockChain) Snapshots() *snapshot.Tree {
+func (bc *BlockChain) Snapshots() snapshot.DiskIterable {
return bc.snaps
}
+func (bc *BlockChain) VerifySnapshot(root common.Hash) error {
+ return bc.snaps.Verify(root)
+}
+
// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
return bc.validator
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index e8c812f261..4f29d198ce 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -38,10 +38,10 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/require"
)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 510065b1b0..aa154839d0 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -156,7 +156,7 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
- } else if len(chain.snaps.Snapshots(block.Hash(), -1, false)) != 1 {
+ } else if len(chain.snaps.Snapshots(block.Root(), -1, false)) != 1 {
t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 52b6351922..9a2d833cd7 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -37,10 +37,10 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -298,7 +298,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Write state changes to db
- root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), false)
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 7ff11def9d..06b482388d 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -34,10 +34,10 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
)
func ExampleGenerateChain() {
diff --git a/core/extstate/statedb.go b/core/extstate/statedb.go
index 86687acca8..1c81601064 100644
--- a/core/extstate/statedb.go
+++ b/core/extstate/statedb.go
@@ -15,8 +15,9 @@ import (
type VmStateDB interface {
vm.StateDB
+ Logs() []*types.Log
+
GetTxHash() common.Hash
- GetLogData() (topics [][]common.Hash, data [][]byte)
GetBalanceMultiCoin(common.Address, common.Hash) *big.Int
AddBalanceMultiCoin(common.Address, common.Hash, *big.Int)
SubBalanceMultiCoin(common.Address, common.Hash, *big.Int)
@@ -36,6 +37,16 @@ func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, d
s.VmStateDB.Prepare(rules, sender, coinbase, dst, precompiles, list)
}
+// GetLogData returns the underlying topics and data from each log included in the StateDB
+// Test helper function.
+func (s *StateDB) GetLogData() (topics [][]common.Hash, data [][]byte) {
+ for _, log := range s.Logs() {
+ topics = append(topics, log.Topics)
+ data = append(data, common.CopyBytes(log.Data))
+ }
+ return topics, data
+}
+
// GetPredicateStorageSlots returns the storage slots associated with the address, index pair.
// A list of access tuples can be included within transaction types post EIP-2930. The address
// is declared directly on the access tuple and the index is the i'th occurrence of an access
diff --git a/core/genesis.go b/core/genesis.go
index 9b0ed37946..19b01bd671 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -37,14 +37,14 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/common/math"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -219,8 +219,8 @@ func (g *Genesis) trieConfig() *triedb.Config {
return nil
}
return &triedb.Config{
- PathDB: pathdb.Defaults,
- IsVerkle: true,
+ DBOverride: pathdb.Defaults.BackendConstructor,
+ IsVerkle: true,
}
}
@@ -258,11 +258,6 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
- if account.MCBalance != nil {
- for coinID, value := range account.MCBalance {
- statedb.AddBalanceMultiCoin(addr, coinID, value)
- }
- }
}
root := statedb.IntermediateRoot(false)
head.Root = root
@@ -299,7 +294,7 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo
}
}
- statedb.Commit(0, false, false)
+ statedb.Commit(0, false)
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
if err := triedb.Commit(root, true); err != nil {
diff --git a/core/genesis_extra_test.go b/core/genesis_extra_test.go
index 91e7a47176..7889d6668d 100644
--- a/core/genesis_extra_test.go
+++ b/core/genesis_extra_test.go
@@ -34,9 +34,9 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/require"
)
diff --git a/core/genesis_test.go b/core/genesis_test.go
index ae3ed5de70..94d3435f7d 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -38,13 +38,13 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/precompile/contracts/warp"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/davecgh/go-spew/spew"
"github.com/stretchr/testify/require"
)
@@ -285,7 +285,7 @@ func newDbConfig(scheme string) *triedb.Config {
if scheme == rawdb.HashScheme {
return triedb.HashDefaults
}
- return &triedb.Config{PathDB: pathdb.Defaults}
+ return &triedb.Config{DBOverride: pathdb.Defaults.BackendConstructor}
}
func TestVerkleGenesisCommit(t *testing.T) {
@@ -325,7 +325,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
+ triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, DBOverride: pathdb.Defaults.BackendConstructor})
block := genesis.MustCommit(db, triedb)
if !bytes.Equal(block.Root().Bytes(), expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
diff --git a/core/state/database.go b/core/state/database.go
index 1f9fd40f13..e72770bb08 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -32,14 +32,14 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/utils"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/lru"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/utils"
+ "github.com/ava-labs/libevm/triedb"
"github.com/crate-crypto/go-ipa/banderwagon"
)
diff --git a/core/state/dump.go b/core/state/dump.go
index a8239b616d..3d0c81494e 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -32,11 +32,11 @@ import (
"time"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
// DumpConfig is a set of options to control what portions of the state will be
@@ -156,7 +156,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
Nonce: data.Nonce,
Root: data.Root[:],
CodeHash: data.CodeHash,
- IsMultiCoin: data.IsMultiCoin,
+ IsMultiCoin: types.IsMultiCoin(&data),
AddressHash: it.Key,
}
address *common.Address
diff --git a/core/state/iterator.go b/core/state/iterator.go
index 409a08f148..f615f396b3 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -32,9 +32,9 @@ import (
"fmt"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
// nodeIterator is an iterator to traverse the entire state trie post-order,
diff --git a/core/state/journal.go b/core/state/journal.go
index 4a3dedf8e4..9029cfacfc 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -27,6 +27,7 @@
package state
import (
+ "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/libevm/common"
"github.com/holiman/uint256"
)
@@ -226,7 +227,8 @@ func (ch balanceChange) dirtied() *common.Address {
}
func (ch multiCoinEnable) revert(s *StateDB) {
- s.getStateObject(*ch.account).data.IsMultiCoin = false
+ // XXX: This should be removed once the libevm statedb is in use.
+ types.DisableMultiCoin(&s.getStateObject(*ch.account).data)
}
func (ch multiCoinEnable) dirtied() *common.Address {
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 79ce9f3815..ce5d2b301d 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -38,14 +38,14 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const (
@@ -87,7 +87,7 @@ type Pruner struct {
chainHeader *types.Header
db ethdb.Database
stateBloom *stateBloom
- snaptree *snapshot.Tree
+ snaptree *ethsnapshot.Tree
}
// NewPruner creates the pruner instance.
@@ -103,13 +103,12 @@ func NewPruner(db ethdb.Database, config Config) (*Pruner, error) {
// us from ever needing to enter RecoverPruning in an invalid pruning session (a session where we do not have
// the protected trie in the triedb and in the snapshot disk layer).
- snapconfig := snapshot.Config{
+ snapconfig := ethsnapshot.Config{
CacheSize: 256,
AsyncBuild: false,
NoBuild: true,
- SkipVerify: true,
}
- snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Hash(), headBlock.Root())
+ snaptree, err := ethsnapshot.New(snapconfig, db, triedb, headBlock.Root())
if err != nil {
return nil, fmt.Errorf("failed to create snapshot for pruning, must restart without offline pruning disabled to recover: %w", err) // The relevant snapshot(s) might not exist
}
@@ -285,7 +284,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Traverse the target state, re-construct the whole state trie and
// commit to the given bloom filter.
start := time.Now()
- if err := snapshot.GenerateTrie(p.snaptree, root, p.db, p.stateBloom); err != nil {
+ if err := ethsnapshot.GenerateTrie(p.snaptree, root, p.db, p.stateBloom); err != nil {
return err
}
// Traverse the genesis, put all genesis state entries into the
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index efe115a952..ea2b8f7c79 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -37,11 +37,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
// trieKV represents a trie key-value pair
@@ -92,7 +92,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd
rawdb.WriteCode(dst, codeHash, code)
}
// Then migrate all storage trie nodes into the tmp db.
- storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{}, false)
+ storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{})
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 684b740d85..87fc3f4e2f 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -33,11 +33,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
)
// diskLayer is a low level persistent snapshot built on top of a key-value store.
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index dadd54ef07..1f76dbf1e0 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -121,7 +121,7 @@ func TestDiskMerge(t *testing.T) {
base.Storage(conNukeCache, conNukeCacheSlot)
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
+ if err := snaps.UpdateWithBlockHashes(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
@@ -341,7 +341,7 @@ func TestDiskPartialMerge(t *testing.T) {
assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
+ if err := snaps.UpdateWithBlockHashes(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
@@ -460,7 +460,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
dl := snaps.disklayer()
dl.genMarker = genMarker
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, nil, map[common.Hash][]byte{
+ if err := snaps.UpdateWithBlockHashes(diffBlockHash, diffRoot, baseBlockHash, nil, map[common.Hash][]byte{
accTwo: accTwo[:],
}, nil); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err)
@@ -478,7 +478,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
}
// Test scenario 2, the disk layer is fully generated
// Modify or delete some accounts, flatten everything onto disk
- if err := snaps.Update(diffTwoBlockHash, diffTwoRoot, diffBlockHash, nil, map[common.Hash][]byte{
+ if err := snaps.UpdateWithBlockHashes(diffTwoBlockHash, diffTwoRoot, diffBlockHash, nil, map[common.Hash][]byte{
accThree: accThree.Bytes(),
}, map[common.Hash]map[common.Hash][]byte{
accThree: {accThreeSlot: accThreeSlot.Bytes()},
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 7305750025..1021cd1ed6 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -33,13 +33,13 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const (
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 31a1f5271f..fe1b33f17b 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -34,15 +34,15 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
@@ -176,9 +176,9 @@ func newHelper(scheme string) *testHelper {
diskdb := rawdb.NewMemoryDatabase()
config := &triedb.Config{}
if scheme == rawdb.PathScheme {
- config.PathDB = &pathdb.Config{} // disable caching
+ config.DBOverride = pathdb.Config{}.BackendConstructor // disable caching
} else {
- config.HashDB = &hashdb.Config{} // disable caching
+ config.DBOverride = hashdb.Config{}.BackendConstructor // disable caching
}
triedb := triedb.NewDatabase(diskdb, config)
accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
index 7cc5dee33f..0fbffe0a56 100644
--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -33,6 +33,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/libevm/common"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/ethdb"
)
@@ -59,23 +60,11 @@ type Iterator interface {
// AccountIterator is an iterator to step over all the accounts in a snapshot,
// which may or may not be composed of multiple layers.
-type AccountIterator interface {
- Iterator
-
- // Account returns the RLP encoded slim account the iterator is currently at.
- // An error will be returned if the iterator becomes invalid
- Account() []byte
-}
+type AccountIterator = ethsnapshot.AccountIterator
// StorageIterator is an iterator to step over the specific storage in a snapshot,
// which may or may not be composed of multiple layers.
-type StorageIterator interface {
- Iterator
-
- // Slot returns the storage slot the iterator is currently at. An error will
- // be returned if the iterator becomes invalid
- Slot() []byte
-}
+type StorageIterator = ethsnapshot.StorageIterator
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a single diff layer. Higher order iterators
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
index f668e4df8b..b2862407b2 100644
--- a/core/state/snapshot/iterator_test.go
+++ b/core/state/snapshot/iterator_test.go
@@ -222,13 +222,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
// Create a snapshot tree with a single empty disk layer with the specified root and block hash
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Verify the single and multi-layer iterators
@@ -263,13 +263,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
// Verify the single and multi-layer iterators
@@ -279,7 +279,7 @@ func TestStorageIteratorTraversal(t *testing.T) {
verifyIterator(t, 3, diffIter, verifyNothing)
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
- it, _ := snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 6, it, verifyStorage)
it.Release()
@@ -296,7 +296,7 @@ func TestStorageIteratorTraversal(t *testing.T) {
}
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 6, it, verifyStorage)
it.Release()
}
@@ -342,14 +342,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
}
}
// Assemble a stack of snapshots from the account layers
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, a, nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, b, nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, c, nil)
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, d, nil)
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, e, nil)
- snaps.Update(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, f, nil)
- snaps.Update(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, g, nil)
- snaps.Update(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, h, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, a, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, b, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, c, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, d, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, e, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, f, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, g, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, h, nil)
it, _ := snaps.AccountIterator(common.HexToHash("0xff09"), common.Hash{}, false)
head := snaps.Snapshot(common.HexToHash("0xff09"))
@@ -437,16 +437,16 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
}
}
// Assemble a stack of snapshots from the account layers
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
- snaps.Update(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
- snaps.Update(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
- snaps.Update(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
-
- it, _ := snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{}, false)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x07"), common.HexToHash("0xff07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x08"), common.HexToHash("0xff08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x09"), common.HexToHash("0xff09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{})
head := snaps.Snapshot(common.HexToHash("0xff09"))
for it.Next() {
hash := it.Hash()
@@ -474,7 +474,7 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
}
}
- it, _ = snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff09"), common.HexToHash("0xaa"), common.Hash{})
for it.Next() {
hash := it.Hash()
want, err := head.Storage(common.HexToHash("0xaa"), hash)
@@ -503,7 +503,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
// Build up a large stack of snapshots
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
for i := 1; i < 128; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
}
// Iterate the entire stack and ensure everything is hit only once
head := snaps.Snapshot(common.HexToHash("0xff80"))
@@ -543,13 +543,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Create a stack of diffs on top
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Create an iterator and flatten the data from underneath it
@@ -568,13 +568,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
func TestAccountIteratorSeek(t *testing.T) {
// Create a snapshot stack with some initial data
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Account set is now
@@ -623,13 +623,13 @@ func TestStorageIteratorSeek(t *testing.T) {
// Create a snapshot stack with some initial data
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
// Account set is now
@@ -637,35 +637,35 @@ func TestStorageIteratorSeek(t *testing.T) {
// 03: 01, 02, 03, 05 (, 05), 06
// 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
// Construct various iterators and ensure their traversal is correct
- it, _ := snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x01"), false)
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
defer it.Release()
verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
- it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x02"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
defer it.Release()
verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
- it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x5"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
defer it.Release()
verifyIterator(t, 1, it, verifyStorage) // expected: 05
- it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x6"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
defer it.Release()
verifyIterator(t, 0, it, verifyStorage) // expected: nothing
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x01"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
defer it.Release()
verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x05"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
defer it.Release()
verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x08"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
defer it.Release()
verifyIterator(t, 1, it, verifyStorage) // expected: 08
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x09"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
defer it.Release()
verifyIterator(t, 0, it, verifyStorage) // expected: nothing
}
@@ -677,17 +677,17 @@ func TestAccountIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"),
nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
deleted := common.HexToHash("0x22")
destructed := map[common.Hash]struct{}{
deleted: {},
}
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"),
destructed, randomAccountSet("0x11", "0x33"), nil)
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"),
nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
// The output should be 11,33,44,55
@@ -714,19 +714,19 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// Stack three diff layers on top with various overlaps
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
- snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
// The output should be 02,04,05,06
- it, _ := snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ := snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 4, it, verifyStorage)
it.Release()
// The output should be 04,05,06
- it, _ = snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.HexToHash("0x03"), false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
verifyIterator(t, 3, it, verifyStorage)
it.Release()
@@ -734,24 +734,24 @@ func TestStorageIteratorDeletions(t *testing.T) {
destructed := map[common.Hash]struct{}{
common.HexToHash("0xaa"): {},
}
- snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), destructed, nil, nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), destructed, nil, nil)
- it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 0, it, verifyStorage)
it.Release()
// Re-insert the slots of the same account
- snaps.Update(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil,
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x05"), common.HexToHash("0xff05"), common.HexToHash("0x04"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
// The output should be 07,08,09
- it, _ = snaps.StorageIterator(common.HexToHash("0xff05"), common.HexToHash("0xaa"), common.Hash{}, false)
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff05"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 3, it, verifyStorage)
it.Release()
// Destruct the whole storage but re-create the account in the same layer
- snaps.Update(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
- it, _ = snaps.StorageIterator(common.HexToHash("0xff06"), common.HexToHash("0xaa"), common.Hash{}, false)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x06"), common.HexToHash("0xff06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
+ it, _ = snaps.StorageIterator(common.HexToHash("0xff06"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
it.Release()
@@ -783,7 +783,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
// Build up a large stack of snapshots
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
for i := 1; i <= 100; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
}
// We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results.
@@ -869,9 +869,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
}
// Build up a large stack of snapshots
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
- snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
for i := 2; i <= 100; i++ {
- snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0xff%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
}
// We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results.
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 374de3b71d..f793ffb832 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -33,11 +33,11 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
)
// journalGenerator is a disk layer entry containing the generator progress marker.
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index a60e28eacf..f23d97537a 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -35,12 +35,12 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/triedb"
)
const (
@@ -118,28 +118,7 @@ var (
)
// Snapshot represents the functionality supported by a snapshot storage layer.
-type Snapshot interface {
- // Root returns the root hash for which this snapshot was made.
- Root() common.Hash
-
- // Account directly retrieves the account associated with a particular hash in
- // the snapshot slim data format.
- Account(hash common.Hash) (*types.SlimAccount, error)
-
- // AccountRLP directly retrieves the account RLP associated with a particular
- // hash in the snapshot slim data format.
- AccountRLP(hash common.Hash) ([]byte, error)
-
- // Storage directly retrieves the storage data associated with a particular hash,
- // within a particular account.
- Storage(accountHash, storageHash common.Hash) ([]byte, error)
-
- // AccountIterator creates an account iterator over the account trie given by the provided root hash.
- AccountIterator(seek common.Hash) AccountIterator
-
- // StorageIterator creates a storage iterator over the storage trie given by the provided root hash.
- StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
-}
+type Snapshot = ethsnapshot.Snapshot
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
@@ -164,6 +143,12 @@ type snapshot interface {
// Stale return whether this layer has become stale (was flattened across) or
// if it's still live.
Stale() bool
+
+ // AccountIterator creates an account iterator over an arbitrary layer.
+ AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}
// Config includes the configurations for snapshots.
@@ -199,6 +184,11 @@ type Tree struct {
// Test hooks
onFlatten func() // Hook invoked when the bottom most diff layers are flattened
+
+ // XXX: The following fields are to help with integrating the modified snapshot
+ // with the upstream statedb.
+ parentBlockHash *common.Hash
+ blockHash *common.Hash
}
// New attempts to load an already existing snapshot from a persistent key-value
@@ -321,9 +311,35 @@ func (t *Tree) Snapshots(blockHash common.Hash, limits int, nodisk bool) []Snaps
return ret
}
+func (t *Tree) WithBlockHashes(blockHash, parentBlockHash common.Hash) {
+ t.blockHash = &blockHash
+ t.parentBlockHash = &parentBlockHash
+}
+
// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) Update(blockHash, blockRoot, parentBlockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) Update(
+ blockRoot common.Hash,
+ parentRoot common.Hash,
+ destructs map[common.Hash]struct{},
+ accounts map[common.Hash][]byte,
+ storage map[common.Hash]map[common.Hash][]byte,
+ opts ...ethsnapshot.LibEVMOption,
+) error {
+ blockHash := *t.blockHash
+ parentBlockHash := *t.parentBlockHash
+
+ // Clear the block hashes, they must be set each time
+ t.blockHash, t.parentBlockHash = nil, nil
+ return t.UpdateWithBlockHashes(blockHash, blockRoot, parentBlockHash, destructs, accounts, storage)
+}
+
+func (t *Tree) UpdateWithBlockHashes(
+ blockHash, blockRoot, parentBlockHash common.Hash,
+ destructs map[common.Hash]struct{},
+ accounts map[common.Hash][]byte,
+ storage map[common.Hash]map[common.Hash][]byte,
+) error {
t.lock.Lock()
defer t.lock.Unlock()
@@ -381,6 +397,10 @@ func (t *Tree) verifyIntegrity(base *diskLayer, waitBuild bool) error {
return nil
}
+func (t *Tree) Cap(root common.Hash, layers int) error {
+ return nil // No-op for now
+}
+
// Flatten flattens the snapshot for [blockHash] into its parent. if its
// parent is not a disk layer, Flatten will return an error.
// Note: a blockHash is used instead of a state root so that the exact state
@@ -823,7 +843,11 @@ func (t *Tree) AccountIterator(root common.Hash, seek common.Hash, force bool) (
// account. The iterator will be move to the specific start position. When [force]
// is true, a new account iterator is created without acquiring the [snapTree]
// lock and without confirming that the snapshot on the disk layer is fully generated.
-func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash, force bool) (StorageIterator, error) {
+func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ return t.StorageIteratorWithForce(root, account, seek, false)
+}
+
+func (t *Tree) StorageIteratorWithForce(root common.Hash, account common.Hash, seek common.Hash, force bool) (StorageIterator, error) {
if !force {
ok, err := t.generating()
if err != nil {
@@ -854,7 +878,7 @@ func (t *Tree) verify(root common.Hash, force bool) error {
defer acctIt.Release()
got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
- storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}, force)
+ storageIt, err := t.StorageIteratorWithForce(root, accountHash, common.Hash{}, force)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/snapshot/snapshot_ext.go b/core/state/snapshot/snapshot_ext.go
index 829e242d39..edf4b7049d 100644
--- a/core/state/snapshot/snapshot_ext.go
+++ b/core/state/snapshot/snapshot_ext.go
@@ -23,9 +23,24 @@ func (t *Tree) DiskStorageIterator(account common.Hash, seek common.Hash) Storag
return it
}
+type SnapshotIterable interface {
+ Snapshot
+
+ // AccountIterator creates an account iterator over an arbitrary layer.
+ AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
+}
+
+type DiskIterable interface {
+ DiskAccountIterator(seek common.Hash) AccountIterator
+ DiskStorageIterator(account common.Hash, seek common.Hash) StorageIterator
+}
+
// NewDiskLayer creates a diskLayer for direct access to the contents of the on-disk
// snapshot. Does not perform any validation.
-func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot {
+func NewDiskLayer(diskdb ethdb.KeyValueStore) SnapshotIterable {
return &diskLayer{
diskdb: diskdb,
created: time.Now(),
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 45c502f96b..cc98830e13 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -106,7 +106,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(),
}
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
if n := snaps.NumStateLayers(); n != 2 {
@@ -147,10 +147,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(),
}
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
if n := snaps.NumBlockLayers(); n != 3 {
@@ -196,13 +196,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(),
}
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x02"), common.HexToHash("0xff02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x03"), common.HexToHash("0xff03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(common.HexToHash("0x04"), common.HexToHash("0xff04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
if n := snaps.NumStateLayers(); n != 4 {
@@ -244,12 +244,12 @@ func TestPostFlattenBasicDataAccess(t *testing.T) {
// Create a starting base layer and a snapshot tree out of it
snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01"))
// The lowest difflayer
- snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0xffa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
- snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xffa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
- snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xffb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xa1"), common.HexToHash("0xffa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xa2"), common.HexToHash("0xffa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xb2"), common.HexToHash("0xffb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
- snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xffa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
- snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xffb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xa3"), common.HexToHash("0xffa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+ snaps.UpdateWithBlockHashes(common.HexToHash("0xb3"), common.HexToHash("0xffb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
// checkExist verifies if an account exists in a snapshot
checkExist := func(layer Snapshot, key string) error {
@@ -434,10 +434,10 @@ func TestTreeFlattenDoesNotDropPendingLayers(t *testing.T) {
diffBlockAHash := common.Hash{0xee, 0xee, byte(i)}
diffBlockBHash := common.Hash{0xdd, 0xdd, byte(i)}
diffBlockRoot := common.Hash{0xff, 0xff, byte(i)}
- if err := snaps.Update(diffBlockAHash, diffBlockRoot, parentAHash, nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockAHash, diffBlockRoot, parentAHash, nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
- if err := snaps.Update(diffBlockBHash, diffBlockRoot, parentBHash, nil, accounts, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockBHash, diffBlockRoot, parentBHash, nil, accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err)
}
@@ -509,7 +509,7 @@ func TestStaleOriginLayer(t *testing.T) {
}
// Create diff layer A containing account 0xa1
- if err := snaps.Update(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
t.Errorf("failed to create diff layer A: %v", err)
}
// Flatten account 0xa1 to disk
@@ -519,12 +519,12 @@ func TestStaleOriginLayer(t *testing.T) {
}
// Create diff layer B containing account 0xa2
// The bloom filter should contain only 0xa2.
- if err := snaps.Update(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
t.Errorf("failed to create diff layer B: %v", err)
}
// Create diff layer C containing account 0xa3
// The bloom filter should contain 0xa2 and 0xa3
- if err := snaps.Update(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
t.Errorf("failed to create diff layer C: %v", err)
}
@@ -591,16 +591,16 @@ func TestRebloomOnFlatten(t *testing.T) {
}
// Build the tree
- if err := snaps.Update(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashA, diffRootA, baseBlockHash, nil, accountsA, nil); err != nil {
t.Errorf("failed to create diff layer A: %v", err)
}
- if err := snaps.Update(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashB, diffRootB, diffBlockHashA, nil, accountsB, nil); err != nil {
t.Errorf("failed to create diff layer B: %v", err)
}
- if err := snaps.Update(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashC, diffRootC, diffBlockHashB, nil, accountsC, nil); err != nil {
t.Errorf("failed to create diff layer C: %v", err)
}
- if err := snaps.Update(diffBlockHashD, diffRootD, diffBlockHashB, nil, accountsD, nil); err != nil {
+ if err := snaps.UpdateWithBlockHashes(diffBlockHashD, diffRootD, diffBlockHashB, nil, accountsD, nil); err != nil {
t.Errorf("failed to create diff layer D: %v", err)
}
@@ -687,9 +687,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
snaps := NewTestTree(rawdb.NewMemoryDatabase(), baseBlockHash, baseRoot)
// 4 layers in total, 3 diff layers and 1 disk layers
- snaps.Update(diffBlockHashA, diffRootA, baseBlockHash, nil, setAccount("0xa1"), nil)
- snaps.Update(diffBlockHashB, diffRootB, diffBlockHashA, nil, setAccount("0xa2"), nil)
- snaps.Update(diffBlockHashC, diffRootC, diffBlockHashB, nil, setAccount("0xa3"), nil)
+ snaps.UpdateWithBlockHashes(diffBlockHashA, diffRootA, baseBlockHash, nil, setAccount("0xa1"), nil)
+ snaps.UpdateWithBlockHashes(diffBlockHashB, diffRootB, diffBlockHashA, nil, setAccount("0xa2"), nil)
+ snaps.UpdateWithBlockHashes(diffBlockHashC, diffRootC, diffBlockHashB, nil, setAccount("0xa3"), nil)
// Obtain the topmost snapshot handler for state accessing
snap := snaps.Snapshot(diffRootC)
diff --git a/core/state/state_object.go b/core/state/state_object.go
index dd3d966157..44345a9b0c 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -35,10 +35,10 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie/trienode"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/trienode"
"github.com/holiman/uint256"
)
@@ -104,7 +104,7 @@ type stateObject struct {
// empty returns whether the account is considered empty.
func (s *stateObject) empty() bool {
- return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) && !s.data.IsMultiCoin
+ return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) && !types.IsMultiCoin(&s.data)
}
// newObject creates a state object.
@@ -478,7 +478,7 @@ func (s *stateObject) setBalance(amount *uint256.Int) {
}
func (s *stateObject) enableMultiCoin() {
- s.data.IsMultiCoin = true
+ types.EnableMultiCoin(&s.data)
}
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
@@ -602,7 +602,7 @@ func (s *stateObject) BalanceMultiCoin(coinID common.Hash, db Database) *big.Int
}
func (s *stateObject) EnableMultiCoin() bool {
- if s.data.IsMultiCoin {
+ if types.IsMultiCoin(&s.data) {
return false
}
s.db.journal.append(multiCoinEnable{
diff --git a/core/state/state_test.go b/core/state/state_test.go
index f5cb0ca363..b4f17983eb 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -33,10 +33,10 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -68,7 +68,7 @@ func TestDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- root, _ := s.state.Commit(0, false, false)
+ root, _ := s.state.Commit(0, false)
// check that DumpToCollector contains the state objects that are in trie
s.state, _ = New(root, tdb, nil)
@@ -130,7 +130,7 @@ func TestIterativeDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- root, _ := s.state.Commit(0, false, false)
+ root, _ := s.state.Commit(0, false)
s.state, _ = New(root, tdb, nil)
b := &bytes.Buffer{}
@@ -156,7 +156,7 @@ func TestNull(t *testing.T) {
var value common.Hash
s.state.SetState(address, common.Hash{}, value)
- s.state.Commit(0, false, false)
+ s.state.Commit(0, false)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
t.Errorf("expected empty current value, got %x", value)
@@ -228,8 +228,8 @@ func TestSnapshot2(t *testing.T) {
so0.deleted = false
state.setStateObject(so0)
- root, _ := state.Commit(0, false, false)
- state, _ = New(root, state.db, nil)
+ root, _ := state.Commit(0, false)
+ state, _ = New(root, state.db, state.snaps)
// and one with deleted == true
so1 := state.getStateObject(stateobjaddr1)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index e3699be04d..e0b817c25b 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -30,6 +30,7 @@ package state
import (
"fmt"
"math/big"
+ "reflect"
"sort"
"time"
@@ -37,19 +38,18 @@ import (
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
+ ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/params"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"github.com/holiman/uint256"
)
const (
- // storageDeleteLimit denotes the highest permissible memory allocation
- // employed for contract storage deletion.
storageDeleteLimit = 512 * 1024 * 1024
)
@@ -58,6 +58,20 @@ type revision struct {
journalIndex int
}
+type snapshotTree interface {
+ Snapshot(root common.Hash) ethsnapshot.Snapshot
+ Update(
+ blockRoot common.Hash,
+ parentRoot common.Hash,
+ destructs map[common.Hash]struct{},
+ accounts map[common.Hash][]byte,
+ storage map[common.Hash]map[common.Hash][]byte,
+ opts ...ethsnapshot.LibEVMOption,
+ ) error
+ StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (ethsnapshot.StorageIterator, error)
+ //Cap(root common.Hash, layers int, ...opts ethsnapshot.) error
+}
+
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
@@ -74,6 +88,7 @@ type StateDB struct {
prefetcher *triePrefetcher
trie Trie
hasher crypto.KeccakState
+ snaps snapshotTree // Nil if snapshot is not available
snap snapshot.Snapshot // Nil if snapshot is not available
// originalRoot is the pre-state root, before any changes were made.
@@ -151,19 +166,7 @@ type StateDB struct {
}
// New creates a new state from a given trie.
-func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
- var snap snapshot.Snapshot
- if snaps != nil {
- snap = snaps.Snapshot(root)
- }
- return NewWithSnapshot(root, db, snap)
-}
-
-// NewWithSnapshot creates a new state from a given trie with the specified [snap]
-// If [snap] doesn't have the same root as [root], then NewWithSnapshot will return
-// an error. If snap is nil, then no snapshot will be used and CommitWithSnapshot
-// cannot be called on the returned StateDB.
-func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*StateDB, error) {
+func New(root common.Hash, db Database, snaps snapshotTree) (*StateDB, error) {
tr, err := db.OpenTrie(root)
if err != nil {
return nil, err
@@ -172,6 +175,7 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St
db: db,
trie: tr,
originalRoot: root,
+ snaps: snaps,
accounts: make(map[common.Hash][]byte),
storages: make(map[common.Hash]map[common.Hash][]byte),
accountsOrigin: make(map[common.Address][]byte),
@@ -187,11 +191,16 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St
transientStorage: newTransientStorage(),
hasher: crypto.NewKeccakState(),
}
- if snap != nil {
- if snap.Root() != root {
- return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex())
+ if sdb.snaps != nil {
+ // XXX: Make sure we treat incoming `nil` ptrs as `nil` values, not an
+ // interface to a nil ptr
+ v := reflect.ValueOf(sdb.snaps)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ sdb.snaps = nil
}
- sdb.snap = snap
+ }
+ if sdb.snaps != nil {
+ sdb.snap = sdb.snaps.Snapshot(root)
}
return sdb, nil
}
@@ -230,9 +239,6 @@ func (s *StateDB) Error() error {
return s.dbErr
}
-// AddLog adds a log with the specified parameters to the statedb
-// Note: blockNumber is a required argument because StateDB does not
-// know the current block number.
func (s *StateDB) AddLog(log *types.Log) {
s.journal.append(addLogChange{txhash: s.thash})
@@ -262,20 +268,6 @@ func (s *StateDB) Logs() []*types.Log {
return logs
}
-// GetLogData returns the underlying topics and data from each log included in the StateDB
-// Test helper function.
-func (s *StateDB) GetLogData() ([][]common.Hash, [][]byte) {
- var logData [][]byte
- var topics [][]common.Hash
- for _, lgs := range s.logs {
- for _, log := range lgs {
- topics = append(topics, log.Topics)
- logData = append(logData, common.CopyBytes(log.Data))
- }
- }
- return topics, logData
-}
-
// AddPreimage records a SHA3 preimage seen by the VM.
func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
if _, ok := s.preimages[hash]; !ok {
@@ -643,26 +635,20 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
var data *types.StateAccount
if s.snap != nil {
start := time.Now()
- acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
+ acc, err := s.snap.AccountRLP(crypto.HashData(s.hasher, addr.Bytes()))
if metrics.EnabledExpensive {
s.SnapshotAccountReads += time.Since(start)
}
if err == nil {
- if acc == nil {
+ if len(acc) == 0 {
return nil
}
- data = &types.StateAccount{
- Nonce: acc.Nonce,
- Balance: acc.Balance,
- CodeHash: acc.CodeHash,
- IsMultiCoin: acc.IsMultiCoin,
- Root: common.BytesToHash(acc.Root),
- }
- if len(data.CodeHash) == 0 {
- data.CodeHash = types.EmptyCodeHash.Bytes()
- }
- if data.Root == (common.Hash{}) {
- data.Root = types.EmptyRootHash
+ // XXX: This is temporary until using the upstream statedb.
+ // Otherwise we must set IsMultiCoin based on
+ data, err = types.FullAccount(acc)
+ if err != nil {
+ s.setError(fmt.Errorf("getDeletedStateObject (%x) error: %w", addr.Bytes(), err))
+ return nil
}
}
}
@@ -692,6 +678,10 @@ func (s *StateDB) setStateObject(object *stateObject) {
s.stateObjects[object.Address()] = object
}
+func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
+ return s.getOrNewStateObject(addr)
+}
+
// getOrNewStateObject retrieves a state object or create a new state object if nil.
func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
stateObject := s.getStateObject(addr)
@@ -787,7 +777,8 @@ func (s *StateDB) Copy() *StateDB {
// to the snapshot tree, we need to copy that as well. Otherwise, any
// block mined by ourselves will cause gaps in the tree, and force the
// miner to operate trie-backed only.
- snap: s.snap,
+ snaps: s.snaps,
+ snap: s.snap,
}
// Copy the dirty states, logs, and preimages
for addr := range s.journal.dirties {
@@ -1027,7 +1018,10 @@ func (s *StateDB) clearJournalAndRefund() {
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
- iter, _ := s.snap.StorageIterator(addrHash, common.Hash{})
+ iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
+ if err != nil {
+ return false, 0, nil, nil, err
+ }
defer iter.Release()
var (
@@ -1228,16 +1222,6 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
}
// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
- return s.commit(block, deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
-}
-
-// CommitWithSnap writes the state to the underlying in-memory trie database and
-// generates a snapshot layer for the newly committed state.
-func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
- return s.commit(block, deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
-}
-
// Once the state is committed, tries cached in stateDB (including account
// trie, storage tries) will no longer be functional. A new state instance
// must be created with new root and updated database for accessing post-
@@ -1245,7 +1229,7 @@ func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *s
//
// The associated block number of the state transition is also provided
// for more chain context.
-func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, opts ...ethsnapshot.LibEVMOption) (common.Hash, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1331,13 +1315,20 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.
s.StorageUpdated, s.StorageDeleted = 0, 0
}
// If snapshotting is enabled, update the snapshot tree with this new version
- if snaps != nil {
+ if s.snap != nil {
start := time.Now()
- if s.snap == nil {
- log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", parentHash, blockHash))
- }
- if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
- log.Warn("Failed to update snapshot tree", "to", root, "err", err)
+ // Only update if there's a state transition (skip empty Clique blocks)
+ if parent := s.snap.Root(); parent != root {
+ if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, opts...); err != nil {
+ log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+ }
+ // Keep 128 diff layers in the memory, persistent layer is 129th.
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
+ // if err := s.snaps.Cap(root, 128); err != nil {
+ // log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
+ // }
}
if metrics.EnabledExpensive {
s.SnapshotCommits += time.Since(start)
@@ -1354,14 +1345,8 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.
if root != origin {
start := time.Now()
set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)
- if referenceRoot {
- if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, block, nodes, set); err != nil {
- return common.Hash{}, err
- }
- } else {
- if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
- return common.Hash{}, err
- }
+ if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
+ return common.Hash{}, err
}
s.originalRoot = root
if metrics.EnabledExpensive {
@@ -1384,15 +1369,15 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.
// Prepare handles the preparatory steps for executing a state transition with.
// This method must be invoked before state transition.
//
-// Berlin fork (aka ApricotPhase2):
+// Berlin fork:
// - Add sender to access list (2929)
// - Add destination to access list (2929)
// - Add precompiles to access list (2929)
// - Add the contents of the optional tx access list (2930)
//
// Potential EIPs:
-// - Reset access list (Berlin/ApricotPhase2)
-// - Add coinbase to access list (EIP-3651/Durango)
+// - Reset access list (Berlin)
+// - Add coinbase to access list (EIP-3651)
// - Reset transient storage (EIP-1153)
func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
if rules.IsBerlin {
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index aa5e69fa66..8d66eafe82 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -41,13 +41,13 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/triestate"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/triestate"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -192,7 +192,7 @@ func (test *stateTest) run() bool {
storageList = append(storageList, copy2DSet(states.Storages))
}
disk = rawdb.NewMemoryDatabase()
- tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
+ tdb = triedb.NewDatabase(disk, &triedb.Config{DBOverride: pathdb.Defaults.BackendConstructor})
sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0
)
@@ -233,7 +233,10 @@ func (test *stateTest) run() bool {
} else {
state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
}
- nroot, err := state.Commit(0, true, false) // call commit at the block boundary
+ if snaps != nil {
+ snaps.WithBlockHashes(common.Hash{}, common.Hash{})
+ }
+ nroot, err := state.Commit(0, true) // call commit at the block boundary
if err != nil {
panic(err)
}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index e5d36fcc26..50a3a01bb3 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -43,14 +43,14 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -127,7 +127,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
// Commit and cross check the databases.
- transRoot, err := transState.Commit(0, false, false)
+ transRoot, err := transState.Commit(0, false)
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
@@ -135,7 +135,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
- finalRoot, err := finalState.Commit(0, false, false)
+ finalRoot, err := finalState.Commit(0, false)
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
@@ -543,8 +543,8 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
func TestTouchDelete(t *testing.T) {
s := newStateEnv()
s.state.getOrNewStateObject(common.Address{})
- root, _ := s.state.Commit(0, false, false)
- s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
+ root, _ := s.state.Commit(0, false)
+ s.state, _ = New(root, s.state.db, s.state.snaps)
snapshot := s.state.Snapshot()
s.state.AddBalance(common.Address{}, new(uint256.Int))
@@ -631,7 +631,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
}
// Commit state, ensure states can be loaded from disk
- root, _ := state.Commit(0, false, false)
+ root, _ := state.Commit(0, false)
state, _ = New(root, tdb, nil)
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
@@ -745,7 +745,7 @@ func TestCommitCopy(t *testing.T) {
t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Copy the committed state database, the copied one is not functional.
- state.Commit(0, true, false)
+ state.Commit(0, true)
copied := state.Copy()
if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(0)) != 0 {
t.Fatalf("unexpected balance: have %v", balance)
@@ -779,8 +779,8 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, uint256.NewInt(1))
- root, _ := state.Commit(0, false, false)
- state, _ = NewWithSnapshot(root, state.db, state.snap)
+ root, _ := state.Commit(0, false)
+ state, _ = New(root, state.db, state.snaps)
// Simulate self-destructing in one transaction, then create-reverting in another
state.SelfDestruct(addr)
@@ -791,8 +791,8 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state
- root, _ = state.Commit(0, true, false)
- state, _ = NewWithSnapshot(root, state.db, state.snap)
+ root, _ = state.Commit(0, true)
+ state, _ = New(root, state.db, state.snaps)
if state.getStateObject(addr) != nil {
t.Fatalf("self-destructed contract came alive")
@@ -814,14 +814,14 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
memDb = rawdb.NewMemoryDatabase()
)
if scheme == rawdb.PathScheme {
- tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
+ tdb = triedb.NewDatabase(memDb, &triedb.Config{DBOverride: pathdb.Config{
CleanCacheSize: 0,
DirtyCacheSize: 0,
- }}) // disable caching
+ }.BackendConstructor}) // disable caching
} else {
- tdb = triedb.NewDatabase(memDb, &triedb.Config{HashDB: &hashdb.Config{
+ tdb = triedb.NewDatabase(memDb, &triedb.Config{DBOverride: hashdb.Config{
CleanCacheSize: 0,
- }}) // disable caching
+ }.BackendConstructor}) // disable caching
}
db := NewDatabaseWithNodeDB(memDb, tdb)
@@ -834,7 +834,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, uint256.NewInt(100))
state.SetCode(a2, []byte{1, 2, 4})
- root, _ = state.Commit(0, false, false)
+ root, _ = state.Commit(0, false)
t.Logf("root: %x", root)
// force-flush
tdb.Commit(root, false)
@@ -858,7 +858,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
}
// Modify the state
state.SetBalance(addr, uint256.NewInt(2))
- root, err := state.Commit(0, false, false)
+ root, err := state.Commit(0, false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}
@@ -1044,8 +1044,8 @@ func TestMultiCoinOperations(t *testing.T) {
assetID := common.Hash{2}
s.state.getOrNewStateObject(addr)
- root, _ := s.state.Commit(0, false, false)
- s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
+ root, _ := s.state.Commit(0, false)
+ s.state, _ = New(root, s.state.db, s.state.snaps)
s.state.AddBalance(addr, new(uint256.Int))
@@ -1101,14 +1101,16 @@ func TestMultiCoinSnapshot(t *testing.T) {
assertBalances(10, 0, 0)
// Commit and get the new root
- root, _ = stateDB.Commit(0, false, false)
+ snapTree.WithBlockHashes(common.Hash{}, common.Hash{})
+ root, _ = stateDB.Commit(0, false)
assertBalances(10, 0, 0)
// Create a new state from the latest root, add a multicoin balance, and
// commit it to the tree.
stateDB, _ = New(root, sdb, snapTree)
stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(10))
- root, _ = stateDB.Commit(0, false, false)
+ snapTree.WithBlockHashes(common.Hash{}, common.Hash{})
+ root, _ = stateDB.Commit(0, false)
assertBalances(10, 10, 0)
// Add more layers than the cap and ensure the balances and layers are correct
@@ -1116,7 +1118,8 @@ func TestMultiCoinSnapshot(t *testing.T) {
stateDB, _ = New(root, sdb, snapTree)
stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
stateDB.AddBalanceMultiCoin(addr, assetID2, big.NewInt(2))
- root, _ = stateDB.Commit(0, false, false)
+ snapTree.WithBlockHashes(common.Hash{}, common.Hash{})
+ root, _ = stateDB.Commit(0, false)
}
assertBalances(10, 266, 512)
@@ -1125,7 +1128,8 @@ func TestMultiCoinSnapshot(t *testing.T) {
stateDB, _ = New(root, sdb, snapTree)
stateDB.AddBalance(addr, uint256.NewInt(1))
stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
- root, _ = stateDB.Commit(0, false, false)
+ snapTree.WithBlockHashes(common.Hash{}, common.Hash{})
+ root, _ = stateDB.Commit(0, false)
stateDB, _ = New(root, sdb, snapTree)
assertBalances(11, 267, 512)
}
@@ -1147,7 +1151,7 @@ func TestGenerateMultiCoinAccounts(t *testing.T) {
t.Fatal(err)
}
stateDB.SetBalanceMultiCoin(addr, assetID, assetBalance)
- root, err := stateDB.Commit(0, false, false)
+ root, err := stateDB.Commit(0, false)
if err != nil {
t.Fatal(err)
}
@@ -1170,12 +1174,16 @@ func TestGenerateMultiCoinAccounts(t *testing.T) {
// Get latest snapshot and make sure it has the correct account and storage
snap := snaps.Snapshot(root)
- snapAccount, err := snap.Account(addrHash)
+ snapAccount, err := snap.AccountRLP(addrHash)
if err != nil {
t.Fatal(err)
}
- if !snapAccount.IsMultiCoin {
- t.Fatalf("Expected SnapAccount to return IsMultiCoin: true, found: %v", snapAccount.IsMultiCoin)
+ account := new(types.StateAccount)
+ if err := rlp.DecodeBytes(snapAccount, account); err != nil {
+ t.Fatal(err)
+ }
+ if !types.IsMultiCoin(account) {
+ t.Fatalf("Expected SnapAccount to return IsMultiCoin: true, found: %v", types.IsMultiCoin(account))
}
NormalizeCoinID(&assetID)
@@ -1207,7 +1215,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
}
}
- root, err := state.Commit(0, false, false)
+ root, err := state.Commit(0, false)
if err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
@@ -1286,7 +1294,8 @@ func TestResetObject(t *testing.T) {
state.CreateAccount(addr)
state.SetBalance(addr, uint256.NewInt(2))
state.SetState(addr, slotB, common.BytesToHash([]byte{0x2}))
- root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false)
+ snaps.WithBlockHashes(common.Hash{}, common.Hash{})
+ root, _ := state.Commit(0, true)
// Ensure the original account is wiped properly
snap := snaps.Snapshot(root)
@@ -1317,7 +1326,8 @@ func TestDeleteStorage(t *testing.T) {
value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
state.SetState(addr, slot, value)
}
- root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false)
+ snaps.WithBlockHashes(common.Hash{}, common.Hash{})
+ root, _ := state.Commit(0, true)
// Init phase done, create two states, one with snap and one without
fastState, _ := New(root, db, snaps)
slowState, _ := New(root, db, nil)
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index f7cf30c683..560f3a96fd 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -21,12 +21,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -43,9 +43,9 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
// Create an empty state
config := &triedb.Config{Preimages: true}
if scheme == rawdb.PathScheme {
- config.PathDB = pathdb.Defaults
+ config.DBOverride = pathdb.Defaults.BackendConstructor
} else {
- config.HashDB = hashdb.Defaults
+ config.DBOverride = hashdb.Defaults.BackendConstructor
}
db := rawdb.NewMemoryDatabase()
nodeDb := triedb.NewDatabase(db, config)
@@ -76,7 +76,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
}
accounts = append(accounts, acc)
}
- root, _ := state.Commit(0, false, false)
+ root, _ := state.Commit(0, false)
// Return the generated state
return db, sdb, nodeDb, root, accounts
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 520e4f1f7c..b043f36a0c 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -38,11 +38,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/trie"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
diff --git a/core/test_blockchain.go b/core/test_blockchain.go
index 95065f276a..caacfe2d65 100644
--- a/core/test_blockchain.go
+++ b/core/test_blockchain.go
@@ -351,7 +351,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe
if blockchain.snaps != nil {
// Snap layer count should be 1 fewer
- if want, got := len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(); got != want {
+ if want, got := len(chain1), blockchain.snaps.NumBlockLayers(); got != want {
t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want)
}
}
@@ -360,13 +360,6 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe
if err := blockchain.Reject(chain2[i]); err != nil {
t.Fatal(err)
}
-
- if blockchain.snaps != nil {
- // Snap layer count should decrease by 1 per Reject
- if want, got := len(chain1)+len(chain2)-i-1, blockchain.snaps.NumBlockLayers(); got != want {
- t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want)
- }
- }
}
if blockchain.snaps != nil {
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 4fc2b8fb57..a33dcbd56c 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -71,7 +71,7 @@ var testChainConfig *params.ChainConfig
func init() {
testChainConfig = new(params.ChainConfig)
- *testChainConfig = *params.TestChainConfig
+ *testChainConfig = params.Copy(params.TestChainConfig)
testChainConfig.CancunTime = new(uint64)
*testChainConfig.CancunTime = uint64(time.Now().Unix())
@@ -584,7 +584,7 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000))
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000))
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -703,7 +703,7 @@ func TestOpenIndex(t *testing.T) {
// Create a blob pool out of the pre-seeded data
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -806,7 +806,7 @@ func TestOpenHeap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -887,7 +887,7 @@ func TestOpenCap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000))
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000))
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -1305,7 +1305,7 @@ func TestAdd(t *testing.T) {
store.Put(blob)
}
}
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
store.Close()
// Create a blob pool out of the pre-seeded dats
@@ -1378,7 +1378,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
pool.add(tx)
}
- statedb.Commit(0, true, false)
+ statedb.Commit(0, true)
defer pool.Close()
// Benchmark assembling the pending
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index 4aa44d046c..a193510b2d 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -45,10 +45,10 @@ import (
"github.com/ava-labs/coreth/core/txpool"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/event"
+ "github.com/ava-labs/libevm/trie"
"github.com/holiman/uint256"
)
diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go
deleted file mode 100644
index b9a41e8e58..0000000000
--- a/core/types/gen_account_rlp.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by rlpgen. DO NOT EDIT.
-
-package types
-
-import (
- "io"
-
- "github.com/ava-labs/libevm/rlp"
-)
-
-func (obj *StateAccount) EncodeRLP(_w io.Writer) error {
- w := rlp.NewEncoderBuffer(_w)
- _tmp0 := w.List()
- w.WriteUint64(obj.Nonce)
- if obj.Balance == nil {
- w.Write(rlp.EmptyString)
- } else {
- w.WriteUint256(obj.Balance)
- }
- w.WriteBytes(obj.Root[:])
- w.WriteBytes(obj.CodeHash)
- w.WriteBool(obj.IsMultiCoin)
- w.ListEnd(_tmp0)
- return w.Flush()
-}
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index 5fea29dd91..94dd5631d2 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -36,12 +36,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
func TestDeriveSha(t *testing.T) {
diff --git a/core/types/state_account.go b/core/types/state_account.go
index d2727e3b92..d296a0902c 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -27,109 +27,36 @@
package types
import (
- "bytes"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
- "github.com/holiman/uint256"
+ ethtypes "github.com/ava-labs/libevm/core/types"
)
-//go:generate go run github.com/ava-labs/libevm/rlp/rlpgen -type StateAccount -out gen_account_rlp.go
-
-// StateAccount is the Ethereum consensus representation of accounts.
-// These objects are stored in the main account trie.
-type StateAccount struct {
- Nonce uint64
- Balance *uint256.Int
- Root common.Hash // merkle root of the storage trie
- CodeHash []byte
- IsMultiCoin bool
-}
+type (
+ // Import these types from the go-ethereum package
+ StateAccount = ethtypes.StateAccount
+ SlimAccount = ethtypes.SlimAccount
+)
-// NewEmptyStateAccount constructs an empty state account.
-func NewEmptyStateAccount() *StateAccount {
- return &StateAccount{
- Balance: new(uint256.Int),
- Root: EmptyRootHash,
- CodeHash: EmptyCodeHash.Bytes(),
- }
-}
+var (
+ // Import these functions from the go-ethereum package
+ NewEmptyStateAccount = ethtypes.NewEmptyStateAccount
+ SlimAccountRLP = ethtypes.SlimAccountRLP
+ FullAccount = ethtypes.FullAccount
+ FullAccountRLP = ethtypes.FullAccountRLP
+)
-// Copy returns a deep-copied state account object.
-func (acct *StateAccount) Copy() *StateAccount {
- var balance *uint256.Int
- if acct.Balance != nil {
- balance = new(uint256.Int).Set(acct.Balance)
- }
- return &StateAccount{
- Nonce: acct.Nonce,
- Balance: balance,
- Root: acct.Root,
- CodeHash: common.CopyBytes(acct.CodeHash),
- IsMultiCoin: acct.IsMultiCoin,
- }
-}
+type isMultiCoin bool
-// SlimAccount is a modified version of an Account, where the root is replaced
-// with a byte slice. This format can be used to represent full-consensus format
-// or slim format which replaces the empty root and code hash as nil byte slice.
-type SlimAccount struct {
- Nonce uint64
- Balance *uint256.Int
- Root []byte // Nil if root equals to types.EmptyRootHash
- CodeHash []byte // Nil if hash equals to types.EmptyCodeHash
- IsMultiCoin bool
-}
+var isMultiCoinPayloads = ethtypes.RegisterExtras[isMultiCoin]()
-// SlimAccountRLP encodes the state account in 'slim RLP' format.
-func SlimAccountRLP(account StateAccount) []byte {
- slim := SlimAccount{
- Nonce: account.Nonce,
- Balance: account.Balance,
- IsMultiCoin: account.IsMultiCoin,
- }
- if account.Root != EmptyRootHash {
- slim.Root = account.Root[:]
- }
- if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) {
- slim.CodeHash = account.CodeHash
- }
- data, err := rlp.EncodeToBytes(slim)
- if err != nil {
- panic(err)
- }
- return data
+func IsMultiCoin(a *StateAccount) bool {
+ return bool(isMultiCoinPayloads.FromStateAccount(a))
}
-// FullAccount decodes the data on the 'slim RLP' format and returns
-// the consensus format account.
-func FullAccount(data []byte) (*StateAccount, error) {
- var slim SlimAccount
- if err := rlp.DecodeBytes(data, &slim); err != nil {
- return nil, err
- }
- var account StateAccount
- account.Nonce, account.Balance, account.IsMultiCoin = slim.Nonce, slim.Balance, slim.IsMultiCoin
-
- // Interpret the storage root and code hash in slim format.
- if len(slim.Root) == 0 {
- account.Root = EmptyRootHash
- } else {
- account.Root = common.BytesToHash(slim.Root)
- }
- if len(slim.CodeHash) == 0 {
- account.CodeHash = EmptyCodeHash[:]
- } else {
- account.CodeHash = slim.CodeHash
- }
- return &account, nil
+func EnableMultiCoin(a *StateAccount) {
+ isMultiCoinPayloads.SetOnStateAccount(a, true)
}
-// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
-func FullAccountRLP(data []byte) ([]byte, error) {
- account, err := FullAccount(data)
- if err != nil {
- return nil, err
- }
- return rlp.EncodeToBytes(account)
+// XXX: Should be removed once we use the upstream statedb
+func DisableMultiCoin(a *StateAccount) {
+ isMultiCoinPayloads.SetOnStateAccount(a, false)
}
diff --git a/eth/api_debug.go b/eth/api_debug.go
index e1c67dc436..e5073e4f63 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -37,12 +37,12 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/internal/ethapi"
"github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/common/hexutil"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
// DebugAPI is the collection of Ethereum full node APIs for debugging the
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index 1c87611904..926e4de48d 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -36,7 +36,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
"github.com/davecgh/go-spew/spew"
@@ -93,7 +93,7 @@ func TestAccountRange(t *testing.T) {
m[addr] = true
}
}
- root, _ := sdb.Commit(0, true, false)
+ root, _ := sdb.Commit(0, true)
sdb, _ = state.New(root, statedb, nil)
trie, err := statedb.OpenTrie(root)
@@ -151,7 +151,7 @@ func TestEmptyAccountRange(t *testing.T) {
st, _ = state.New(types.EmptyRootHash, statedb, nil)
)
// Commit(although nothing to flush) and re-init the statedb
- st.Commit(0, true, false)
+ st.Commit(0, true)
st, _ = state.New(types.EmptyRootHash, statedb, nil)
results := st.RawDump(&state.DumpConfig{
@@ -197,7 +197,7 @@ func TestStorageRangeAt(t *testing.T) {
for _, entry := range storage {
sdb.SetState(addr, *entry.Key, entry.Value)
}
- root, _ := sdb.Commit(0, false, false)
+ root, _ := sdb.Commit(0, false)
sdb, _ = state.New(root, db, nil)
// Check a few combinations of limit and start/end.
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index d5ae4074fe..583f97ad8e 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -41,10 +41,10 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/require"
)
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 8395f08cd2..37363f4e50 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -37,11 +37,11 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/eth/tracers"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/vm"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
// noopReleaser is returned in case there is no operation expected
@@ -163,7 +163,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), true)
+ root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
diff --git a/go.mod b/go.mod
index 5141cdbf7a..0009f493ad 100644
--- a/go.mod
+++ b/go.mod
@@ -5,11 +5,10 @@ go 1.22.8
require (
github.com/VictoriaMetrics/fastcache v1.12.1
github.com/ava-labs/avalanchego v1.11.13-0.20241106174551-4fb3f3c6b195
- github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1
+ github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1.0.20241113200655-753d1f5dff5f
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.1.0
- github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.5.0
github.com/hashicorp/go-bexpr v0.1.10
@@ -64,6 +63,7 @@ require (
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
+ github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -135,3 +135,7 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
+
+// replace github.com/ava-labs/libevm => ../../git2/go-ethereum
+
+replace github.com/ava-labs/libevm => github.com/ava-labs/libevm v0.0.0-20241121221822-8486d85dbf1f
diff --git a/go.sum b/go.sum
index 5aa8d8e816..fe8fa55e80 100644
--- a/go.sum
+++ b/go.sum
@@ -58,8 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/ava-labs/avalanchego v1.11.13-0.20241106174551-4fb3f3c6b195 h1:dyf52xlqlA/9SaiCv29oqbitRAYu7L890zK774xDNrE=
github.com/ava-labs/avalanchego v1.11.13-0.20241106174551-4fb3f3c6b195/go.mod h1:eZ/UmH4rDhhgL/FLqtJZYJ7ka73m88RmLrOoAyZFgD4=
-github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1 h1:ughW0E2DUNRnvwJYNU8zUSCUzIWdcOwyXSBpy7oauZE=
-github.com/ava-labs/libevm v1.13.14-0.1.0-rc.1/go.mod h1:yBctIV/wnxXTF38h95943jvpuk4aj07TrjbpoGor6LQ=
+github.com/ava-labs/libevm v0.0.0-20241121221822-8486d85dbf1f h1:MbYJ8fTFv8ewqIpqPPMiYPMkqf/astF3O0Gqrjyo8FA=
+github.com/ava-labs/libevm v0.0.0-20241121221822-8486d85dbf1f/go.mod h1:yBctIV/wnxXTF38h95943jvpuk4aj07TrjbpoGor6LQ=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 5fccd248a6..5cdb19eed5 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -43,7 +43,6 @@ import (
"github.com/ava-labs/coreth/eth/gasestimator"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/accounts"
"github.com/ava-labs/libevm/accounts/keystore"
"github.com/ava-labs/libevm/accounts/scwallet"
@@ -55,6 +54,7 @@ import (
"github.com/ava-labs/libevm/eth/tracers/logger"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
"github.com/davecgh/go-spew/spew"
"github.com/holiman/uint256"
"github.com/tyler-smith/go-bip39"
diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic_syncer.go
index 365c7f11fd..2da188d47a 100644
--- a/plugin/evm/atomic_syncer.go
+++ b/plugin/evm/atomic_syncer.go
@@ -16,7 +16,7 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
syncclient "github.com/ava-labs/coreth/sync/client"
- "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic_syncer_test.go
index 7054649f3c..ed6ba46466 100644
--- a/plugin/evm/atomic_syncer_test.go
+++ b/plugin/evm/atomic_syncer_test.go
@@ -22,9 +22,9 @@ import (
"github.com/ava-labs/coreth/sync/handlers"
handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const commitInterval = 1024
diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go
index d4c27a88bd..ec86498904 100644
--- a/plugin/evm/atomic_trie.go
+++ b/plugin/evm/atomic_trie.go
@@ -17,13 +17,13 @@ import (
"github.com/ava-labs/coreth/core"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
)
const (
@@ -154,9 +154,9 @@ func newAtomicTrie(
trieDB := triedb.NewDatabase(
rawdb.NewDatabase(Database{atomicTrieDB}),
&triedb.Config{
- HashDB: &hashdb.Config{
+ DBOverride: hashdb.Config{
CleanCacheSize: 64 * units.MiB, // Allocate 64MB of memory for clean cache
- },
+ }.BackendConstructor,
},
)
diff --git a/plugin/evm/atomic_trie_iterator.go b/plugin/evm/atomic_trie_iterator.go
index edc3a9d47b..6fd636d744 100644
--- a/plugin/evm/atomic_trie_iterator.go
+++ b/plugin/evm/atomic_trie_iterator.go
@@ -12,8 +12,8 @@ import (
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/utils/wrappers"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie"
)
const atomicTrieKeyLen = wrappers.LongLen + common.HashLength
diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go
index ef7fda1ad7..9b6eeaae65 100644
--- a/plugin/evm/block_verification.go
+++ b/plugin/evm/block_verification.go
@@ -15,7 +15,7 @@ import (
"github.com/ava-labs/coreth/constants"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go
index d11d12c4a4..15dca8ce38 100644
--- a/plugin/evm/network_handler.go
+++ b/plugin/evm/network_handler.go
@@ -12,10 +12,10 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
syncHandlers "github.com/ava-labs/coreth/sync/handlers"
syncStats "github.com/ava-labs/coreth/sync/handlers/stats"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/warp"
warpHandlers "github.com/ava-labs/coreth/warp/handlers"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
)
var _ message.RequestHandler = &networkHandler{}
diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go
index c08627b612..81180203dc 100644
--- a/plugin/evm/syncervm_test.go
+++ b/plugin/evm/syncervm_test.go
@@ -37,13 +37,13 @@ import (
"github.com/ava-labs/coreth/predicate"
statesyncclient "github.com/ava-labs/coreth/sync/client"
"github.com/ava-labs/coreth/sync/statesync"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
func TestSkipStateSync(t *testing.T) {
@@ -192,7 +192,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
}
// Verify the snapshot disk layer matches the last block root
lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root
- if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil {
+ if err := syncDisabledVM.blockChain.VerifySnapshot(lastRoot); err != nil {
t.Fatal(err)
}
syncDisabledVM.blockChain.DrainAcceptorQueue()
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 6887f71993..1390ea0ab1 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -40,9 +40,9 @@ import (
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/peer"
"github.com/ava-labs/coreth/plugin/evm/message"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/utils"
+ "github.com/ava-labs/libevm/triedb"
warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp"
"github.com/ava-labs/coreth/rpc"
@@ -1289,9 +1289,9 @@ func (vm *VM) setAppRequestHandlers() {
evmTrieDB := triedb.NewDatabase(
vm.chaindb,
&triedb.Config{
- HashDB: &hashdb.Config{
+ DBOverride: hashdb.Config{
CleanCacheSize: vm.config.StateSyncServerTrieCache * units.MiB,
- },
+ }.BackendConstructor,
},
)
networkHandler := newNetworkHandler(
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 80dc75da50..c1cf46244c 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -24,8 +24,8 @@ import (
"github.com/ava-labs/coreth/constants"
"github.com/ava-labs/coreth/eth/filters"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
+ "github.com/ava-labs/libevm/trie"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/scripts/eth-allowed-packages.txt b/scripts/eth-allowed-packages.txt
index 8d3beea0fd..27ebfcea55 100644
--- a/scripts/eth-allowed-packages.txt
+++ b/scripts/eth-allowed-packages.txt
@@ -27,4 +27,11 @@
"github.com/ava-labs/libevm/libevm"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/params"
-"github.com/ava-labs/libevm/rlp"
\ No newline at end of file
+"github.com/ava-labs/libevm/rlp"
+"github.com/ava-labs/libevm/trie"
+"github.com/ava-labs/libevm/trie/testutil"
+"github.com/ava-labs/libevm/trie/trienode"
+"github.com/ava-labs/libevm/trie/triestate"
+"github.com/ava-labs/libevm/trie/utils"
+"github.com/ava-labs/libevm/triedb"
+"github.com/ava-labs/libevm/triedb/database"
\ No newline at end of file
diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh
index 85f5bd3d48..cbfc46bf31 100755
--- a/scripts/tests.e2e.sh
+++ b/scripts/tests.e2e.sh
@@ -45,6 +45,7 @@ git checkout -B "test-${AVALANCHE_VERSION}" "${AVALANCHE_VERSION}"
echo "updating coreth dependency to point to ${CORETH_PATH}"
go mod edit -replace "github.com/ava-labs/coreth=${CORETH_PATH}"
+go mod edit -replace "github.com/ava-labs/libevm=github.com/ava-labs/libevm@v0.0.0-20241121221822-8486d85dbf1f"
go mod tidy
echo "building avalanchego"
diff --git a/sync/client/client.go b/sync/client/client.go
index 72b656d217..6a1348679e 100644
--- a/sync/client/client.go
+++ b/sync/client/client.go
@@ -27,8 +27,8 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/peer"
"github.com/ava-labs/coreth/plugin/evm/message"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
)
const (
diff --git a/sync/client/client_test.go b/sync/client/client_test.go
index 03566ed5f5..70c7f6f118 100644
--- a/sync/client/client_test.go
+++ b/sync/client/client_test.go
@@ -25,9 +25,9 @@ import (
"github.com/ava-labs/coreth/sync/handlers"
handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/triedb"
)
func TestGetCode(t *testing.T) {
diff --git a/sync/handlers/block_request_test.go b/sync/handlers/block_request_test.go
index a3557cdcf9..bd9bf8ce4f 100644
--- a/sync/handlers/block_request_test.go
+++ b/sync/handlers/block_request_test.go
@@ -17,10 +17,10 @@ import (
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/plugin/evm/message"
"github.com/ava-labs/coreth/sync/handlers/stats"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/handlers/handler.go b/sync/handlers/handler.go
index eece20e4d0..5bd9d86475 100644
--- a/sync/handlers/handler.go
+++ b/sync/handlers/handler.go
@@ -14,7 +14,7 @@ type BlockProvider interface {
}
type SnapshotProvider interface {
- Snapshots() *snapshot.Tree
+ Snapshots() snapshot.DiskIterable
}
type SyncDataProvider interface {
diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go
index 76b726136b..ab2572ce22 100644
--- a/sync/handlers/leafs_request.go
+++ b/sync/handlers/leafs_request.go
@@ -18,13 +18,13 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
"github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/ethdb/memorydb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
)
const (
@@ -175,7 +175,7 @@ type responseBuilder struct {
request *message.LeafsRequest
response *message.LeafsResponse
t *trie.Trie
- snap *snapshot.Tree
+ snap snapshot.DiskIterable
keyLength int
limit uint16
diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go
index 2cfda9abc1..a98f398cb4 100644
--- a/sync/handlers/leafs_request_test.go
+++ b/sync/handlers/leafs_request_test.go
@@ -16,11 +16,11 @@ import (
"github.com/ava-labs/coreth/plugin/evm/message"
"github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/handlers/test_providers.go b/sync/handlers/test_providers.go
index a16410d71a..10151b0632 100644
--- a/sync/handlers/test_providers.go
+++ b/sync/handlers/test_providers.go
@@ -23,9 +23,9 @@ func (t *TestBlockProvider) GetBlock(hash common.Hash, number uint64) *types.Blo
}
type TestSnapshotProvider struct {
- Snapshot *snapshot.Tree
+ Snapshot snapshot.DiskIterable
}
-func (t *TestSnapshotProvider) Snapshots() *snapshot.Tree {
+func (t *TestSnapshotProvider) Snapshots() snapshot.DiskIterable {
return t.Snapshot
}
diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go
index 3ec5013270..7031979208 100644
--- a/sync/statesync/state_syncer.go
+++ b/sync/statesync/state_syncer.go
@@ -10,9 +10,9 @@ import (
"github.com/ava-labs/coreth/core/state/snapshot"
syncclient "github.com/ava-labs/coreth/sync/client"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"golang.org/x/sync/errgroup"
)
@@ -35,12 +35,12 @@ type StateSyncerConfig struct {
// stateSync keeps the state of the entire state sync operation.
type stateSync struct {
- db ethdb.Database // database we are syncing
- root common.Hash // root of the EVM state we are syncing to
- trieDB *triedb.Database // trieDB on top of db we are syncing. used to restore any existing tries.
- snapshot snapshot.Snapshot // used to access the database we are syncing as a snapshot.
- batchSize int // write batches when they reach this size
- client syncclient.Client // used to contact peers over the network
+ db ethdb.Database // database we are syncing
+ root common.Hash // root of the EVM state we are syncing to
+ trieDB *triedb.Database // trieDB on top of db we are syncing. used to restore any existing tries.
+ snapshot snapshot.SnapshotIterable // used to access the database we are syncing as a snapshot.
+ batchSize int // write batches when they reach this size
+ client syncclient.Client // used to contact peers over the network
segments chan syncclient.LeafSyncTask // channel of tasks to sync
syncer *syncclient.CallbackLeafSyncer // performs the sync, looping over each task's range and invoking specified callbacks
diff --git a/sync/statesync/sync_helpers.go b/sync/statesync/sync_helpers.go
index f73e963495..22874b0017 100644
--- a/sync/statesync/sync_helpers.go
+++ b/sync/statesync/sync_helpers.go
@@ -6,9 +6,9 @@ package statesync
import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/trie"
)
// writeAccountSnapshot stores the account represented by [acc] to the snapshot at [accHash], using
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index 456a1b06c1..14a096a7fd 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -21,12 +21,12 @@ import (
"github.com/ava-labs/coreth/sync/handlers"
handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go
index 515d3cade2..e3083e3484 100644
--- a/sync/statesync/test_sync.go
+++ b/sync/statesync/test_sync.go
@@ -11,12 +11,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/triedb"
"github.com/stretchr/testify/assert"
)
diff --git a/sync/statesync/trie_segments.go b/sync/statesync/trie_segments.go
index 7a685b71ca..d0665ace9e 100644
--- a/sync/statesync/trie_segments.go
+++ b/sync/statesync/trie_segments.go
@@ -14,11 +14,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/plugin/evm/message"
syncclient "github.com/ava-labs/coreth/sync/client"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/sync/statesync/trie_sync_tasks.go b/sync/statesync/trie_sync_tasks.go
index 5cb78ee982..a0c4d4845f 100644
--- a/sync/statesync/trie_sync_tasks.go
+++ b/sync/statesync/trie_sync_tasks.go
@@ -9,11 +9,11 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/sync/syncutils"
- "github.com/ava-labs/coreth/trie"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
)
var (
diff --git a/sync/syncutils/test_trie.go b/sync/syncutils/test_trie.go
index 1b13867c06..c244d7bb1c 100644
--- a/sync/syncutils/test_trie.go
+++ b/sync/syncutils/test_trie.go
@@ -11,10 +11,10 @@ import (
"github.com/ava-labs/avalanchego/utils/wrappers"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/utils"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
"github.com/ava-labs/libevm/common"
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 84fb7ac9ca..714ad125a8 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -31,11 +31,11 @@ import (
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/triedb"
"github.com/ava-labs/coreth/triedb/hashdb"
"github.com/ava-labs/coreth/triedb/pathdb"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/triedb"
"github.com/holiman/uint256"
)
@@ -50,9 +50,9 @@ type StateTestState struct {
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bool, scheme string) StateTestState {
tconf := &triedb.Config{Preimages: true}
if scheme == rawdb.HashScheme {
- tconf.HashDB = hashdb.Defaults
+ tconf.DBOverride = hashdb.Defaults.BackendConstructor
} else {
- tconf.PathDB = pathdb.Defaults
+ tconf.DBOverride = pathdb.Defaults.BackendConstructor
}
triedb := triedb.NewDatabase(db, tconf)
sdb := state.NewDatabaseWithNodeDB(db, triedb)
@@ -66,7 +66,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(0, false, false)
+ root, _ := statedb.Commit(0, false)
// If snapshot is requested, initialize the snapshotter and use it in state.
var snaps *snapshot.Tree
diff --git a/trie/committer.go b/trie/committer.go
deleted file mode 100644
index 5abf11b506..0000000000
--- a/trie/committer.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "fmt"
-
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
-)
-
-// committer is the tool used for the trie Commit operation. The committer will
-// capture all dirty nodes during the commit process and keep them cached in
-// insertion order.
-type committer struct {
- nodes *trienode.NodeSet
- tracer *tracer
- collectLeaf bool
-}
-
-// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer {
- return &committer{
- nodes: nodeset,
- tracer: tracer,
- collectLeaf: collectLeaf,
- }
-}
-
-// Commit collapses a node down into a hash node.
-func (c *committer) Commit(n node) hashNode {
- return c.commit(nil, n).(hashNode)
-}
-
-// commit collapses a node down into a hash node and returns it.
-func (c *committer) commit(path []byte, n node) node {
- // if this path is clean, use available cached data
- hash, dirty := n.cache()
- if hash != nil && !dirty {
- return hash
- }
- // Commit children, then parent, and remove the dirty flag.
- switch cn := n.(type) {
- case *shortNode:
- // Commit child
- collapsed := cn.copy()
-
- // If the child is fullNode, recursively commit,
- // otherwise it can only be hashNode or valueNode.
- if _, ok := cn.Val.(*fullNode); ok {
- collapsed.Val = c.commit(append(path, cn.Key...), cn.Val)
- }
- // The key needs to be copied, since we're adding it to the
- // modified nodeset.
- collapsed.Key = hexToCompact(cn.Key)
- hashedNode := c.store(path, collapsed)
- if hn, ok := hashedNode.(hashNode); ok {
- return hn
- }
- return collapsed
- case *fullNode:
- hashedKids := c.commitChildren(path, cn)
- collapsed := cn.copy()
- collapsed.Children = hashedKids
-
- hashedNode := c.store(path, collapsed)
- if hn, ok := hashedNode.(hashNode); ok {
- return hn
- }
- return collapsed
- case hashNode:
- return cn
- default:
- // nil, valuenode shouldn't be committed
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
-}
-
-// commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(path []byte, n *fullNode) [17]node {
- var children [17]node
- for i := 0; i < 16; i++ {
- child := n.Children[i]
- if child == nil {
- continue
- }
- // If it's the hashed child, save the hash value directly.
- // Note: it's impossible that the child in range [0, 15]
- // is a valueNode.
- if hn, ok := child.(hashNode); ok {
- children[i] = hn
- continue
- }
- // Commit the child recursively and store the "hashed" value.
- // Note the returned node can be some embedded nodes, so it's
- // possible the type is not hashNode.
- children[i] = c.commit(append(path, byte(i)), child)
- }
- // For the 17th child, it's possible the type is valuenode.
- if n.Children[16] != nil {
- children[16] = n.Children[16]
- }
- return children
-}
-
-// store hashes the node n and adds it to the modified nodeset. If leaf collection
-// is enabled, leaf nodes will be tracked in the modified nodeset as well.
-func (c *committer) store(path []byte, n node) node {
- // Larger nodes are replaced by their hash and stored in the database.
- var hash, _ = n.cache()
-
- // This was not generated - must be a small node stored in the parent.
- // In theory, we should check if the node is leaf here (embedded node
- // usually is leaf node). But small value (less than 32bytes) is not
- // our target (leaves in account trie only).
- if hash == nil {
- // The node is embedded in its parent, in other words, this node
- // will not be stored in the database independently, mark it as
- // deleted only if the node was existent in database before.
- _, ok := c.tracer.accessList[string(path)]
- if ok {
- c.nodes.AddNode(path, trienode.NewDeleted())
- }
- return n
- }
- // Collect the dirty node to nodeset for return.
- nhash := common.BytesToHash(hash)
- c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n)))
-
- // Collect the corresponding leaf node if it's required. We don't check
- // full node since it's impossible to store value in fullNode. The key
- // length of leaves should be exactly same.
- if c.collectLeaf {
- if sn, ok := n.(*shortNode); ok {
- if val, ok := sn.Val.(valueNode); ok {
- c.nodes.AddLeaf(nhash, val)
- }
- }
- }
- return hash
-}
-
-// MerkleResolver the children resolver in merkle-patricia-tree.
-type MerkleResolver struct{}
-
-// ForEach implements childResolver, decodes the provided node and
-// traverses the children inside.
-func (resolver MerkleResolver) ForEach(node []byte, onChild func(common.Hash)) {
- forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild)
-}
-
-// forGatherChildren traverses the node hierarchy and invokes the callback
-// for all the hashnode children.
-func forGatherChildren(n node, onChild func(hash common.Hash)) {
- switch n := n.(type) {
- case *shortNode:
- forGatherChildren(n.Val, onChild)
- case *fullNode:
- for i := 0; i < 16; i++ {
- forGatherChildren(n.Children[i], onChild)
- }
- case hashNode:
- onChild(common.BytesToHash(n))
- case valueNode, nil:
- default:
- panic(fmt.Sprintf("unknown node type: %T", n))
- }
-}
diff --git a/trie/database_test.go b/trie/database_test.go
deleted file mode 100644
index 231c7f9677..0000000000
--- a/trie/database_test.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-// testReader implements database.Reader interface, providing function to
-// access trie nodes.
-type testReader struct {
- db ethdb.Database
- scheme string
- nodes []*trienode.MergedNodeSet // sorted from new to old
-}
-
-// Node implements database.Reader interface, retrieving trie node with
-// all available cached layers.
-func (r *testReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
- // Check the node presence with the cached layer, from latest to oldest.
- for _, nodes := range r.nodes {
- if _, ok := nodes.Sets[owner]; !ok {
- continue
- }
- n, ok := nodes.Sets[owner].Nodes[string(path)]
- if !ok {
- continue
- }
- if n.IsDeleted() || n.Hash != hash {
- return nil, &MissingNodeError{Owner: owner, Path: path, NodeHash: hash}
- }
- return n.Blob, nil
- }
- // Check the node presence in database.
- return rawdb.ReadTrieNode(r.db, owner, path, hash, r.scheme), nil
-}
-
-// testDb implements database.Database interface, using for testing purpose.
-type testDb struct {
- disk ethdb.Database
- root common.Hash
- scheme string
- nodes map[common.Hash]*trienode.MergedNodeSet
- parents map[common.Hash]common.Hash
-}
-
-func newTestDatabase(diskdb ethdb.Database, scheme string) *testDb {
- return &testDb{
- disk: diskdb,
- root: types.EmptyRootHash,
- scheme: scheme,
- nodes: make(map[common.Hash]*trienode.MergedNodeSet),
- parents: make(map[common.Hash]common.Hash),
- }
-}
-
-func (db *testDb) Reader(stateRoot common.Hash) (database.Reader, error) {
- nodes, _ := db.dirties(stateRoot, true)
- return &testReader{db: db.disk, scheme: db.scheme, nodes: nodes}, nil
-}
-
-func (db *testDb) Preimage(hash common.Hash) []byte {
- return rawdb.ReadPreimage(db.disk, hash)
-}
-
-func (db *testDb) InsertPreimage(preimages map[common.Hash][]byte) {
- rawdb.WritePreimages(db.disk, preimages)
-}
-
-func (db *testDb) Scheme() string { return db.scheme }
-
-func (db *testDb) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
- if root == parent {
- return nil
- }
- if _, ok := db.nodes[root]; ok {
- return nil
- }
- db.parents[root] = parent
- db.nodes[root] = nodes
- return nil
-}
-
-func (db *testDb) dirties(root common.Hash, topToBottom bool) ([]*trienode.MergedNodeSet, []common.Hash) {
- var (
- pending []*trienode.MergedNodeSet
- roots []common.Hash
- )
- for {
- if root == db.root {
- break
- }
- nodes, ok := db.nodes[root]
- if !ok {
- break
- }
- if topToBottom {
- pending = append(pending, nodes)
- roots = append(roots, root)
- } else {
- pending = append([]*trienode.MergedNodeSet{nodes}, pending...)
- roots = append([]common.Hash{root}, roots...)
- }
- root = db.parents[root]
- }
- return pending, roots
-}
-
-func (db *testDb) Commit(root common.Hash) error {
- if root == db.root {
- return nil
- }
- pending, roots := db.dirties(root, false)
- for i, nodes := range pending {
- for owner, set := range nodes.Sets {
- if owner == (common.Hash{}) {
- continue
- }
- set.ForEachWithOrder(func(path string, n *trienode.Node) {
- rawdb.WriteTrieNode(db.disk, owner, []byte(path), n.Hash, n.Blob, db.scheme)
- })
- }
- nodes.Sets[common.Hash{}].ForEachWithOrder(func(path string, n *trienode.Node) {
- rawdb.WriteTrieNode(db.disk, common.Hash{}, []byte(path), n.Hash, n.Blob, db.scheme)
- })
- db.root = roots[i]
- }
- for _, root := range roots {
- delete(db.nodes, root)
- delete(db.parents, root)
- }
- return nil
-}
diff --git a/trie/encoding.go b/trie/encoding.go
deleted file mode 100644
index aaa131ef1f..0000000000
--- a/trie/encoding.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-// Trie keys are dealt with in three distinct encodings:
-//
-// KEYBYTES encoding contains the actual key and nothing else. This encoding is the
-// input to most API functions.
-//
-// HEX encoding contains one byte for each nibble of the key and an optional trailing
-// 'terminator' byte of value 0x10 which indicates whether or not the node at the key
-// contains a value. Hex key encoding is used for nodes loaded in memory because it's
-// convenient to access.
-//
-// COMPACT encoding is defined by the Ethereum Yellow Paper (it's called "hex prefix
-// encoding" there) and contains the bytes of the key and a flag. The high nibble of the
-// first byte contains the flag; the lowest bit encoding the oddness of the length and
-// the second-lowest encoding whether the node at the key is a value node. The low nibble
-// of the first byte is zero in the case of an even number of nibbles and the first nibble
-// in the case of an odd number. All remaining nibbles (now an even number) fit properly
-// into the remaining bytes. Compact encoding is used for nodes stored on disk.
-
-func hexToCompact(hex []byte) []byte {
- terminator := byte(0)
- if hasTerm(hex) {
- terminator = 1
- hex = hex[:len(hex)-1]
- }
- buf := make([]byte, len(hex)/2+1)
- buf[0] = terminator << 5 // the flag byte
- if len(hex)&1 == 1 {
- buf[0] |= 1 << 4 // odd flag
- buf[0] |= hex[0] // first nibble is contained in the first byte
- hex = hex[1:]
- }
- decodeNibbles(hex, buf[1:])
- return buf
-}
-
-// hexToCompactInPlace places the compact key in input buffer, returning the compacted key.
-func hexToCompactInPlace(hex []byte) []byte {
- var (
- hexLen = len(hex) // length of the hex input
- firstByte = byte(0)
- )
- // Check if we have a terminator there
- if hexLen > 0 && hex[hexLen-1] == 16 {
- firstByte = 1 << 5
- hexLen-- // last part was the terminator, ignore that
- }
- var (
- binLen = hexLen/2 + 1
- ni = 0 // index in hex
- bi = 1 // index in bin (compact)
- )
- if hexLen&1 == 1 {
- firstByte |= 1 << 4 // odd flag
- firstByte |= hex[0] // first nibble is contained in the first byte
- ni++
- }
- for ; ni < hexLen; bi, ni = bi+1, ni+2 {
- hex[bi] = hex[ni]<<4 | hex[ni+1]
- }
- hex[0] = firstByte
- return hex[:binLen]
-}
-
-func compactToHex(compact []byte) []byte {
- if len(compact) == 0 {
- return compact
- }
- base := keybytesToHex(compact)
- // delete terminator flag
- if base[0] < 2 {
- base = base[:len(base)-1]
- }
- // apply odd flag
- chop := 2 - base[0]&1
- return base[chop:]
-}
-
-func keybytesToHex(str []byte) []byte {
- l := len(str)*2 + 1
- var nibbles = make([]byte, l)
- for i, b := range str {
- nibbles[i*2] = b / 16
- nibbles[i*2+1] = b % 16
- }
- nibbles[l-1] = 16
- return nibbles
-}
-
-// hexToKeybytes turns hex nibbles into key bytes.
-// This can only be used for keys of even length.
-func hexToKeybytes(hex []byte) []byte {
- if hasTerm(hex) {
- hex = hex[:len(hex)-1]
- }
- if len(hex)&1 != 0 {
- panic("can't convert hex key of odd length")
- }
- key := make([]byte, len(hex)/2)
- decodeNibbles(hex, key)
- return key
-}
-
-func decodeNibbles(nibbles []byte, bytes []byte) {
- for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
- bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
- }
-}
-
-// prefixLen returns the length of the common prefix of a and b.
-func prefixLen(a, b []byte) int {
- var i, length = 0, len(a)
- if len(b) < length {
- length = len(b)
- }
- for ; i < length; i++ {
- if a[i] != b[i] {
- break
- }
- }
- return i
-}
-
-// hasTerm returns whether a hex key has the terminator flag.
-func hasTerm(s []byte) bool {
- return len(s) > 0 && s[len(s)-1] == 16
-}
diff --git a/trie/encoding_test.go b/trie/encoding_test.go
deleted file mode 100644
index e25e4ae600..0000000000
--- a/trie/encoding_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- crand "crypto/rand"
- "encoding/hex"
- "math/rand"
- "testing"
-)
-
-func TestHexCompact(t *testing.T) {
- tests := []struct{ hex, compact []byte }{
- // empty keys, with and without terminator.
- {hex: []byte{}, compact: []byte{0x00}},
- {hex: []byte{16}, compact: []byte{0x20}},
- // odd length, no terminator
- {hex: []byte{1, 2, 3, 4, 5}, compact: []byte{0x11, 0x23, 0x45}},
- // even length, no terminator
- {hex: []byte{0, 1, 2, 3, 4, 5}, compact: []byte{0x00, 0x01, 0x23, 0x45}},
- // odd length, terminator
- {hex: []byte{15, 1, 12, 11, 8, 16 /*term*/}, compact: []byte{0x3f, 0x1c, 0xb8}},
- // even length, terminator
- {hex: []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}, compact: []byte{0x20, 0x0f, 0x1c, 0xb8}},
- }
- for _, test := range tests {
- if c := hexToCompact(test.hex); !bytes.Equal(c, test.compact) {
- t.Errorf("hexToCompact(%x) -> %x, want %x", test.hex, c, test.compact)
- }
- if h := compactToHex(test.compact); !bytes.Equal(h, test.hex) {
- t.Errorf("compactToHex(%x) -> %x, want %x", test.compact, h, test.hex)
- }
- }
-}
-
-func TestHexKeybytes(t *testing.T) {
- tests := []struct{ key, hexIn, hexOut []byte }{
- {key: []byte{}, hexIn: []byte{16}, hexOut: []byte{16}},
- {key: []byte{}, hexIn: []byte{}, hexOut: []byte{16}},
- {
- key: []byte{0x12, 0x34, 0x56},
- hexIn: []byte{1, 2, 3, 4, 5, 6, 16},
- hexOut: []byte{1, 2, 3, 4, 5, 6, 16},
- },
- {
- key: []byte{0x12, 0x34, 0x5},
- hexIn: []byte{1, 2, 3, 4, 0, 5, 16},
- hexOut: []byte{1, 2, 3, 4, 0, 5, 16},
- },
- {
- key: []byte{0x12, 0x34, 0x56},
- hexIn: []byte{1, 2, 3, 4, 5, 6},
- hexOut: []byte{1, 2, 3, 4, 5, 6, 16},
- },
- }
- for _, test := range tests {
- if h := keybytesToHex(test.key); !bytes.Equal(h, test.hexOut) {
- t.Errorf("keybytesToHex(%x) -> %x, want %x", test.key, h, test.hexOut)
- }
- if k := hexToKeybytes(test.hexIn); !bytes.Equal(k, test.key) {
- t.Errorf("hexToKeybytes(%x) -> %x, want %x", test.hexIn, k, test.key)
- }
- }
-}
-
-func TestHexToCompactInPlace(t *testing.T) {
- for i, key := range []string{
- "00",
- "060a040c0f000a090b040803010801010900080d090a0a0d0903000b10",
- "10",
- } {
- hexBytes, _ := hex.DecodeString(key)
- exp := hexToCompact(hexBytes)
- got := hexToCompactInPlace(hexBytes)
- if !bytes.Equal(exp, got) {
- t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
- }
- }
-}
-
-func TestHexToCompactInPlaceRandom(t *testing.T) {
- for i := 0; i < 10000; i++ {
- l := rand.Intn(128)
- key := make([]byte, l)
- crand.Read(key)
- hexBytes := keybytesToHex(key)
- hexOrig := []byte(string(hexBytes))
- exp := hexToCompact(hexBytes)
- got := hexToCompactInPlace(hexBytes)
-
- if !bytes.Equal(exp, got) {
- t.Fatalf("encoding err \ncpt %x\nhex %x\ngot %x\nexp %x\n",
- key, hexOrig, got, exp)
- }
- }
-}
-
-func BenchmarkHexToCompact(b *testing.B) {
- testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
- for i := 0; i < b.N; i++ {
- hexToCompact(testBytes)
- }
-}
-
-func BenchmarkHexToCompactInPlace(b *testing.B) {
- testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
- for i := 0; i < b.N; i++ {
- hexToCompactInPlace(testBytes)
- }
-}
-
-func BenchmarkCompactToHex(b *testing.B) {
- testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
- for i := 0; i < b.N; i++ {
- compactToHex(testBytes)
- }
-}
-
-func BenchmarkKeybytesToHex(b *testing.B) {
- testBytes := []byte{7, 6, 6, 5, 7, 2, 6, 2, 16}
- for i := 0; i < b.N; i++ {
- keybytesToHex(testBytes)
- }
-}
-
-func BenchmarkHexToKeybytes(b *testing.B) {
- testBytes := []byte{7, 6, 6, 5, 7, 2, 6, 2, 16}
- for i := 0; i < b.N; i++ {
- hexToKeybytes(testBytes)
- }
-}
diff --git a/trie/errors.go b/trie/errors.go
deleted file mode 100644
index a39fb9baf9..0000000000
--- a/trie/errors.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "errors"
- "fmt"
-
- "github.com/ava-labs/libevm/common"
-)
-
-// ErrCommitted is returned when a already committed trie is requested for usage.
-// The potential usages can be `Get`, `Update`, `Delete`, `NodeIterator`, `Prove`
-// and so on.
-var ErrCommitted = errors.New("trie is already committed")
-
-// MissingNodeError is returned by the trie functions (Get, Update, Delete)
-// in the case where a trie node is not present in the local database. It contains
-// information necessary for retrieving the missing node.
-type MissingNodeError struct {
- Owner common.Hash // owner of the trie if it's 2-layered trie
- NodeHash common.Hash // hash of the missing node
- Path []byte // hex-encoded path to the missing node
- err error // concrete error for missing trie node
-}
-
-// Unwrap returns the concrete error for missing trie node which
-// allows us for further analysis outside.
-func (err *MissingNodeError) Unwrap() error {
- return err.err
-}
-
-func (err *MissingNodeError) Error() string {
- if err.Owner == (common.Hash{}) {
- return fmt.Sprintf("missing trie node %x (path %x) %v", err.NodeHash, err.Path, err.err)
- }
- return fmt.Sprintf("missing trie node %x (owner %x) (path %x) %v", err.NodeHash, err.Owner, err.Path, err.err)
-}
diff --git a/trie/hasher.go b/trie/hasher.go
deleted file mode 100644
index 72e4fe8384..0000000000
--- a/trie/hasher.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "sync"
-
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// hasher is a type used for the trie Hash operation. A hasher has some
-// internal preallocated temp space
-type hasher struct {
- sha crypto.KeccakState
- tmp []byte
- encbuf rlp.EncoderBuffer
- parallel bool // Whether to use parallel threads when hashing
-}
-
-// hasherPool holds pureHashers
-var hasherPool = sync.Pool{
- New: func() interface{} {
- return &hasher{
- tmp: make([]byte, 0, 550), // cap is as large as a full fullNode.
- sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
- encbuf: rlp.NewEncoderBuffer(nil),
- }
- },
-}
-
-func newHasher(parallel bool) *hasher {
- h := hasherPool.Get().(*hasher)
- h.parallel = parallel
- return h
-}
-
-func returnHasherToPool(h *hasher) {
- hasherPool.Put(h)
-}
-
-// hash collapses a node down into a hash node, also returning a copy of the
-// original node initialized with the computed hash to replace the original one.
-func (h *hasher) hash(n node, force bool) (hashed node, cached node) {
- // Return the cached hash if it's available
- if hash, _ := n.cache(); hash != nil {
- return hash, n
- }
- // Trie not processed yet, walk the children
- switch n := n.(type) {
- case *shortNode:
- collapsed, cached := h.hashShortNodeChildren(n)
- hashed := h.shortnodeToHash(collapsed, force)
- // We need to retain the possibly _not_ hashed node, in case it was too
- // small to be hashed
- if hn, ok := hashed.(hashNode); ok {
- cached.flags.hash = hn
- } else {
- cached.flags.hash = nil
- }
- return hashed, cached
- case *fullNode:
- collapsed, cached := h.hashFullNodeChildren(n)
- hashed = h.fullnodeToHash(collapsed, force)
- if hn, ok := hashed.(hashNode); ok {
- cached.flags.hash = hn
- } else {
- cached.flags.hash = nil
- }
- return hashed, cached
- default:
- // Value and hash nodes don't have children, so they're left as were
- return n, n
- }
-}
-
-// hashShortNodeChildren collapses the short node. The returned collapsed node
-// holds a live reference to the Key, and must not be modified.
-func (h *hasher) hashShortNodeChildren(n *shortNode) (collapsed, cached *shortNode) {
- // Hash the short node's child, caching the newly hashed subtree
- collapsed, cached = n.copy(), n.copy()
- // Previously, we did copy this one. We don't seem to need to actually
- // do that, since we don't overwrite/reuse keys
- // cached.Key = common.CopyBytes(n.Key)
- collapsed.Key = hexToCompact(n.Key)
- // Unless the child is a valuenode or hashnode, hash it
- switch n.Val.(type) {
- case *fullNode, *shortNode:
- collapsed.Val, cached.Val = h.hash(n.Val, false)
- }
- return collapsed, cached
-}
-
-func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached *fullNode) {
- // Hash the full node's children, caching the newly hashed subtrees
- cached = n.copy()
- collapsed = n.copy()
- if h.parallel {
- var wg sync.WaitGroup
- wg.Add(16)
- for i := 0; i < 16; i++ {
- go func(i int) {
- hasher := newHasher(false)
- if child := n.Children[i]; child != nil {
- collapsed.Children[i], cached.Children[i] = hasher.hash(child, false)
- } else {
- collapsed.Children[i] = nilValueNode
- }
- returnHasherToPool(hasher)
- wg.Done()
- }(i)
- }
- wg.Wait()
- } else {
- for i := 0; i < 16; i++ {
- if child := n.Children[i]; child != nil {
- collapsed.Children[i], cached.Children[i] = h.hash(child, false)
- } else {
- collapsed.Children[i] = nilValueNode
- }
- }
- }
- return collapsed, cached
-}
-
-// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode
-// should have hex-type Key, which will be converted (without modification)
-// into compact form for RLP encoding.
-// If the rlp data is smaller than 32 bytes, `nil` is returned.
-func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
- n.encode(h.encbuf)
- enc := h.encodedBytes()
-
- if len(enc) < 32 && !force {
- return n // Nodes smaller than 32 bytes are stored inside their parent
- }
- return h.hashData(enc)
-}
-
-// fullnodeToHash is used to create a hashNode from a fullNode, (which
-// may contain nil values)
-func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
- n.encode(h.encbuf)
- enc := h.encodedBytes()
-
- if len(enc) < 32 && !force {
- return n // Nodes smaller than 32 bytes are stored inside their parent
- }
- return h.hashData(enc)
-}
-
-// encodedBytes returns the result of the last encoding operation on h.encbuf.
-// This also resets the encoder buffer.
-//
-// All node encoding must be done like this:
-//
-// node.encode(h.encbuf)
-// enc := h.encodedBytes()
-//
-// This convention exists because node.encode can only be inlined/escape-analyzed when
-// called on a concrete receiver type.
-func (h *hasher) encodedBytes() []byte {
- h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
- h.encbuf.Reset(nil)
- return h.tmp
-}
-
-// hashData hashes the provided data
-func (h *hasher) hashData(data []byte) hashNode {
- n := make(hashNode, 32)
- h.sha.Reset()
- h.sha.Write(data)
- h.sha.Read(n)
- return n
-}
-
-// proofHash is used to construct trie proofs, and returns the 'collapsed'
-// node (for later RLP encoding) as well as the hashed node -- unless the
-// node is smaller than 32 bytes, in which case it will be returned as is.
-// This method does not do anything on value- or hash-nodes.
-func (h *hasher) proofHash(original node) (collapsed, hashed node) {
- switch n := original.(type) {
- case *shortNode:
- sn, _ := h.hashShortNodeChildren(n)
- return sn, h.shortnodeToHash(sn, false)
- case *fullNode:
- fn, _ := h.hashFullNodeChildren(n)
- return fn, h.fullnodeToHash(fn, false)
- default:
- // Value and hash nodes don't have children, so they're left as were
- return n, n
- }
-}
diff --git a/trie/iterator.go b/trie/iterator.go
deleted file mode 100644
index d174dae0b3..0000000000
--- a/trie/iterator.go
+++ /dev/null
@@ -1,801 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "container/heap"
- "errors"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/libevm/common"
-)
-
-// NodeResolver is used for looking up trie nodes before reaching into the real
-// persistent layer. This is not mandatory, rather is an optimization for cases
-// where trie nodes can be recovered from some external mechanism without reading
-// from disk. In those cases, this resolver allows short circuiting accesses and
-// returning them from memory.
-type NodeResolver func(owner common.Hash, path []byte, hash common.Hash) []byte
-
-// Iterator is a key-value trie iterator that traverses a Trie.
-type Iterator struct {
- nodeIt NodeIterator
-
- Key []byte // Current data key on which the iterator is positioned on
- Value []byte // Current data value on which the iterator is positioned on
- Err error
-}
-
-// NewIterator creates a new key-value iterator from a node iterator.
-// Note that the value returned by the iterator is raw. If the content is encoded
-// (e.g. storage value is RLP-encoded), it's caller's duty to decode it.
-func NewIterator(it NodeIterator) *Iterator {
- return &Iterator{
- nodeIt: it,
- }
-}
-
-// Next moves the iterator forward one key-value entry.
-func (it *Iterator) Next() bool {
- for it.nodeIt.Next(true) {
- if it.nodeIt.Leaf() {
- it.Key = it.nodeIt.LeafKey()
- it.Value = it.nodeIt.LeafBlob()
- return true
- }
- }
- it.Key = nil
- it.Value = nil
- it.Err = it.nodeIt.Error()
- return false
-}
-
-// Prove generates the Merkle proof for the leaf node the iterator is currently
-// positioned on.
-func (it *Iterator) Prove() [][]byte {
- return it.nodeIt.LeafProof()
-}
-
-// NodeIterator is an iterator to traverse the trie pre-order.
-type NodeIterator interface {
- // Next moves the iterator to the next node. If the parameter is false, any child
- // nodes will be skipped.
- Next(bool) bool
-
- // Error returns the error status of the iterator.
- Error() error
-
- // Hash returns the hash of the current node.
- Hash() common.Hash
-
- // Parent returns the hash of the parent of the current node. The hash may be the one
- // grandparent if the immediate parent is an internal node with no hash.
- Parent() common.Hash
-
- // Path returns the hex-encoded path to the current node.
- // Callers must not retain references to the return value after calling Next.
- // For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
- Path() []byte
-
- // NodeBlob returns the rlp-encoded value of the current iterated node.
- // If the node is an embedded node in its parent, nil is returned then.
- NodeBlob() []byte
-
- // Leaf returns true iff the current node is a leaf node.
- Leaf() bool
-
- // LeafKey returns the key of the leaf. The method panics if the iterator is not
- // positioned at a leaf. Callers must not retain references to the value after
- // calling Next.
- LeafKey() []byte
-
- // LeafBlob returns the content of the leaf. The method panics if the iterator
- // is not positioned at a leaf. Callers must not retain references to the value
- // after calling Next.
- LeafBlob() []byte
-
- // LeafProof returns the Merkle proof of the leaf. The method panics if the
- // iterator is not positioned at a leaf. Callers must not retain references
- // to the value after calling Next.
- LeafProof() [][]byte
-
- // AddResolver sets a node resolver to use for looking up trie nodes before
- // reaching into the real persistent layer.
- //
- // This is not required for normal operation, rather is an optimization for
- // cases where trie nodes can be recovered from some external mechanism without
- // reading from disk. In those cases, this resolver allows short circuiting
- // accesses and returning them from memory.
- //
- // Before adding a similar mechanism to any other place in Geth, consider
- // making trie.Database an interface and wrapping at that level. It's a huge
- // refactor, but it could be worth it if another occurrence arises.
- AddResolver(NodeResolver)
-}
-
-// nodeIteratorState represents the iteration state at one particular node of the
-// trie, which can be resumed at a later invocation.
-type nodeIteratorState struct {
- hash common.Hash // Hash of the node being iterated (nil if not standalone)
- node node // Trie node being iterated
- parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
- index int // Child to be processed next
- pathlen int // Length of the path to this node
-}
-
-type nodeIterator struct {
- trie *Trie // Trie being iterated
- stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state
- path []byte // Path to the current node
- err error // Failure set in case of an internal error in the iterator
-
- resolver NodeResolver // optional node resolver for avoiding disk hits
- pool []*nodeIteratorState // local pool for iteratorstates
-}
-
-// errIteratorEnd is stored in nodeIterator.err when iteration is done.
-var errIteratorEnd = errors.New("end of iteration")
-
-// seekError is stored in nodeIterator.err if the initial seek has failed.
-type seekError struct {
- key []byte
- err error
-}
-
-func (e seekError) Error() string {
- return "seek error: " + e.err.Error()
-}
-
-func newNodeIterator(trie *Trie, start []byte) NodeIterator {
- if trie.Hash() == types.EmptyRootHash {
- return &nodeIterator{
- trie: trie,
- err: errIteratorEnd,
- }
- }
- it := &nodeIterator{trie: trie}
- it.err = it.seek(start)
- return it
-}
-
-func (it *nodeIterator) putInPool(item *nodeIteratorState) {
- if len(it.pool) < 40 {
- item.node = nil
- it.pool = append(it.pool, item)
- }
-}
-
-func (it *nodeIterator) getFromPool() *nodeIteratorState {
- idx := len(it.pool) - 1
- if idx < 0 {
- return new(nodeIteratorState)
- }
- el := it.pool[idx]
- it.pool[idx] = nil
- it.pool = it.pool[:idx]
- return el
-}
-
-func (it *nodeIterator) AddResolver(resolver NodeResolver) {
- it.resolver = resolver
-}
-
-func (it *nodeIterator) Hash() common.Hash {
- if len(it.stack) == 0 {
- return common.Hash{}
- }
- return it.stack[len(it.stack)-1].hash
-}
-
-func (it *nodeIterator) Parent() common.Hash {
- if len(it.stack) == 0 {
- return common.Hash{}
- }
- return it.stack[len(it.stack)-1].parent
-}
-
-func (it *nodeIterator) Leaf() bool {
- return hasTerm(it.path)
-}
-
-func (it *nodeIterator) LeafKey() []byte {
- if len(it.stack) > 0 {
- if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- return hexToKeybytes(it.path)
- }
- }
- panic("not at leaf")
-}
-
-func (it *nodeIterator) LeafBlob() []byte {
- if len(it.stack) > 0 {
- if node, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- return node
- }
- }
- panic("not at leaf")
-}
-
-func (it *nodeIterator) LeafProof() [][]byte {
- if len(it.stack) > 0 {
- if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
- proofs := make([][]byte, 0, len(it.stack))
-
- for i, item := range it.stack[:len(it.stack)-1] {
- // Gather nodes that end up as hash nodes (or the root)
- node, hashed := hasher.proofHash(item.node)
- if _, ok := hashed.(hashNode); ok || i == 0 {
- proofs = append(proofs, nodeToBytes(node))
- }
- }
- return proofs
- }
- }
- panic("not at leaf")
-}
-
-func (it *nodeIterator) Path() []byte {
- return it.path
-}
-
-func (it *nodeIterator) NodeBlob() []byte {
- if it.Hash() == (common.Hash{}) {
- return nil // skip the non-standalone node
- }
- blob, err := it.resolveBlob(it.Hash().Bytes(), it.Path())
- if err != nil {
- it.err = err
- return nil
- }
- return blob
-}
-
-func (it *nodeIterator) Error() error {
- if it.err == errIteratorEnd {
- return nil
- }
- if seek, ok := it.err.(seekError); ok {
- return seek.err
- }
- return it.err
-}
-
-// Next moves the iterator to the next node, returning whether there are any
-// further nodes. In case of an internal error this method returns false and
-// sets the Error field to the encountered failure. If `descend` is false,
-// skips iterating over any subnodes of the current node.
-func (it *nodeIterator) Next(descend bool) bool {
- if it.err == errIteratorEnd {
- return false
- }
- if seek, ok := it.err.(seekError); ok {
- if it.err = it.seek(seek.key); it.err != nil {
- return false
- }
- }
- // Otherwise step forward with the iterator and report any errors.
- state, parentIndex, path, err := it.peek(descend)
- it.err = err
- if it.err != nil {
- return false
- }
- it.push(state, parentIndex, path)
- return true
-}
-
-func (it *nodeIterator) seek(prefix []byte) error {
- // The path we're looking for is the hex encoded key without terminator.
- key := keybytesToHex(prefix)
- key = key[:len(key)-1]
- // Move forward until we're just before the closest match to key.
- for {
- state, parentIndex, path, err := it.peekSeek(key)
- if err == errIteratorEnd {
- return errIteratorEnd
- } else if err != nil {
- return seekError{prefix, err}
- } else if bytes.Compare(path, key) >= 0 {
- return nil
- }
- it.push(state, parentIndex, path)
- }
-}
-
-// init initializes the iterator.
-func (it *nodeIterator) init() (*nodeIteratorState, error) {
- root := it.trie.Hash()
- state := &nodeIteratorState{node: it.trie.root, index: -1}
- if root != types.EmptyRootHash {
- state.hash = root
- }
- return state, state.resolve(it, nil)
-}
-
-// peek creates the next state of the iterator.
-func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, error) {
- // Initialize the iterator if we've just started.
- if len(it.stack) == 0 {
- state, err := it.init()
- return state, nil, nil, err
- }
- if !descend {
- // If we're skipping children, pop the current node first
- it.pop()
- }
-
- // Continue iteration to the next child
- for len(it.stack) > 0 {
- parent := it.stack[len(it.stack)-1]
- ancestor := parent.hash
- if (ancestor == common.Hash{}) {
- ancestor = parent.parent
- }
- state, path, ok := it.nextChild(parent, ancestor)
- if ok {
- if err := state.resolve(it, path); err != nil {
- return parent, &parent.index, path, err
- }
- return state, &parent.index, path, nil
- }
- // No more child nodes, move back up.
- it.pop()
- }
- return nil, nil, nil, errIteratorEnd
-}
-
-// peekSeek is like peek, but it also tries to skip resolving hashes by skipping
-// over the siblings that do not lead towards the desired seek position.
-func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []byte, error) {
- // Initialize the iterator if we've just started.
- if len(it.stack) == 0 {
- state, err := it.init()
- return state, nil, nil, err
- }
- if !bytes.HasPrefix(seekKey, it.path) {
- // If we're skipping children, pop the current node first
- it.pop()
- }
-
- // Continue iteration to the next child
- for len(it.stack) > 0 {
- parent := it.stack[len(it.stack)-1]
- ancestor := parent.hash
- if (ancestor == common.Hash{}) {
- ancestor = parent.parent
- }
- state, path, ok := it.nextChildAt(parent, ancestor, seekKey)
- if ok {
- if err := state.resolve(it, path); err != nil {
- return parent, &parent.index, path, err
- }
- return state, &parent.index, path, nil
- }
- // No more child nodes, move back up.
- it.pop()
- }
- return nil, nil, nil, errIteratorEnd
-}
-
-func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
- if it.resolver != nil {
- if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
- if resolved, err := decodeNode(hash, blob); err == nil {
- return resolved, nil
- }
- }
- }
- // Retrieve the specified node from the underlying node reader.
- // it.trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- blob, err := it.trie.reader.node(path, common.BytesToHash(hash))
- if err != nil {
- return nil, err
- }
- // The raw-blob format nodes are loaded either from the
- // clean cache or the database, they are all in their own
- // copy and safe to use unsafe decoder.
- return mustDecodeNodeUnsafe(hash, blob), nil
-}
-
-func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
- if it.resolver != nil {
- if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
- return blob, nil
- }
- }
- // Retrieve the specified node from the underlying node reader.
- // it.trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- return it.trie.reader.node(path, common.BytesToHash(hash))
-}
-
-func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
- if hash, ok := st.node.(hashNode); ok {
- resolved, err := it.resolveHash(hash, path)
- if err != nil {
- return err
- }
- st.node = resolved
- st.hash = common.BytesToHash(hash)
- }
- return nil
-}
-
-func (it *nodeIterator) findChild(n *fullNode, index int, ancestor common.Hash) (node, *nodeIteratorState, []byte, int) {
- var (
- path = it.path
- child node
- state *nodeIteratorState
- childPath []byte
- )
- for ; index < len(n.Children); index++ {
- if n.Children[index] != nil {
- child = n.Children[index]
- hash, _ := child.cache()
- state = it.getFromPool()
- state.hash = common.BytesToHash(hash)
- state.node = child
- state.parent = ancestor
- state.index = -1
- state.pathlen = len(path)
- childPath = append(childPath, path...)
- childPath = append(childPath, byte(index))
- return child, state, childPath, index
- }
- }
- return nil, nil, nil, 0
-}
-
-func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Hash) (*nodeIteratorState, []byte, bool) {
- switch node := parent.node.(type) {
- case *fullNode:
- // Full node, move to the first non-nil child.
- if child, state, path, index := it.findChild(node, parent.index+1, ancestor); child != nil {
- parent.index = index - 1
- return state, path, true
- }
- case *shortNode:
- // Short node, return the pointer singleton child
- if parent.index < 0 {
- hash, _ := node.Val.cache()
- state := it.getFromPool()
- state.hash = common.BytesToHash(hash)
- state.node = node.Val
- state.parent = ancestor
- state.index = -1
- state.pathlen = len(it.path)
- path := append(it.path, node.Key...)
- return state, path, true
- }
- }
- return parent, it.path, false
-}
-
-// nextChildAt is similar to nextChild, except that it targets a child as close to the
-// target key as possible, thus skipping siblings.
-func (it *nodeIterator) nextChildAt(parent *nodeIteratorState, ancestor common.Hash, key []byte) (*nodeIteratorState, []byte, bool) {
- switch n := parent.node.(type) {
- case *fullNode:
- // Full node, move to the first non-nil child before the desired key position
- child, state, path, index := it.findChild(n, parent.index+1, ancestor)
- if child == nil {
- // No more children in this fullnode
- return parent, it.path, false
- }
- // If the child we found is already past the seek position, just return it.
- if bytes.Compare(path, key) >= 0 {
- parent.index = index - 1
- return state, path, true
- }
- // The child is before the seek position. Try advancing
- for {
- nextChild, nextState, nextPath, nextIndex := it.findChild(n, index+1, ancestor)
- // If we run out of children, or skipped past the target, return the
- // previous one
- if nextChild == nil || bytes.Compare(nextPath, key) >= 0 {
- parent.index = index - 1
- return state, path, true
- }
- // We found a better child closer to the target
- state, path, index = nextState, nextPath, nextIndex
- }
- case *shortNode:
- // Short node, return the pointer singleton child
- if parent.index < 0 {
- hash, _ := n.Val.cache()
- state := it.getFromPool()
- state.hash = common.BytesToHash(hash)
- state.node = n.Val
- state.parent = ancestor
- state.index = -1
- state.pathlen = len(it.path)
- path := append(it.path, n.Key...)
- return state, path, true
- }
- }
- return parent, it.path, false
-}
-
-func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []byte) {
- it.path = path
- it.stack = append(it.stack, state)
- if parentIndex != nil {
- *parentIndex++
- }
-}
-
-func (it *nodeIterator) pop() {
- last := it.stack[len(it.stack)-1]
- it.path = it.path[:last.pathlen]
- it.stack[len(it.stack)-1] = nil
- it.stack = it.stack[:len(it.stack)-1]
- // last is now unused
- it.putInPool(last)
-}
-
-func compareNodes(a, b NodeIterator) int {
- if cmp := bytes.Compare(a.Path(), b.Path()); cmp != 0 {
- return cmp
- }
- if a.Leaf() && !b.Leaf() {
- return -1
- } else if b.Leaf() && !a.Leaf() {
- return 1
- }
- if cmp := bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()); cmp != 0 {
- return cmp
- }
- if a.Leaf() && b.Leaf() {
- return bytes.Compare(a.LeafBlob(), b.LeafBlob())
- }
- return 0
-}
-
-type differenceIterator struct {
- a, b NodeIterator // Nodes returned are those in b - a.
- eof bool // Indicates a has run out of elements
- count int // Number of nodes scanned on either trie
-}
-
-// NewDifferenceIterator constructs a NodeIterator that iterates over elements in b that
-// are not in a. Returns the iterator, and a pointer to an integer recording the number
-// of nodes seen.
-func NewDifferenceIterator(a, b NodeIterator) (NodeIterator, *int) {
- a.Next(true)
- it := &differenceIterator{
- a: a,
- b: b,
- }
- return it, &it.count
-}
-
-func (it *differenceIterator) Hash() common.Hash {
- return it.b.Hash()
-}
-
-func (it *differenceIterator) Parent() common.Hash {
- return it.b.Parent()
-}
-
-func (it *differenceIterator) Leaf() bool {
- return it.b.Leaf()
-}
-
-func (it *differenceIterator) LeafKey() []byte {
- return it.b.LeafKey()
-}
-
-func (it *differenceIterator) LeafBlob() []byte {
- return it.b.LeafBlob()
-}
-
-func (it *differenceIterator) LeafProof() [][]byte {
- return it.b.LeafProof()
-}
-
-func (it *differenceIterator) Path() []byte {
- return it.b.Path()
-}
-
-func (it *differenceIterator) NodeBlob() []byte {
- return it.b.NodeBlob()
-}
-
-func (it *differenceIterator) AddResolver(resolver NodeResolver) {
- panic("not implemented")
-}
-
-func (it *differenceIterator) Next(bool) bool {
- // Invariants:
- // - We always advance at least one element in b.
- // - At the start of this function, a's path is lexically greater than b's.
- if !it.b.Next(true) {
- return false
- }
- it.count++
-
- if it.eof {
- // a has reached eof, so we just return all elements from b
- return true
- }
-
- for {
- switch compareNodes(it.a, it.b) {
- case -1:
- // b jumped past a; advance a
- if !it.a.Next(true) {
- it.eof = true
- return true
- }
- it.count++
- case 1:
- // b is before a
- return true
- case 0:
- // a and b are identical; skip this whole subtree if the nodes have hashes
- hasHash := it.a.Hash() == common.Hash{}
- if !it.b.Next(hasHash) {
- return false
- }
- it.count++
- if !it.a.Next(hasHash) {
- it.eof = true
- return true
- }
- it.count++
- }
- }
-}
-
-func (it *differenceIterator) Error() error {
- if err := it.a.Error(); err != nil {
- return err
- }
- return it.b.Error()
-}
-
-type nodeIteratorHeap []NodeIterator
-
-func (h nodeIteratorHeap) Len() int { return len(h) }
-func (h nodeIteratorHeap) Less(i, j int) bool { return compareNodes(h[i], h[j]) < 0 }
-func (h nodeIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h *nodeIteratorHeap) Push(x interface{}) { *h = append(*h, x.(NodeIterator)) }
-func (h *nodeIteratorHeap) Pop() interface{} {
- n := len(*h)
- x := (*h)[n-1]
- *h = (*h)[0 : n-1]
- return x
-}
-
-type unionIterator struct {
- items *nodeIteratorHeap // Nodes returned are the union of the ones in these iterators
- count int // Number of nodes scanned across all tries
-}
-
-// NewUnionIterator constructs a NodeIterator that iterates over elements in the union
-// of the provided NodeIterators. Returns the iterator, and a pointer to an integer
-// recording the number of nodes visited.
-func NewUnionIterator(iters []NodeIterator) (NodeIterator, *int) {
- h := make(nodeIteratorHeap, len(iters))
- copy(h, iters)
- heap.Init(&h)
-
- ui := &unionIterator{items: &h}
- return ui, &ui.count
-}
-
-func (it *unionIterator) Hash() common.Hash {
- return (*it.items)[0].Hash()
-}
-
-func (it *unionIterator) Parent() common.Hash {
- return (*it.items)[0].Parent()
-}
-
-func (it *unionIterator) Leaf() bool {
- return (*it.items)[0].Leaf()
-}
-
-func (it *unionIterator) LeafKey() []byte {
- return (*it.items)[0].LeafKey()
-}
-
-func (it *unionIterator) LeafBlob() []byte {
- return (*it.items)[0].LeafBlob()
-}
-
-func (it *unionIterator) LeafProof() [][]byte {
- return (*it.items)[0].LeafProof()
-}
-
-func (it *unionIterator) Path() []byte {
- return (*it.items)[0].Path()
-}
-
-func (it *unionIterator) NodeBlob() []byte {
- return (*it.items)[0].NodeBlob()
-}
-
-func (it *unionIterator) AddResolver(resolver NodeResolver) {
- panic("not implemented")
-}
-
-// Next returns the next node in the union of tries being iterated over.
-//
-// It does this by maintaining a heap of iterators, sorted by the iteration
-// order of their next elements, with one entry for each source trie. Each
-// time Next() is called, it takes the least element from the heap to return,
-// advancing any other iterators that also point to that same element. These
-// iterators are called with descend=false, since we know that any nodes under
-// these nodes will also be duplicates, found in the currently selected iterator.
-// Whenever an iterator is advanced, it is pushed back into the heap if it still
-// has elements remaining.
-//
-// In the case that descend=false - eg, we're asked to ignore all subnodes of the
-// current node - we also advance any iterators in the heap that have the current
-// path as a prefix.
-func (it *unionIterator) Next(descend bool) bool {
- if len(*it.items) == 0 {
- return false
- }
-
- // Get the next key from the union
- least := heap.Pop(it.items).(NodeIterator)
-
- // Skip over other nodes as long as they're identical, or, if we're not descending, as
- // long as they have the same prefix as the current node.
- for len(*it.items) > 0 && ((!descend && bytes.HasPrefix((*it.items)[0].Path(), least.Path())) || compareNodes(least, (*it.items)[0]) == 0) {
- skipped := heap.Pop(it.items).(NodeIterator)
- // Skip the whole subtree if the nodes have hashes; otherwise just skip this node
- if skipped.Next(skipped.Hash() == common.Hash{}) {
- it.count++
- // If there are more elements, push the iterator back on the heap
- heap.Push(it.items, skipped)
- }
- }
- if least.Next(descend) {
- it.count++
- heap.Push(it.items, least)
- }
- return len(*it.items) > 0
-}
-
-func (it *unionIterator) Error() error {
- for i := 0; i < len(*it.items); i++ {
- if err := (*it.items)[i].Error(); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
deleted file mode 100644
index 3219d93502..0000000000
--- a/trie/iterator_test.go
+++ /dev/null
@@ -1,641 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-func TestEmptyIterator(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- iter := trie.MustNodeIterator(nil)
-
- seen := make(map[string]struct{})
- for iter.Next(true) {
- seen[string(iter.Path())] = struct{}{}
- }
- if len(seen) != 0 {
- t.Fatal("Unexpected trie node iterated")
- }
-}
-
-func TestIterator(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- all := make(map[string]string)
- for _, val := range vals {
- all[val.k] = val.v
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- found := make(map[string]string)
- it := NewIterator(trie.MustNodeIterator(nil))
- for it.Next() {
- found[string(it.Key)] = string(it.Value)
- }
-
- for k, v := range all {
- if found[k] != v {
- t.Errorf("iterator value mismatch for %s: got %q want %q", k, found[k], v)
- }
- }
-}
-
-type kv struct {
- k, v []byte
- t bool
-}
-
-func (k *kv) cmp(other *kv) int {
- return bytes.Compare(k.k, other.k)
-}
-
-func TestIteratorLargeData(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- vals := make(map[string]*kv)
-
- for i := byte(0); i < 255; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false}
- trie.MustUpdate(value.k, value.v)
- trie.MustUpdate(value2.k, value2.v)
- vals[string(value.k)] = value
- vals[string(value2.k)] = value2
- }
-
- it := NewIterator(trie.MustNodeIterator(nil))
- for it.Next() {
- vals[string(it.Key)].t = true
- }
-
- var untouched []*kv
- for _, value := range vals {
- if !value.t {
- untouched = append(untouched, value)
- }
- }
-
- if len(untouched) > 0 {
- t.Errorf("Missed %d nodes", len(untouched))
- for _, value := range untouched {
- t.Error(value)
- }
- }
-}
-
-type iterationElement struct {
- hash common.Hash
- path []byte
- blob []byte
-}
-
-// Tests that the node iterator indeed walks over the entire database contents.
-func TestNodeIteratorCoverage(t *testing.T) {
- testNodeIteratorCoverage(t, rawdb.HashScheme)
- testNodeIteratorCoverage(t, rawdb.PathScheme)
-}
-
-func testNodeIteratorCoverage(t *testing.T, scheme string) {
- // Create some arbitrary test trie to iterate
- db, nodeDb, trie, _ := makeTestTrie(scheme)
-
- // Gather all the node hashes found by the iterator
- var elements = make(map[common.Hash]iterationElement)
- for it := trie.MustNodeIterator(nil); it.Next(true); {
- if it.Hash() != (common.Hash{}) {
- elements[it.Hash()] = iterationElement{
- hash: it.Hash(),
- path: common.CopyBytes(it.Path()),
- blob: common.CopyBytes(it.NodeBlob()),
- }
- }
- }
- // Cross check the hashes and the database itself
- reader, err := nodeDb.Reader(trie.Hash())
- if err != nil {
- t.Fatalf("state is not available %x", trie.Hash())
- }
- for _, element := range elements {
- if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil {
- t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
- } else if !bytes.Equal(blob, element.blob) {
- t.Errorf("node blob is different, want %v got %v", element.blob, blob)
- }
- }
- var (
- count int
- it = db.NewIterator(nil, nil)
- )
- for it.Next() {
- res, _, _ := isTrieNode(nodeDb.Scheme(), it.Key(), it.Value())
- if !res {
- continue
- }
- count += 1
- if elem, ok := elements[crypto.Keccak256Hash(it.Value())]; !ok {
- t.Error("state entry not reported")
- } else if !bytes.Equal(it.Value(), elem.blob) {
- t.Errorf("node blob is different, want %v got %v", elem.blob, it.Value())
- }
- }
- it.Release()
- if count != len(elements) {
- t.Errorf("state entry is mismatched %d %d", count, len(elements))
- }
-}
-
-type kvs struct{ k, v string }
-
-var testdata1 = []kvs{
- {"barb", "ba"},
- {"bard", "bc"},
- {"bars", "bb"},
- {"bar", "b"},
- {"fab", "z"},
- {"food", "ab"},
- {"foos", "aa"},
- {"foo", "a"},
-}
-
-var testdata2 = []kvs{
- {"aardvark", "c"},
- {"bar", "b"},
- {"barb", "bd"},
- {"bars", "be"},
- {"fab", "z"},
- {"foo", "a"},
- {"foos", "aa"},
- {"food", "ab"},
- {"jars", "d"},
-}
-
-func TestIteratorSeek(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for _, val := range testdata1 {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
-
- // Seek to the middle.
- it := NewIterator(trie.MustNodeIterator([]byte("fab")))
- if err := checkIteratorOrder(testdata1[4:], it); err != nil {
- t.Fatal(err)
- }
-
- // Seek to a non-existent key.
- it = NewIterator(trie.MustNodeIterator([]byte("barc")))
- if err := checkIteratorOrder(testdata1[1:], it); err != nil {
- t.Fatal(err)
- }
-
- // Seek beyond the end.
- it = NewIterator(trie.MustNodeIterator([]byte("z")))
- if err := checkIteratorOrder(nil, it); err != nil {
- t.Fatal(err)
- }
-}
-
-func checkIteratorOrder(want []kvs, it *Iterator) error {
- for it.Next() {
- if len(want) == 0 {
- return fmt.Errorf("didn't expect any more values, got key %q", it.Key)
- }
- if !bytes.Equal(it.Key, []byte(want[0].k)) {
- return fmt.Errorf("wrong key: got %q, want %q", it.Key, want[0].k)
- }
- want = want[1:]
- }
- if len(want) > 0 {
- return fmt.Errorf("iterator ended early, want key %q", want[0])
- }
- return nil
-}
-
-func TestDifferenceIterator(t *testing.T) {
- dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- triea := NewEmpty(dba)
- for _, val := range testdata1 {
- triea.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
- triea, _ = New(TrieID(rootA), dba)
-
- dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trieb := NewEmpty(dbb)
- for _, val := range testdata2 {
- trieb.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
- trieb, _ = New(TrieID(rootB), dbb)
-
- found := make(map[string]string)
- di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil))
- it := NewIterator(di)
- for it.Next() {
- found[string(it.Key)] = string(it.Value)
- }
-
- all := []struct{ k, v string }{
- {"aardvark", "c"},
- {"barb", "bd"},
- {"bars", "be"},
- {"jars", "d"},
- }
- for _, item := range all {
- if found[item.k] != item.v {
- t.Errorf("iterator value mismatch for %s: got %v want %v", item.k, found[item.k], item.v)
- }
- }
- if len(found) != len(all) {
- t.Errorf("iterator count mismatch: got %d values, want %d", len(found), len(all))
- }
-}
-
-func TestUnionIterator(t *testing.T) {
- dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- triea := NewEmpty(dba)
- for _, val := range testdata1 {
- triea.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
- triea, _ = New(TrieID(rootA), dba)
-
- dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trieb := NewEmpty(dbb)
- for _, val := range testdata2 {
- trieb.MustUpdate([]byte(val.k), []byte(val.v))
- }
- rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
- trieb, _ = New(TrieID(rootB), dbb)
-
- di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
- it := NewIterator(di)
-
- all := []struct{ k, v string }{
- {"aardvark", "c"},
- {"barb", "ba"},
- {"barb", "bd"},
- {"bard", "bc"},
- {"bars", "bb"},
- {"bars", "be"},
- {"bar", "b"},
- {"fab", "z"},
- {"food", "ab"},
- {"foos", "aa"},
- {"foo", "a"},
- {"jars", "d"},
- }
-
- for i, kv := range all {
- if !it.Next() {
- t.Errorf("Iterator ends prematurely at element %d", i)
- }
- if kv.k != string(it.Key) {
- t.Errorf("iterator value mismatch for element %d: got key %s want %s", i, it.Key, kv.k)
- }
- if kv.v != string(it.Value) {
- t.Errorf("iterator value mismatch for element %d: got value %s want %s", i, it.Value, kv.v)
- }
- }
- if it.Next() {
- t.Errorf("Iterator returned extra values.")
- }
-}
-
-func TestIteratorNoDups(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- tr := NewEmpty(db)
- for _, val := range testdata1 {
- tr.MustUpdate([]byte(val.k), []byte(val.v))
- }
- checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
-}
-
-// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
-func TestIteratorContinueAfterError(t *testing.T) {
- testIteratorContinueAfterError(t, false, rawdb.HashScheme)
- testIteratorContinueAfterError(t, true, rawdb.HashScheme)
- testIteratorContinueAfterError(t, false, rawdb.PathScheme)
- testIteratorContinueAfterError(t, true, rawdb.PathScheme)
-}
-
-func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
- diskdb := rawdb.NewMemoryDatabase()
- tdb := newTestDatabase(diskdb, scheme)
-
- tr := NewEmpty(tdb)
- for _, val := range testdata1 {
- tr.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- if !memonly {
- tdb.Commit(root)
- }
- tr, _ = New(TrieID(root), tdb)
- wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
-
- var (
- paths [][]byte
- hashes []common.Hash
- )
- if memonly {
- for path, n := range nodes.Nodes {
- paths = append(paths, []byte(path))
- hashes = append(hashes, n.Hash)
- }
- } else {
- it := diskdb.NewIterator(nil, nil)
- for it.Next() {
- ok, path, hash := isTrieNode(tdb.Scheme(), it.Key(), it.Value())
- if !ok {
- continue
- }
- paths = append(paths, path)
- hashes = append(hashes, hash)
- }
- it.Release()
- }
- for i := 0; i < 20; i++ {
- // Create trie that will load all nodes from DB.
- tr, _ := New(TrieID(tr.Hash()), tdb)
-
- // Remove a random node from the database. It can't be the root node
- // because that one is already loaded.
- var (
- rval []byte
- rpath []byte
- rhash common.Hash
- )
- for {
- if memonly {
- rpath = paths[rand.Intn(len(paths))]
- n := nodes.Nodes[string(rpath)]
- if n == nil {
- continue
- }
- rhash = n.Hash
- } else {
- index := rand.Intn(len(paths))
- rpath = paths[index]
- rhash = hashes[index]
- }
- if rhash != tr.Hash() {
- break
- }
- }
- if memonly {
- tr.reader.banned = map[string]struct{}{string(rpath): {}}
- } else {
- rval = rawdb.ReadTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
- rawdb.DeleteTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
- }
- // Iterate until the error is hit.
- seen := make(map[string]bool)
- it := tr.MustNodeIterator(nil)
- checkIteratorNoDups(t, it, seen)
- missing, ok := it.Error().(*MissingNodeError)
- if !ok || missing.NodeHash != rhash {
- t.Fatal("didn't hit missing node, got", it.Error())
- }
-
- // Add the node back and continue iteration.
- if memonly {
- delete(tr.reader.banned, string(rpath))
- } else {
- rawdb.WriteTrieNode(diskdb, common.Hash{}, rpath, rhash, rval, tdb.Scheme())
- }
- checkIteratorNoDups(t, it, seen)
- if it.Error() != nil {
- t.Fatal("unexpected error", it.Error())
- }
- if len(seen) != wantNodeCount {
- t.Fatal("wrong node iteration count, got", len(seen), "want", wantNodeCount)
- }
- }
-}
-
-// Similar to the test above, this one checks that failure to create nodeIterator at a
-// certain key prefix behaves correctly when Next is called. The expectation is that Next
-// should retry seeking before returning true for the first time.
-func TestIteratorContinueAfterSeekError(t *testing.T) {
- testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
- testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
- testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
- testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
-}
-
-func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
- // Commit test trie to db, then remove the node containing "bars".
- var (
- barNodePath []byte
- barNodeHash = common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
- )
- diskdb := rawdb.NewMemoryDatabase()
- triedb := newTestDatabase(diskdb, scheme)
- ctr := NewEmpty(triedb)
- for _, val := range testdata1 {
- ctr.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := ctr.Commit(false)
- for path, n := range nodes.Nodes {
- if n.Hash == barNodeHash {
- barNodePath = []byte(path)
- break
- }
- }
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- if !memonly {
- triedb.Commit(root)
- }
- var (
- barNodeBlob []byte
- )
- tr, _ := New(TrieID(root), triedb)
- if memonly {
- tr.reader.banned = map[string]struct{}{string(barNodePath): {}}
- } else {
- barNodeBlob = rawdb.ReadTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
- rawdb.DeleteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
- }
- // Create a new iterator that seeks to "bars". Seeking can't proceed because
- // the node is missing.
- it := tr.MustNodeIterator([]byte("bars"))
- missing, ok := it.Error().(*MissingNodeError)
- if !ok {
- t.Fatal("want MissingNodeError, got", it.Error())
- } else if missing.NodeHash != barNodeHash {
- t.Fatal("wrong node missing")
- }
- // Reinsert the missing node.
- if memonly {
- delete(tr.reader.banned, string(barNodePath))
- } else {
- rawdb.WriteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, barNodeBlob, triedb.Scheme())
- }
- // Check that iteration produces the right set of values.
- if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil {
- t.Fatal(err)
- }
-}
-
-func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) int {
- if seen == nil {
- seen = make(map[string]bool)
- }
- for it.Next(true) {
- if seen[string(it.Path())] {
- t.Fatalf("iterator visited node path %x twice", it.Path())
- }
- seen[string(it.Path())] = true
- }
- return len(seen)
-}
-
-func TestIteratorNodeBlob(t *testing.T) {
- testIteratorNodeBlob(t, rawdb.HashScheme)
- testIteratorNodeBlob(t, rawdb.PathScheme)
-}
-
-func testIteratorNodeBlob(t *testing.T, scheme string) {
- var (
- db = rawdb.NewMemoryDatabase()
- triedb = newTestDatabase(db, scheme)
- trie = NewEmpty(triedb)
- )
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- all := make(map[string]string)
- for _, val := range vals {
- all[val.k] = val.v
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- triedb.Commit(root)
-
- var found = make(map[common.Hash][]byte)
- trie, _ = New(TrieID(root), triedb)
- it := trie.MustNodeIterator(nil)
- for it.Next(true) {
- if it.Hash() == (common.Hash{}) {
- continue
- }
- found[it.Hash()] = it.NodeBlob()
- }
-
- dbIter := db.NewIterator(nil, nil)
- defer dbIter.Release()
-
- var count int
- for dbIter.Next() {
- ok, _, _ := isTrieNode(triedb.Scheme(), dbIter.Key(), dbIter.Value())
- if !ok {
- continue
- }
- got, present := found[crypto.Keccak256Hash(dbIter.Value())]
- if !present {
- t.Fatal("Miss trie node")
- }
- if !bytes.Equal(got, dbIter.Value()) {
- t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got)
- }
- count += 1
- }
- if count != len(found) {
- t.Fatal("Find extra trie node via iterator")
- }
-}
-
-// isTrieNode is a helper function which reports if the provided
-// database entry belongs to a trie node or not. Note in tests
-// only single layer trie is used, namely storage trie is not
-// considered at all.
-func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) {
- var (
- path []byte
- hash common.Hash
- )
- if scheme == rawdb.HashScheme {
- ok := rawdb.IsLegacyTrieNode(key, val)
- if !ok {
- return false, nil, common.Hash{}
- }
- hash = common.BytesToHash(key)
- } else {
- ok, remain := rawdb.ResolveAccountTrieNodeKey(key)
- if !ok {
- return false, nil, common.Hash{}
- }
- path = common.CopyBytes(remain)
- hash = crypto.Keccak256Hash(val)
- }
- return true, path, hash
-}
-
-func BenchmarkIterator(b *testing.B) {
- diskDb, srcDb, tr, _ := makeTestTrie(rawdb.HashScheme)
- root := tr.Hash()
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- if err := checkTrieConsistency(diskDb, srcDb.Scheme(), root, false); err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/trie/node.go b/trie/node.go
deleted file mode 100644
index 523a7b3497..0000000000
--- a/trie/node.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "fmt"
- "io"
- "strings"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
-)
-
-var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
-
-type node interface {
- cache() (hashNode, bool)
- encode(w rlp.EncoderBuffer)
- fstring(string) string
-}
-
-type (
- fullNode struct {
- Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
- flags nodeFlag
- }
- shortNode struct {
- Key []byte
- Val node
- flags nodeFlag
- }
- hashNode []byte
- valueNode []byte
-)
-
-// nilValueNode is used when collapsing internal trie nodes for hashing, since
-// unset children need to serialize correctly.
-var nilValueNode = valueNode(nil)
-
-// EncodeRLP encodes a full node into the consensus RLP format.
-func (n *fullNode) EncodeRLP(w io.Writer) error {
- eb := rlp.NewEncoderBuffer(w)
- n.encode(eb)
- return eb.Flush()
-}
-
-func (n *fullNode) copy() *fullNode { copy := *n; return &copy }
-func (n *shortNode) copy() *shortNode { copy := *n; return &copy }
-
-// nodeFlag contains caching-related metadata about a node.
-type nodeFlag struct {
- hash hashNode // cached hash of the node (may be nil)
- dirty bool // whether the node has changes that must be written to the database
-}
-
-func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
-func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
-func (n hashNode) cache() (hashNode, bool) { return nil, true }
-func (n valueNode) cache() (hashNode, bool) { return nil, true }
-
-// Pretty printing.
-func (n *fullNode) String() string { return n.fstring("") }
-func (n *shortNode) String() string { return n.fstring("") }
-func (n hashNode) String() string { return n.fstring("") }
-func (n valueNode) String() string { return n.fstring("") }
-
-func (n *fullNode) fstring(ind string) string {
- resp := fmt.Sprintf("[\n%s ", ind)
- for i, node := range &n.Children {
- if node == nil {
- resp += fmt.Sprintf("%s: ", indices[i])
- } else {
- resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" "))
- }
- }
- return resp + fmt.Sprintf("\n%s] ", ind)
-}
-func (n *shortNode) fstring(ind string) string {
- return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" "))
-}
-func (n hashNode) fstring(ind string) string {
- return fmt.Sprintf("<%x> ", []byte(n))
-}
-func (n valueNode) fstring(ind string) string {
- return fmt.Sprintf("%x ", []byte(n))
-}
-
-// rawNode is a simple binary blob used to differentiate between collapsed trie
-// nodes and already encoded RLP binary blobs (while at the same time store them
-// in the same cache fields).
-type rawNode []byte
-
-func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
-func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawNode) EncodeRLP(w io.Writer) error {
- _, err := w.Write(n)
- return err
-}
-
-// mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered.
-func mustDecodeNode(hash, buf []byte) node {
- n, err := decodeNode(hash, buf)
- if err != nil {
- panic(fmt.Sprintf("node %x: %v", hash, err))
- }
- return n
-}
-
-// mustDecodeNodeUnsafe is a wrapper of decodeNodeUnsafe and panic if any error is
-// encountered.
-func mustDecodeNodeUnsafe(hash, buf []byte) node {
- n, err := decodeNodeUnsafe(hash, buf)
- if err != nil {
- panic(fmt.Sprintf("node %x: %v", hash, err))
- }
- return n
-}
-
-// decodeNode parses the RLP encoding of a trie node. It will deep-copy the passed
-// byte slice for decoding, so it's safe to modify the byte slice afterwards. The-
-// decode performance of this function is not optimal, but it is suitable for most
-// scenarios with low performance requirements and hard to determine whether the
-// byte slice be modified or not.
-func decodeNode(hash, buf []byte) (node, error) {
- return decodeNodeUnsafe(hash, common.CopyBytes(buf))
-}
-
-// decodeNodeUnsafe parses the RLP encoding of a trie node. The passed byte slice
-// will be directly referenced by node without bytes deep copy, so the input MUST
-// not be changed after.
-func decodeNodeUnsafe(hash, buf []byte) (node, error) {
- if len(buf) == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- elems, _, err := rlp.SplitList(buf)
- if err != nil {
- return nil, fmt.Errorf("decode error: %v", err)
- }
- switch c, _ := rlp.CountValues(elems); c {
- case 2:
- n, err := decodeShort(hash, elems)
- return n, wrapError(err, "short")
- case 17:
- n, err := decodeFull(hash, elems)
- return n, wrapError(err, "full")
- default:
- return nil, fmt.Errorf("invalid number of list elements: %v", c)
- }
-}
-
-func decodeShort(hash, elems []byte) (node, error) {
- kbuf, rest, err := rlp.SplitString(elems)
- if err != nil {
- return nil, err
- }
- flag := nodeFlag{hash: hash}
- key := compactToHex(kbuf)
- if hasTerm(key) {
- // value node
- val, _, err := rlp.SplitString(rest)
- if err != nil {
- return nil, fmt.Errorf("invalid value node: %v", err)
- }
- return &shortNode{key, valueNode(val), flag}, nil
- }
- r, _, err := decodeRef(rest)
- if err != nil {
- return nil, wrapError(err, "val")
- }
- return &shortNode{key, r, flag}, nil
-}
-
-func decodeFull(hash, elems []byte) (*fullNode, error) {
- n := &fullNode{flags: nodeFlag{hash: hash}}
- for i := 0; i < 16; i++ {
- cld, rest, err := decodeRef(elems)
- if err != nil {
- return n, wrapError(err, fmt.Sprintf("[%d]", i))
- }
- n.Children[i], elems = cld, rest
- }
- val, _, err := rlp.SplitString(elems)
- if err != nil {
- return n, err
- }
- if len(val) > 0 {
- n.Children[16] = valueNode(val)
- }
- return n, nil
-}
-
-const hashLen = len(common.Hash{})
-
-func decodeRef(buf []byte) (node, []byte, error) {
- kind, val, rest, err := rlp.Split(buf)
- if err != nil {
- return nil, buf, err
- }
- switch {
- case kind == rlp.List:
- // 'embedded' node reference. The encoding must be smaller
- // than a hash in order to be valid.
- if size := len(buf) - len(rest); size > hashLen {
- err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
- return nil, buf, err
- }
- n, err := decodeNode(nil, buf)
- return n, rest, err
- case kind == rlp.String && len(val) == 0:
- // empty node
- return nil, rest, nil
- case kind == rlp.String && len(val) == 32:
- return hashNode(val), rest, nil
- default:
- return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val))
- }
-}
-
-// wraps a decoding error with information about the path to the
-// invalid child node (for debugging encoding issues).
-type decodeError struct {
- what error
- stack []string
-}
-
-func wrapError(err error, ctx string) error {
- if err == nil {
- return nil
- }
- if decErr, ok := err.(*decodeError); ok {
- decErr.stack = append(decErr.stack, ctx)
- return decErr
- }
- return &decodeError{err, []string{ctx}}
-}
-
-func (err *decodeError) Error() string {
- return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-"))
-}
diff --git a/trie/node_enc.go b/trie/node_enc.go
deleted file mode 100644
index 6cd6aba3c9..0000000000
--- a/trie/node_enc.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// (c) 2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "github.com/ava-labs/libevm/rlp"
-)
-
-func nodeToBytes(n node) []byte {
- w := rlp.NewEncoderBuffer(nil)
- n.encode(w)
- result := w.ToBytes()
- w.Flush()
- return result
-}
-
-func (n *fullNode) encode(w rlp.EncoderBuffer) {
- offset := w.List()
- for _, c := range n.Children {
- if c != nil {
- c.encode(w)
- } else {
- w.Write(rlp.EmptyString)
- }
- }
- w.ListEnd(offset)
-}
-
-func (n *shortNode) encode(w rlp.EncoderBuffer) {
- offset := w.List()
- w.WriteBytes(n.Key)
- if n.Val != nil {
- n.Val.encode(w)
- } else {
- w.Write(rlp.EmptyString)
- }
- w.ListEnd(offset)
-}
-
-func (n hashNode) encode(w rlp.EncoderBuffer) {
- w.WriteBytes(n)
-}
-
-func (n valueNode) encode(w rlp.EncoderBuffer) {
- w.WriteBytes(n)
-}
-
-func (n rawNode) encode(w rlp.EncoderBuffer) {
- w.Write(n)
-}
diff --git a/trie/node_test.go b/trie/node_test.go
deleted file mode 100644
index 51dd126bde..0000000000
--- a/trie/node_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "testing"
-
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
-)
-
-func newTestFullNode(v []byte) []interface{} {
- fullNodeData := []interface{}{}
- for i := 0; i < 16; i++ {
- k := bytes.Repeat([]byte{byte(i + 1)}, 32)
- fullNodeData = append(fullNodeData, k)
- }
- fullNodeData = append(fullNodeData, v)
- return fullNodeData
-}
-
-func TestDecodeNestedNode(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("fullnode"))
-
- data := [][]byte{}
- for i := 0; i < 16; i++ {
- data = append(data, nil)
- }
- data = append(data, []byte("subnode"))
- fullNodeData[15] = data
-
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- if _, err := decodeNode([]byte("testdecode"), buf.Bytes()); err != nil {
- t.Fatalf("decode nested full node err: %v", err)
- }
-}
-
-func TestDecodeFullNodeWrongSizeChild(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("wrongsizechild"))
- fullNodeData[0] = []byte("00")
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- _, err := decodeNode([]byte("testdecode"), buf.Bytes())
- if _, ok := err.(*decodeError); !ok {
- t.Fatalf("decodeNode returned wrong err: %v", err)
- }
-}
-
-func TestDecodeFullNodeWrongNestedFullNode(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("fullnode"))
-
- data := [][]byte{}
- for i := 0; i < 16; i++ {
- data = append(data, []byte("123456"))
- }
- data = append(data, []byte("subnode"))
- fullNodeData[15] = data
-
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- _, err := decodeNode([]byte("testdecode"), buf.Bytes())
- if _, ok := err.(*decodeError); !ok {
- t.Fatalf("decodeNode returned wrong err: %v", err)
- }
-}
-
-func TestDecodeFullNode(t *testing.T) {
- fullNodeData := newTestFullNode([]byte("decodefullnode"))
- buf := bytes.NewBuffer([]byte{})
- rlp.Encode(buf, fullNodeData)
-
- _, err := decodeNode([]byte("testdecode"), buf.Bytes())
- if err != nil {
- t.Fatalf("decode full node err: %v", err)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkEncodeShortNode
-// BenchmarkEncodeShortNode-8 16878850 70.81 ns/op 48 B/op 1 allocs/op
-func BenchmarkEncodeShortNode(b *testing.B) {
- node := &shortNode{
- Key: []byte{0x1, 0x2},
- Val: hashNode(randBytes(32)),
- }
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- nodeToBytes(node)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkEncodeFullNode
-// BenchmarkEncodeFullNode-8 4323273 284.4 ns/op 576 B/op 1 allocs/op
-func BenchmarkEncodeFullNode(b *testing.B) {
- node := &fullNode{}
- for i := 0; i < 16; i++ {
- node.Children[i] = hashNode(randBytes(32))
- }
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- nodeToBytes(node)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeShortNode
-// BenchmarkDecodeShortNode-8 7925638 151.0 ns/op 157 B/op 4 allocs/op
-func BenchmarkDecodeShortNode(b *testing.B) {
- node := &shortNode{
- Key: []byte{0x1, 0x2},
- Val: hashNode(randBytes(32)),
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNode(hash, blob)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeShortNodeUnsafe
-// BenchmarkDecodeShortNodeUnsafe-8 9027476 128.6 ns/op 109 B/op 3 allocs/op
-func BenchmarkDecodeShortNodeUnsafe(b *testing.B) {
- node := &shortNode{
- Key: []byte{0x1, 0x2},
- Val: hashNode(randBytes(32)),
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNodeUnsafe(hash, blob)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeFullNode
-// BenchmarkDecodeFullNode-8 1597462 761.9 ns/op 1280 B/op 18 allocs/op
-func BenchmarkDecodeFullNode(b *testing.B) {
- node := &fullNode{}
- for i := 0; i < 16; i++ {
- node.Children[i] = hashNode(randBytes(32))
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNode(hash, blob)
- }
-}
-
-// goos: darwin
-// goarch: arm64
-// pkg: github.com/ava-labs/coreth/trie
-// BenchmarkDecodeFullNodeUnsafe
-// BenchmarkDecodeFullNodeUnsafe-8 1789070 687.1 ns/op 704 B/op 17 allocs/op
-func BenchmarkDecodeFullNodeUnsafe(b *testing.B) {
- node := &fullNode{}
- for i := 0; i < 16; i++ {
- node.Children[i] = hashNode(randBytes(32))
- }
- blob := nodeToBytes(node)
- hash := crypto.Keccak256(blob)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- mustDecodeNodeUnsafe(hash, blob)
- }
-}
diff --git a/trie/proof.go b/trie/proof.go
deleted file mode 100644
index 0b2ee57796..0000000000
--- a/trie/proof.go
+++ /dev/null
@@ -1,626 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// Prove constructs a merkle proof for key. The result contains all encoded nodes
-// on the path to the value at key. The value itself is also included in the last
-// node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root node), ending
-// with the node that proves the absence of the key.
-func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return ErrCommitted
- }
- // Collect all nodes on the path to key.
- var (
- prefix []byte
- nodes []node
- tn = t.root
- )
- key = keybytesToHex(key)
- for len(key) > 0 && tn != nil {
- switch n := tn.(type) {
- case *shortNode:
- if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
- // The trie doesn't contain the key.
- tn = nil
- } else {
- tn = n.Val
- prefix = append(prefix, n.Key...)
- key = key[len(n.Key):]
- }
- nodes = append(nodes, n)
- case *fullNode:
- tn = n.Children[key[0]]
- prefix = append(prefix, key[0])
- key = key[1:]
- nodes = append(nodes, n)
- case hashNode:
- // Retrieve the specified node from the underlying node reader.
- // trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- blob, err := t.reader.node(prefix, common.BytesToHash(n))
- if err != nil {
- log.Error("Unhandled trie error in Trie.Prove", "err", err)
- return err
- }
- // The raw-blob format nodes are loaded either from the
- // clean cache or the database, they are all in their own
- // copy and safe to use unsafe decoder.
- tn = mustDecodeNodeUnsafe(n, blob)
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
- }
- }
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
-
- for i, n := range nodes {
- var hn node
- n, hn = hasher.proofHash(n)
- if hash, ok := hn.(hashNode); ok || i == 0 {
- // If the node's database encoding is a hash (or is the
- // root node), it becomes a proof element.
- enc := nodeToBytes(n)
- if !ok {
- hash = hasher.hashData(enc)
- }
- proofDb.Put(hash, enc)
- }
- }
- return nil
-}
-
-// Prove constructs a merkle proof for key. The result contains all encoded nodes
-// on the path to the value at key. The value itself is also included in the last
-// node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root node), ending
-// with the node that proves the absence of the key.
-func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- return t.trie.Prove(key, proofDb)
-}
-
-// VerifyProof checks merkle proofs. The given proof must contain the value for
-// key in a trie with the given root hash. VerifyProof returns an error if the
-// proof contains invalid trie nodes or the wrong value.
-func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) {
- key = keybytesToHex(key)
- wantHash := rootHash
- for i := 0; ; i++ {
- buf, _ := proofDb.Get(wantHash[:])
- if buf == nil {
- return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
- }
- n, err := decodeNode(wantHash[:], buf)
- if err != nil {
- return nil, fmt.Errorf("bad proof node %d: %v", i, err)
- }
- keyrest, cld := get(n, key, true)
- switch cld := cld.(type) {
- case nil:
- // The trie doesn't contain the key.
- return nil, nil
- case hashNode:
- key = keyrest
- copy(wantHash[:], cld)
- case valueNode:
- return cld, nil
- }
- }
-}
-
-// proofToPath converts a merkle proof to trie node path. The main purpose of
-// this function is recovering a node path from the merkle proof stream. All
-// necessary nodes will be resolved and leave the remaining as hashnode.
-//
-// The given edge proof is allowed to be an existent or non-existent proof.
-func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyValueReader, allowNonExistent bool) (node, []byte, error) {
- // resolveNode retrieves and resolves trie node from merkle proof stream
- resolveNode := func(hash common.Hash) (node, error) {
- buf, _ := proofDb.Get(hash[:])
- if buf == nil {
- return nil, fmt.Errorf("proof node (hash %064x) missing", hash)
- }
- n, err := decodeNode(hash[:], buf)
- if err != nil {
- return nil, fmt.Errorf("bad proof node %v", err)
- }
- return n, err
- }
- // If the root node is empty, resolve it first.
- // Root node must be included in the proof.
- if root == nil {
- n, err := resolveNode(rootHash)
- if err != nil {
- return nil, nil, err
- }
- root = n
- }
- var (
- err error
- child, parent node
- keyrest []byte
- valnode []byte
- )
- key, parent = keybytesToHex(key), root
- for {
- keyrest, child = get(parent, key, false)
- switch cld := child.(type) {
- case nil:
- // The trie doesn't contain the key. It's possible
- // the proof is a non-existing proof, but at least
- // we can prove all resolved nodes are correct, it's
- // enough for us to prove range.
- if allowNonExistent {
- return root, nil, nil
- }
- return nil, nil, errors.New("the node is not contained in trie")
- case *shortNode:
- key, parent = keyrest, child // Already resolved
- continue
- case *fullNode:
- key, parent = keyrest, child // Already resolved
- continue
- case hashNode:
- child, err = resolveNode(common.BytesToHash(cld))
- if err != nil {
- return nil, nil, err
- }
- case valueNode:
- valnode = cld
- }
- // Link the parent and child.
- switch pnode := parent.(type) {
- case *shortNode:
- pnode.Val = child
- case *fullNode:
- pnode.Children[key[0]] = child
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", pnode, pnode))
- }
- if len(valnode) > 0 {
- return root, valnode, nil // The whole path is resolved
- }
- key, parent = keyrest, child
- }
-}
-
-// unsetInternal removes all internal node references(hashnode, embedded node).
-// It should be called after a trie is constructed with two edge paths. Also
-// the given boundary keys must be the one used to construct the edge paths.
-//
-// It's the key step for range proof. All visited nodes should be marked dirty
-// since the node content might be modified. Besides it can happen that some
-// fullnodes only have one child which is disallowed. But if the proof is valid,
-// the missing children will be filled, otherwise it will be thrown anyway.
-//
-// Note we have the assumption here the given boundary keys are different
-// and right is larger than left.
-func unsetInternal(n node, left []byte, right []byte) (bool, error) {
- left, right = keybytesToHex(left), keybytesToHex(right)
-
- // Step down to the fork point. There are two scenarios can happen:
- // - the fork point is a shortnode: either the key of left proof or
- // right proof doesn't match with shortnode's key.
- // - the fork point is a fullnode: both two edge proofs are allowed
- // to point to a non-existent key.
- var (
- pos = 0
- parent node
-
- // fork indicator, 0 means no fork, -1 means proof is less, 1 means proof is greater
- shortForkLeft, shortForkRight int
- )
-findFork:
- for {
- switch rn := (n).(type) {
- case *shortNode:
- rn.flags = nodeFlag{dirty: true}
-
- // If either the key of left proof or right proof doesn't match with
- // shortnode, stop here and the forkpoint is the shortnode.
- if len(left)-pos < len(rn.Key) {
- shortForkLeft = bytes.Compare(left[pos:], rn.Key)
- } else {
- shortForkLeft = bytes.Compare(left[pos:pos+len(rn.Key)], rn.Key)
- }
- if len(right)-pos < len(rn.Key) {
- shortForkRight = bytes.Compare(right[pos:], rn.Key)
- } else {
- shortForkRight = bytes.Compare(right[pos:pos+len(rn.Key)], rn.Key)
- }
- if shortForkLeft != 0 || shortForkRight != 0 {
- break findFork
- }
- parent = n
- n, pos = rn.Val, pos+len(rn.Key)
- case *fullNode:
- rn.flags = nodeFlag{dirty: true}
-
- // If either the node pointed by left proof or right proof is nil,
- // stop here and the forkpoint is the fullnode.
- leftnode, rightnode := rn.Children[left[pos]], rn.Children[right[pos]]
- if leftnode == nil || rightnode == nil || leftnode != rightnode {
- break findFork
- }
- parent = n
- n, pos = rn.Children[left[pos]], pos+1
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
- }
- switch rn := n.(type) {
- case *shortNode:
- // There can have these five scenarios:
- // - both proofs are less than the trie path => no valid range
- // - both proofs are greater than the trie path => no valid range
- // - left proof is less and right proof is greater => valid range, unset the shortnode entirely
- // - left proof points to the shortnode, but right proof is greater
- // - right proof points to the shortnode, but left proof is less
- if shortForkLeft == -1 && shortForkRight == -1 {
- return false, errors.New("empty range")
- }
- if shortForkLeft == 1 && shortForkRight == 1 {
- return false, errors.New("empty range")
- }
- if shortForkLeft != 0 && shortForkRight != 0 {
- // The fork point is root node, unset the entire trie
- if parent == nil {
- return true, nil
- }
- parent.(*fullNode).Children[left[pos-1]] = nil
- return false, nil
- }
- // Only one proof points to non-existent key.
- if shortForkRight != 0 {
- if _, ok := rn.Val.(valueNode); ok {
- // The fork point is root node, unset the entire trie
- if parent == nil {
- return true, nil
- }
- parent.(*fullNode).Children[left[pos-1]] = nil
- return false, nil
- }
- return false, unset(rn, rn.Val, left[pos:], len(rn.Key), false)
- }
- if shortForkLeft != 0 {
- if _, ok := rn.Val.(valueNode); ok {
- // The fork point is root node, unset the entire trie
- if parent == nil {
- return true, nil
- }
- parent.(*fullNode).Children[right[pos-1]] = nil
- return false, nil
- }
- return false, unset(rn, rn.Val, right[pos:], len(rn.Key), true)
- }
- return false, nil
- case *fullNode:
- // unset all internal nodes in the forkpoint
- for i := left[pos] + 1; i < right[pos]; i++ {
- rn.Children[i] = nil
- }
- if err := unset(rn, rn.Children[left[pos]], left[pos:], 1, false); err != nil {
- return false, err
- }
- if err := unset(rn, rn.Children[right[pos]], right[pos:], 1, true); err != nil {
- return false, err
- }
- return false, nil
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
-}
-
-// unset removes all internal node references either the left most or right most.
-// It can meet these scenarios:
-//
-// - The given path is existent in the trie, unset the associated nodes with the
-// specific direction
-// - The given path is non-existent in the trie
-// - the fork point is a fullnode, the corresponding child pointed by path
-// is nil, return
-// - the fork point is a shortnode, the shortnode is included in the range,
-// keep the entire branch and return.
-// - the fork point is a shortnode, the shortnode is excluded in the range,
-// unset the entire branch.
-func unset(parent node, child node, key []byte, pos int, removeLeft bool) error {
- switch cld := child.(type) {
- case *fullNode:
- if removeLeft {
- for i := 0; i < int(key[pos]); i++ {
- cld.Children[i] = nil
- }
- cld.flags = nodeFlag{dirty: true}
- } else {
- for i := key[pos] + 1; i < 16; i++ {
- cld.Children[i] = nil
- }
- cld.flags = nodeFlag{dirty: true}
- }
- return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft)
- case *shortNode:
- if len(key[pos:]) < len(cld.Key) || !bytes.Equal(cld.Key, key[pos:pos+len(cld.Key)]) {
- // Find the fork point, it's an non-existent branch.
- if removeLeft {
- if bytes.Compare(cld.Key, key[pos:]) < 0 {
- // The key of fork shortnode is less than the path
- // (it belongs to the range), unset the entire
- // branch. The parent must be a fullnode.
- fn := parent.(*fullNode)
- fn.Children[key[pos-1]] = nil
- }
- //else {
- // The key of fork shortnode is greater than the
- // path(it doesn't belong to the range), keep
- // it with the cached hash available.
- //}
- } else {
- if bytes.Compare(cld.Key, key[pos:]) > 0 {
- // The key of fork shortnode is greater than the
- // path(it belongs to the range), unset the entries
- // branch. The parent must be a fullnode.
- fn := parent.(*fullNode)
- fn.Children[key[pos-1]] = nil
- }
- //else {
- // The key of fork shortnode is less than the
- // path(it doesn't belong to the range), keep
- // it with the cached hash available.
- //}
- }
- return nil
- }
- if _, ok := cld.Val.(valueNode); ok {
- fn := parent.(*fullNode)
- fn.Children[key[pos-1]] = nil
- return nil
- }
- cld.flags = nodeFlag{dirty: true}
- return unset(cld, cld.Val, key, pos+len(cld.Key), removeLeft)
- case nil:
- // If the node is nil, then it's a child of the fork point
- // fullnode(it's a non-existent branch).
- return nil
- default:
- panic("it shouldn't happen") // hashNode, valueNode
- }
-}
-
-// hasRightElement returns the indicator whether there exists more elements
-// on the right side of the given path. The given path can point to an existent
-// key or a non-existent one. This function has the assumption that the whole
-// path should already be resolved.
-func hasRightElement(node node, key []byte) bool {
- pos, key := 0, keybytesToHex(key)
- for node != nil {
- switch rn := node.(type) {
- case *fullNode:
- for i := key[pos] + 1; i < 16; i++ {
- if rn.Children[i] != nil {
- return true
- }
- }
- node, pos = rn.Children[key[pos]], pos+1
- case *shortNode:
- if len(key)-pos < len(rn.Key) || !bytes.Equal(rn.Key, key[pos:pos+len(rn.Key)]) {
- return bytes.Compare(rn.Key, key[pos:]) > 0
- }
- node, pos = rn.Val, pos+len(rn.Key)
- case valueNode:
- return false // We have resolved the whole path
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", node, node)) // hashnode
- }
- }
- return false
-}
-
-// VerifyRangeProof checks whether the given leaf nodes and edge proof
-// can prove the given trie leaves range is matched with the specific root.
-// Besides, the range should be consecutive (no gap inside) and monotonic
-// increasing.
-//
-// Note the given proof actually contains two edge proofs. Both of them can
-// be non-existent proofs. For example the first proof is for a non-existent
-// key 0x03, the last proof is for a non-existent key 0x10. The given batch
-// leaves are [0x04, 0x05, .. 0x09]. It's still feasible to prove the given
-// batch is valid.
-//
-// The firstKey is paired with firstProof, not necessarily the same as keys[0]
-// (unless firstProof is an existent proof). Similarly, lastKey and lastProof
-// are paired.
-//
-// Expect the normal case, this function can also be used to verify the following
-// range proofs:
-//
-// - All elements proof. In this case the proof can be nil, but the range should
-// be all the leaves in the trie.
-//
-// - One element proof. In this case no matter the edge proof is a non-existent
-// proof or not, we can always verify the correctness of the proof.
-//
-// - Zero element proof. In this case a single non-existent proof is enough to prove.
-// Besides, if there are still some other leaves available on the right side, then
-// an error will be returned.
-//
-// Except returning the error to indicate the proof is valid or not, the function will
-// also return a flag to indicate whether there exists more accounts/slots in the trie.
-//
-// Note: This method does not verify that the proof is of minimal form. If the input
-// proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful'
-// data, then the proof will still be accepted.
-func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
- if len(keys) != len(values) {
- return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
- }
- // Ensure the received batch is monotonic increasing and contains no deletions
- for i := 0; i < len(keys)-1; i++ {
- if bytes.Compare(keys[i], keys[i+1]) >= 0 {
- return false, errors.New("range is not monotonically increasing")
- }
- }
- for _, value := range values {
- if len(value) == 0 {
- return false, errors.New("range contains deletion")
- }
- }
- // Special case, there is no edge proof at all. The given range is expected
- // to be the whole leaf-set in the trie.
- if proof == nil {
- tr := NewStackTrie(nil)
- for index, key := range keys {
- tr.Update(key, values[index])
- }
- if have, want := tr.Hash(), rootHash; have != want {
- return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
- }
- return false, nil // No more elements
- }
- // Special case, there is a provided edge proof but zero key/value
- // pairs, ensure there are no more accounts / slots in the trie.
- if len(keys) == 0 {
- root, val, err := proofToPath(rootHash, nil, firstKey, proof, true)
- if err != nil {
- return false, err
- }
- if val != nil || hasRightElement(root, firstKey) {
- return false, errors.New("more entries available")
- }
- return false, nil
- }
- var lastKey = keys[len(keys)-1]
- // Special case, there is only one element and two edge keys are same.
- // In this case, we can't construct two edge paths. So handle it here.
- if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
- root, val, err := proofToPath(rootHash, nil, firstKey, proof, false)
- if err != nil {
- return false, err
- }
- if !bytes.Equal(firstKey, keys[0]) {
- return false, errors.New("correct proof but invalid key")
- }
- if !bytes.Equal(val, values[0]) {
- return false, errors.New("correct proof but invalid data")
- }
- return hasRightElement(root, firstKey), nil
- }
- // Ok, in all other cases, we require two edge paths available.
- // First check the validity of edge keys.
- if bytes.Compare(firstKey, lastKey) >= 0 {
- return false, errors.New("invalid edge keys")
- }
- // todo(rjl493456442) different length edge keys should be supported
- if len(firstKey) != len(lastKey) {
- return false, fmt.Errorf("inconsistent edge keys (%d != %d)", len(firstKey), len(lastKey))
- }
- // Convert the edge proofs to edge trie paths. Then we can
- // have the same tree architecture with the original one.
- // For the first edge proof, non-existent proof is allowed.
- root, _, err := proofToPath(rootHash, nil, firstKey, proof, true)
- if err != nil {
- return false, err
- }
- // Pass the root node here, the second path will be merged
- // with the first one. For the last edge proof, non-existent
- // proof is also allowed.
- root, _, err = proofToPath(rootHash, root, lastKey, proof, true)
- if err != nil {
- return false, err
- }
- // Remove all internal references. All the removed parts should
- // be re-filled(or re-constructed) by the given leaves range.
- empty, err := unsetInternal(root, firstKey, lastKey)
- if err != nil {
- return false, err
- }
- // Rebuild the trie with the leaf stream, the shape of trie
- // should be same with the original one.
- tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()}
- if empty {
- tr.root = nil
- }
- for index, key := range keys {
- tr.Update(key, values[index])
- }
- if tr.Hash() != rootHash {
- return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
- }
- return hasRightElement(tr.root, keys[len(keys)-1]), nil
-}
-
-// get returns the child of the given node. Return nil if the
-// node with specified key doesn't exist at all.
-//
-// There is an additional flag `skipResolved`. If it's set then
-// all resolved nodes won't be returned.
-func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
- for {
- switch n := tn.(type) {
- case *shortNode:
- if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
- return nil, nil
- }
- tn = n.Val
- key = key[len(n.Key):]
- if !skipResolved {
- return key, tn
- }
- case *fullNode:
- tn = n.Children[key[0]]
- key = key[1:]
- if !skipResolved {
- return key, tn
- }
- case hashNode:
- return key, n
- case nil:
- return key, nil
- case valueNode:
- return nil, n
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
- }
- }
-}
diff --git a/trie/proof_test.go b/trie/proof_test.go
deleted file mode 100644
index c266dc2595..0000000000
--- a/trie/proof_test.go
+++ /dev/null
@@ -1,1012 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- crand "crypto/rand"
- "encoding/binary"
- "fmt"
- mrand "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb/memorydb"
- "golang.org/x/exp/slices"
-)
-
-// Prng is a pseudo random number generator seeded by strong randomness.
-// The randomness is printed on startup in order to make failures reproducible.
-var prng = initRnd()
-
-func initRnd() *mrand.Rand {
- var seed [8]byte
- crand.Read(seed[:])
- rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
- fmt.Printf("Seed: %x\n", seed)
- return rnd
-}
-
-func randBytes(n int) []byte {
- r := make([]byte, n)
- prng.Read(r)
- return r
-}
-
-// makeProvers creates Merkle trie provers based on different implementations to
-// test all variations.
-func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database {
- var provers []func(key []byte) *memorydb.Database
-
- // Create a direct trie based Merkle prover
- provers = append(provers, func(key []byte) *memorydb.Database {
- proof := memorydb.New()
- trie.Prove(key, proof)
- return proof
- })
- // Create a leaf iterator based Merkle prover
- provers = append(provers, func(key []byte) *memorydb.Database {
- proof := memorydb.New()
- if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
- for _, p := range it.Prove() {
- proof.Put(crypto.Keccak256(p), p)
- }
- }
- return proof
- })
- return provers
-}
-
-func TestProof(t *testing.T) {
- trie, vals := randomTrie(500)
- root := trie.Hash()
- for i, prover := range makeProvers(trie) {
- for _, kv := range vals {
- proof := prover(kv.k)
- if proof == nil {
- t.Fatalf("prover %d: missing key %x while constructing proof", i, kv.k)
- }
- val, err := VerifyProof(root, kv.k, proof)
- if err != nil {
- t.Fatalf("prover %d: failed to verify proof for key %x: %v\nraw proof: %x", i, kv.k, err, proof)
- }
- if !bytes.Equal(val, kv.v) {
- t.Fatalf("prover %d: verified value mismatch for key %x: have %x, want %x", i, kv.k, val, kv.v)
- }
- }
- }
-}
-
-func TestOneElementProof(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- updateString(trie, "k", "v")
- for i, prover := range makeProvers(trie) {
- proof := prover([]byte("k"))
- if proof == nil {
- t.Fatalf("prover %d: nil proof", i)
- }
- if proof.Len() != 1 {
- t.Errorf("prover %d: proof should have one element", i)
- }
- val, err := VerifyProof(trie.Hash(), []byte("k"), proof)
- if err != nil {
- t.Fatalf("prover %d: failed to verify proof: %v\nraw proof: %x", i, err, proof)
- }
- if !bytes.Equal(val, []byte("v")) {
- t.Fatalf("prover %d: verified value mismatch: have %x, want 'k'", i, val)
- }
- }
-}
-
-func TestBadProof(t *testing.T) {
- trie, vals := randomTrie(800)
- root := trie.Hash()
- for i, prover := range makeProvers(trie) {
- for _, kv := range vals {
- proof := prover(kv.k)
- if proof == nil {
- t.Fatalf("prover %d: nil proof", i)
- }
- it := proof.NewIterator(nil, nil)
- for i, d := 0, mrand.Intn(proof.Len()); i <= d; i++ {
- it.Next()
- }
- key := it.Key()
- val, _ := proof.Get(key)
- proof.Delete(key)
- it.Release()
-
- mutateByte(val)
- proof.Put(crypto.Keccak256(val), val)
-
- if _, err := VerifyProof(root, kv.k, proof); err == nil {
- t.Fatalf("prover %d: expected proof to fail for key %x", i, kv.k)
- }
- }
- }
-}
-
-// Tests that missing keys can also be proven. The test explicitly uses a single
-// entry trie and checks for missing keys both before and after the single entry.
-func TestMissingKeyProof(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- updateString(trie, "k", "v")
-
- for i, key := range []string{"a", "j", "l", "z"} {
- proof := memorydb.New()
- trie.Prove([]byte(key), proof)
-
- if proof.Len() != 1 {
- t.Errorf("test %d: proof should have one element", i)
- }
- val, err := VerifyProof(trie.Hash(), []byte(key), proof)
- if err != nil {
- t.Fatalf("test %d: failed to verify proof: %v\nraw proof: %x", i, err, proof)
- }
- if val != nil {
- t.Fatalf("test %d: verified value mismatch: have %x, want nil", i, val)
- }
- }
-}
-
-// TestRangeProof tests normal range proof with both edge proofs
-// as the existent proof. The test cases are generated randomly.
-func TestRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
- for i := 0; i < 500; i++ {
- start := mrand.Intn(len(entries))
- end := mrand.Intn(len(entries)-start) + start + 1
-
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
- if err != nil {
- t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
- }
- }
-}
-
-// TestRangeProof tests normal range proof with two non-existent proofs.
-// The test cases are generated randomly.
-func TestRangeProofWithNonExistentProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
- for i := 0; i < 500; i++ {
- start := mrand.Intn(len(entries))
- end := mrand.Intn(len(entries)-start) + start + 1
- proof := memorydb.New()
-
- // Short circuit if the decreased key is same with the previous key
- first := decreaseKey(common.CopyBytes(entries[start].k))
- if start != 0 && bytes.Equal(first, entries[start-1].k) {
- continue
- }
- // Short circuit if the decreased key is underflow
- if bytes.Compare(first, entries[start].k) > 0 {
- continue
- }
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
- if err != nil {
- t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
- }
- }
-}
-
-// TestRangeProofWithInvalidNonExistentProof tests such scenarios:
-// - There exists a gap between the first element and the left edge proof
-func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // Case 1
- start, end := 100, 200
- first := decreaseKey(common.CopyBytes(entries[start].k))
-
- proof := memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- start = 105 // Gap created
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := start; i < end; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, k, v, proof)
- if err == nil {
- t.Fatalf("Expected to detect the error, got nil")
- }
-}
-
-// TestOneElementRangeProof tests the proof with only one
-// element. The first edge proof can be existent one or
-// non-existent one.
-func TestOneElementRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // One element with existent edge proof, both edge proofs
- // point to the SAME key.
- start := 1000
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- _, err := VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // One element with left non-existent edge proof
- start = 1000
- first := decreaseKey(common.CopyBytes(entries[start].k))
- proof = memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // One element with right non-existent edge proof
- start = 1000
- last := increaseKey(common.CopyBytes(entries[start].k))
- proof = memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // One element with two non-existent edge proofs
- start = 1000
- first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k))
- proof = memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // Test the mini trie with only a single element.
- tinyTrie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- entry := &kv{randBytes(32), randBytes(20), false}
- tinyTrie.MustUpdate(entry.k, entry.v)
-
- first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
- last = entry.k
- proof = memorydb.New()
- if err := tinyTrie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := tinyTrie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(tinyTrie.Hash(), first, [][]byte{entry.k}, [][]byte{entry.v}, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-}
-
-// TestAllElementsProof tests the range proof with all elements.
-// The edge proofs can be nil.
-func TestAllElementsProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var k [][]byte
- var v [][]byte
- for i := 0; i < len(entries); i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), nil, k, v, nil)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // With edge proofs, it should still work.
- proof := memorydb.New()
- if err := trie.Prove(entries[0].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), k[0], k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- // Even with non-existent edge proofs, it should still work.
- proof = memorydb.New()
- first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-}
-
-// TestSingleSideRangeProof tests the range starts from zero.
-func TestSingleSideRangeProof(t *testing.T) {
- for i := 0; i < 64; i++ {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- var entries []*kv
- for i := 0; i < 4096; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
- for _, pos := range cases {
- proof := memorydb.New()
- if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[pos].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := 0; i <= pos; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
- }
-}
-
-// TestBadRangeProof tests a few cases which the proof is wrong.
-// The prover is expected to detect the error.
-func TestBadRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- for i := 0; i < 500; i++ {
- start := mrand.Intn(len(entries))
- end := mrand.Intn(len(entries)-start) + start + 1
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- var first = keys[0]
- testcase := mrand.Intn(6)
- var index int
- switch testcase {
- case 0:
- // Modified key
- index = mrand.Intn(end - start)
- keys[index] = randBytes(32) // In theory it can't be same
- case 1:
- // Modified val
- index = mrand.Intn(end - start)
- vals[index] = randBytes(20) // In theory it can't be same
- case 2:
- // Gapped entry slice
- index = mrand.Intn(end - start)
- if (index == 0 && start < 100) || (index == end-start-1) {
- continue
- }
- keys = append(keys[:index], keys[index+1:]...)
- vals = append(vals[:index], vals[index+1:]...)
- case 3:
- // Out of order
- index1 := mrand.Intn(end - start)
- index2 := mrand.Intn(end - start)
- if index1 == index2 {
- continue
- }
- keys[index1], keys[index2] = keys[index2], keys[index1]
- vals[index1], vals[index2] = vals[index2], vals[index1]
- case 4:
- // Set random key to nil, do nothing
- index = mrand.Intn(end - start)
- keys[index] = nil
- case 5:
- // Set random value to nil, deletion
- index = mrand.Intn(end - start)
- vals[index] = nil
- }
- _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
- if err == nil {
- t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1)
- }
- }
-}
-
-// TestGappedRangeProof focuses on the small trie with embedded nodes.
-// If the gapped node is embedded in the trie, it should be detected too.
-func TestGappedRangeProof(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- var entries []*kv // Sorted entries
- for i := byte(0); i < 10; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- first, last := 2, 8
- proof := memorydb.New()
- if err := trie.Prove(entries[first].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[last-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := first; i < last; i++ {
- if i == (first+last)/2 {
- continue
- }
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
- if err == nil {
- t.Fatal("expect error, got nil")
- }
-}
-
-// TestSameSideProofs tests the element is not in the range covered by proofs
-func TestSameSideProofs(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- pos := 1000
- first := common.CopyBytes(entries[0].k)
-
- proof := memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[2000].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
- if err == nil {
- t.Fatalf("Expected error, got nil")
- }
-
- first = increaseKey(common.CopyBytes(entries[pos].k))
- last := increaseKey(common.CopyBytes(entries[pos].k))
- last = increaseKey(last)
-
- proof = memorydb.New()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
- if err == nil {
- t.Fatalf("Expected error, got nil")
- }
-}
-
-func TestHasRightElement(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- var entries []*kv
- for i := 0; i < 4096; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []struct {
- start int
- end int
- hasMore bool
- }{
- {-1, 1, true}, // single element with non-existent left proof
- {0, 1, true}, // single element with existent left proof
- {0, 10, true},
- {50, 100, true},
- {50, len(entries), false}, // No more element expected
- {len(entries) - 1, len(entries), false}, // Single last element with two existent proofs(point to same key)
- {0, len(entries), false}, // The whole set with existent left proof
- {-1, len(entries), false}, // The whole set with non-existent left proof
- }
- for _, c := range cases {
- var (
- firstKey []byte
- start = c.start
- end = c.end
- proof = memorydb.New()
- )
- if c.start == -1 {
- firstKey, start = common.Hash{}.Bytes(), 0
- if err := trie.Prove(firstKey, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- } else {
- firstKey = entries[c.start].k
- if err := trie.Prove(entries[c.start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- }
- if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := start; i < end; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- if hasMore != c.hasMore {
- t.Fatalf("Wrong hasMore indicator, want %t, got %t", c.hasMore, hasMore)
- }
- }
-}
-
-// TestEmptyRangeProof tests the range proof with "no" element.
-// The first edge proof must be a non-existent proof.
-func TestEmptyRangeProof(t *testing.T) {
- trie, vals := randomTrie(4096)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []struct {
- pos int
- err bool
- }{
- {len(entries) - 1, false},
- {500, true},
- }
- for _, c := range cases {
- proof := memorydb.New()
- first := increaseKey(common.CopyBytes(entries[c.pos].k))
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, proof)
- if c.err && err == nil {
- t.Fatalf("Expected error, got nil")
- }
- if !c.err && err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
-}
-
-// TestBloatedProof tests a malicious proof, where the proof is more or less the
-// whole trie. Previously we didn't accept such packets, but the new APIs do, so
-// lets leave this test as a bit weird, but present.
-func TestBloatedProof(t *testing.T) {
- // Use a small trie
- trie, kvs := nonRandomTrie(100)
- var entries []*kv
- for _, kv := range kvs {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
- var keys [][]byte
- var vals [][]byte
-
- proof := memorydb.New()
- // In the 'malicious' case, we add proofs for every single item
- // (but only one key/value pair used as leaf)
- for i, entry := range entries {
- trie.Prove(entry.k, proof)
- if i == 50 {
- keys = append(keys, entry.k)
- vals = append(vals, entry.v)
- }
- }
- // For reference, we use the same function, but _only_ prove the first
- // and last element
- want := memorydb.New()
- trie.Prove(keys[0], want)
- trie.Prove(keys[len(keys)-1], want)
-
- if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof); err != nil {
- t.Fatalf("expected bloated proof to succeed, got %v", err)
- }
-}
-
-// TestEmptyValueRangeProof tests normal range proof with both edge proofs
-// as the existent proof, but with an extra empty value included, which is a
-// noop technically, but practically should be rejected.
-func TestEmptyValueRangeProof(t *testing.T) {
- trie, values := randomTrie(512)
- var entries []*kv
- for _, kv := range values {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // Create a new entry with a slightly modified key
- mid := len(entries) / 2
- key := common.CopyBytes(entries[mid-1].k)
- for n := len(key) - 1; n >= 0; n-- {
- if key[n] < 0xff {
- key[n]++
- break
- }
- }
- noop := &kv{key, []byte{}, false}
- entries = append(append(append([]*kv{}, entries[:mid]...), noop), entries[mid:]...)
-
- start, end := 1, len(entries)-1
-
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var vals [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
- if err == nil {
- t.Fatalf("Expected failure on noop entry")
- }
-}
-
-// TestAllElementsEmptyValueRangeProof tests the range proof with all elements,
-// but with an extra empty value included, which is a noop technically, but
-// practically should be rejected.
-func TestAllElementsEmptyValueRangeProof(t *testing.T) {
- trie, values := randomTrie(512)
- var entries []*kv
- for _, kv := range values {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- // Create a new entry with a slightly modified key
- mid := len(entries) / 2
- key := common.CopyBytes(entries[mid-1].k)
- for n := len(key) - 1; n >= 0; n-- {
- if key[n] < 0xff {
- key[n]++
- break
- }
- }
- noop := &kv{key, []byte{}, false}
- entries = append(append(append([]*kv{}, entries[:mid]...), noop), entries[mid:]...)
-
- var keys [][]byte
- var vals [][]byte
- for i := 0; i < len(entries); i++ {
- keys = append(keys, entries[i].k)
- vals = append(vals, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), nil, keys, vals, nil)
- if err == nil {
- t.Fatalf("Expected failure on noop entry")
- }
-}
-
-// mutateByte changes one byte in b.
-func mutateByte(b []byte) {
- for r := mrand.Intn(len(b)); ; {
- new := byte(mrand.Intn(255))
- if new != b[r] {
- b[r] = new
- break
- }
- }
-}
-
-func increaseKey(key []byte) []byte {
- for i := len(key) - 1; i >= 0; i-- {
- key[i]++
- if key[i] != 0x0 {
- break
- }
- }
- return key
-}
-
-func decreaseKey(key []byte) []byte {
- for i := len(key) - 1; i >= 0; i-- {
- key[i]--
- if key[i] != 0xff {
- break
- }
- }
- return key
-}
-
-func BenchmarkProve(b *testing.B) {
- trie, vals := randomTrie(100)
- var keys []string
- for k := range vals {
- keys = append(keys, k)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- kv := vals[keys[i%len(keys)]]
- proofs := memorydb.New()
- if trie.Prove(kv.k, proofs); proofs.Len() == 0 {
- b.Fatalf("zero length proof for %x", kv.k)
- }
- }
-}
-
-func BenchmarkVerifyProof(b *testing.B) {
- trie, vals := randomTrie(100)
- root := trie.Hash()
- var keys []string
- var proofs []*memorydb.Database
- for k := range vals {
- keys = append(keys, k)
- proof := memorydb.New()
- trie.Prove([]byte(k), proof)
- proofs = append(proofs, proof)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- im := i % len(keys)
- if _, err := VerifyProof(root, []byte(keys[im]), proofs[im]); err != nil {
- b.Fatalf("key %x: %v", keys[im], err)
- }
- }
-}
-
-func BenchmarkVerifyRangeProof10(b *testing.B) { benchmarkVerifyRangeProof(b, 10) }
-func BenchmarkVerifyRangeProof100(b *testing.B) { benchmarkVerifyRangeProof(b, 100) }
-func BenchmarkVerifyRangeProof1000(b *testing.B) { benchmarkVerifyRangeProof(b, 1000) }
-func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b, 5000) }
-
-func benchmarkVerifyRangeProof(b *testing.B, size int) {
- trie, vals := randomTrie(8192)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- start := 2
- end := start + size
- proof := memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- b.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(entries[end-1].k, proof); err != nil {
- b.Fatalf("Failed to prove the last node %v", err)
- }
- var keys [][]byte
- var values [][]byte
- for i := start; i < end; i++ {
- keys = append(keys, entries[i].k)
- values = append(values, entries[i].v)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, proof)
- if err != nil {
- b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
- }
- }
-}
-
-func BenchmarkVerifyRangeNoProof10(b *testing.B) { benchmarkVerifyRangeNoProof(b, 100) }
-func BenchmarkVerifyRangeNoProof500(b *testing.B) { benchmarkVerifyRangeNoProof(b, 500) }
-func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof(b, 1000) }
-
-func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
- trie, vals := randomTrie(size)
- var entries []*kv
- for _, kv := range vals {
- entries = append(entries, kv)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var keys [][]byte
- var values [][]byte
- for _, entry := range entries {
- keys = append(keys, entry.k)
- values = append(values, entry.v)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, nil)
- if err != nil {
- b.Fatalf("Expected no error, got %v", err)
- }
- }
-}
-
-func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- vals := make(map[string]*kv)
- for i := byte(0); i < 100; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
- trie.MustUpdate(value.k, value.v)
- trie.MustUpdate(value2.k, value2.v)
- vals[string(value.k)] = value
- vals[string(value2.k)] = value2
- }
- for i := 0; i < n; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- vals[string(value.k)] = value
- }
- return trie, vals
-}
-
-func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- vals := make(map[string]*kv)
- max := uint64(0xffffffffffffffff)
- for i := uint64(0); i < uint64(n); i++ {
- value := make([]byte, 32)
- key := make([]byte, 32)
- binary.LittleEndian.PutUint64(key, i)
- binary.LittleEndian.PutUint64(value, i-max)
- //value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- elem := &kv{key, value, false}
- trie.MustUpdate(elem.k, elem.v)
- vals[string(elem.k)] = elem
- }
- return trie, vals
-}
-
-func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
- keys := [][]byte{
- common.Hex2Bytes("aa10000000000000000000000000000000000000000000000000000000000000"),
- common.Hex2Bytes("aa20000000000000000000000000000000000000000000000000000000000000"),
- }
- vals := [][]byte{
- common.Hex2Bytes("02"),
- common.Hex2Bytes("03"),
- }
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i, key := range keys {
- trie.MustUpdate(key, vals[i])
- }
- root := trie.Hash()
- proof := memorydb.New()
- start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
- if err := trie.Prove(start, proof); err != nil {
- t.Fatalf("failed to prove start: %v", err)
- }
- if err := trie.Prove(keys[len(keys)-1], proof); err != nil {
- t.Fatalf("failed to prove end: %v", err)
- }
-
- more, err := VerifyRangeProof(root, start, keys, vals, proof)
- if err != nil {
- t.Fatalf("failed to verify range proof: %v", err)
- }
- if more != false {
- t.Error("expected more to be false")
- }
-}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
deleted file mode 100644
index 887ddd4090..0000000000
--- a/trie/secure_trie.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/rlp"
-)
-
-// SecureTrie is the old name of StateTrie.
-// Deprecated: use StateTrie.
-type SecureTrie = StateTrie
-
-// NewSecure creates a new StateTrie.
-// Deprecated: use NewStateTrie.
-func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db database.Database) (*SecureTrie, error) {
- id := &ID{
- StateRoot: stateRoot,
- Owner: owner,
- Root: root,
- }
- return NewStateTrie(id, db)
-}
-
-// StateTrie wraps a trie with key hashing. In a stateTrie trie, all
-// access operations hash the key using keccak256. This prevents
-// calling code from creating long chains of nodes that
-// increase the access time.
-//
-// Contrary to a regular trie, a StateTrie can only be created with
-// New and must have an attached database. The database also stores
-// the preimage of each key if preimage recording is enabled.
-//
-// StateTrie is not safe for concurrent use.
-type StateTrie struct {
- trie Trie
- db database.Database
- hashKeyBuf [common.HashLength]byte
- secKeyCache map[string][]byte
- secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch
-}
-
-// NewStateTrie creates a trie with an existing root node from a backing database.
-//
-// If root is the zero hash or the sha3 hash of an empty string, the
-// trie is initially empty. Otherwise, New will panic if db is nil
-// and returns MissingNodeError if the root node cannot be found.
-func NewStateTrie(id *ID, db database.Database) (*StateTrie, error) {
- if db == nil {
- panic("trie.NewStateTrie called without a database")
- }
- trie, err := New(id, db)
- if err != nil {
- return nil, err
- }
- return &StateTrie{trie: *trie, db: db}, nil
-}
-
-// MustGet returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-//
-// This function will omit any encountered error but just
-// print out an error message.
-func (t *StateTrie) MustGet(key []byte) []byte {
- return t.trie.MustGet(t.hashKey(key))
-}
-
-// GetStorage attempts to retrieve a storage slot with provided account address
-// and slot key. The value bytes must not be modified by the caller.
-// If the specified storage slot is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
- enc, err := t.trie.Get(t.hashKey(key))
- if err != nil || len(enc) == 0 {
- return nil, err
- }
- _, content, _, err := rlp.Split(enc)
- return content, err
-}
-
-// GetAccount attempts to retrieve an account with provided account address.
-// If the specified account is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
- res, err := t.trie.Get(t.hashKey(address.Bytes()))
- if res == nil || err != nil {
- return nil, err
- }
- ret := new(types.StateAccount)
- err = rlp.DecodeBytes(res, ret)
- return ret, err
-}
-
-// GetAccountByHash does the same thing as GetAccount, however it expects an
-// account hash that is the hash of address. This constitutes an abstraction
-// leak, since the client code needs to know the key format.
-func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) {
- res, err := t.trie.Get(addrHash.Bytes())
- if res == nil || err != nil {
- return nil, err
- }
- ret := new(types.StateAccount)
- err = rlp.DecodeBytes(res, ret)
- return ret, err
-}
-
-// GetNode attempts to retrieve a trie node by compact-encoded path. It is not
-// possible to use keybyte-encoding as the path might contain odd nibbles.
-// If the specified trie node is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) {
- return t.trie.GetNode(path)
-}
-
-// MustUpdate associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// This function will omit any encountered error but just print out an
-// error message.
-func (t *StateTrie) MustUpdate(key, value []byte) {
- hk := t.hashKey(key)
- t.trie.MustUpdate(hk, value)
- t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
-}
-
-// UpdateStorage associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// If a node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
- hk := t.hashKey(key)
- v, _ := rlp.EncodeToBytes(value)
- err := t.trie.Update(hk, v)
- if err != nil {
- return err
- }
- t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
- return nil
-}
-
-// UpdateAccount will abstract the write of an account to the secure trie.
-func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error {
- hk := t.hashKey(address.Bytes())
- data, err := rlp.EncodeToBytes(acc)
- if err != nil {
- return err
- }
- if err := t.trie.Update(hk, data); err != nil {
- return err
- }
- t.getSecKeyCache()[string(hk)] = address.Bytes()
- return nil
-}
-
-func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
- return nil
-}
-
-// MustDelete removes any existing value for key from the trie. This function
-// will omit any encountered error but just print out an error message.
-func (t *StateTrie) MustDelete(key []byte) {
- hk := t.hashKey(key)
- delete(t.getSecKeyCache(), string(hk))
- t.trie.MustDelete(hk)
-}
-
-// DeleteStorage removes any existing storage slot from the trie.
-// If the specified trie node is not in the trie, nothing will be changed.
-// If a node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error {
- hk := t.hashKey(key)
- delete(t.getSecKeyCache(), string(hk))
- return t.trie.Delete(hk)
-}
-
-// DeleteAccount abstracts an account deletion from the trie.
-func (t *StateTrie) DeleteAccount(address common.Address) error {
- hk := t.hashKey(address.Bytes())
- delete(t.getSecKeyCache(), string(hk))
- return t.trie.Delete(hk)
-}
-
-// GetKey returns the sha3 preimage of a hashed key that was
-// previously used to store a value.
-func (t *StateTrie) GetKey(shaKey []byte) []byte {
- if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
- return key
- }
- return t.db.Preimage(common.BytesToHash(shaKey))
-}
-
-// Commit collects all dirty nodes in the trie and replaces them with the
-// corresponding node hash. All collected nodes (including dirty leaves if
-// collectLeaf is true) will be encapsulated into a nodeset for return.
-// The returned nodeset can be nil if the trie is clean (nothing to commit).
-// All cached preimages will be also flushed if preimages recording is enabled.
-// Once the trie is committed, it's not usable anymore. A new trie must
-// be created with new root and updated trie database for following usage
-func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
- // Write all the pre-images to the actual disk database
- if len(t.getSecKeyCache()) > 0 {
- preimages := make(map[common.Hash][]byte)
- for hk, key := range t.secKeyCache {
- preimages[common.BytesToHash([]byte(hk))] = key
- }
- t.db.InsertPreimage(preimages)
- t.secKeyCache = make(map[string][]byte)
- }
- // Commit the trie and return its modified nodeset.
- return t.trie.Commit(collectLeaf)
-}
-
-// Hash returns the root hash of StateTrie. It does not write to the
-// database and can be used even if the trie doesn't have one.
-func (t *StateTrie) Hash() common.Hash {
- return t.trie.Hash()
-}
-
-// Copy returns a copy of StateTrie.
-func (t *StateTrie) Copy() *StateTrie {
- return &StateTrie{
- trie: *t.trie.Copy(),
- db: t.db,
- secKeyCache: t.secKeyCache,
- }
-}
-
-// NodeIterator returns an iterator that returns nodes of the underlying trie.
-// Iteration starts at the key after the given start key.
-func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) {
- return t.trie.NodeIterator(start)
-}
-
-// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
-// error but just print out an error message.
-func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator {
- return t.trie.MustNodeIterator(start)
-}
-
-// hashKey returns the hash of key as an ephemeral buffer.
-// The caller must not hold onto the return value because it will become
-// invalid on the next call to hashKey or secKey.
-func (t *StateTrie) hashKey(key []byte) []byte {
- h := newHasher(false)
- h.sha.Reset()
- h.sha.Write(key)
- h.sha.Read(t.hashKeyBuf[:])
- returnHasherToPool(h)
- return t.hashKeyBuf[:]
-}
-
-// getSecKeyCache returns the current secure key cache, creating a new one if
-// ownership changed (i.e. the current secure trie is a copy of another owning
-// the actual cache).
-func (t *StateTrie) getSecKeyCache() map[string][]byte {
- if t != t.secKeyCacheOwner {
- t.secKeyCacheOwner = t
- t.secKeyCache = make(map[string][]byte)
- }
- return t.secKeyCache
-}
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
deleted file mode 100644
index 222552cbdd..0000000000
--- a/trie/secure_trie_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "sync"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-func newEmptySecure() *StateTrie {
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- return trie
-}
-
-// makeTestStateTrie creates a large enough secure trie for testing.
-func makeTestStateTrie() (*testDb, *StateTrie, map[string][]byte) {
- // Create an empty trie
- triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
-
- // Fill it with some arbitrary data
- content := make(map[string][]byte)
- for i := byte(0); i < 255; i++ {
- // Map the same data under multiple keys
- key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- // Add some other data to inflate the trie
- for j := byte(3); j < 13; j++ {
- key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
- }
- }
- root, nodes, _ := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
- panic(fmt.Errorf("failed to commit db %v", err))
- }
- // Re-create the trie based on the new state
- trie, _ = NewStateTrie(TrieID(root), triedb)
- return triedb, trie, content
-}
-
-func TestSecureDelete(t *testing.T) {
- trie := newEmptySecure()
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"ether", ""},
- {"dog", "puppy"},
- {"shaman", ""},
- }
- for _, val := range vals {
- if val.v != "" {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- } else {
- trie.MustDelete([]byte(val.k))
- }
- }
- hash := trie.Hash()
- exp := common.HexToHash("29b235a58c3c25ab83010c327d5932bcf05324b7d6b1185e650798034783ca9d")
- if hash != exp {
- t.Errorf("expected %x got %x", exp, hash)
- }
-}
-
-func TestSecureGetKey(t *testing.T) {
- trie := newEmptySecure()
- trie.MustUpdate([]byte("foo"), []byte("bar"))
-
- key := []byte("foo")
- value := []byte("bar")
- seckey := crypto.Keccak256(key)
-
- if !bytes.Equal(trie.MustGet(key), value) {
- t.Errorf("Get did not return bar")
- }
- if k := trie.GetKey(seckey); !bytes.Equal(k, key) {
- t.Errorf("GetKey returned %q, want %q", k, key)
- }
-}
-
-func TestStateTrieConcurrency(t *testing.T) {
- // Create an initial trie and copy if for concurrent access
- _, trie, _ := makeTestStateTrie()
-
- threads := runtime.NumCPU()
- tries := make([]*StateTrie, threads)
- for i := 0; i < threads; i++ {
- tries[i] = trie.Copy()
- }
- // Start a batch of goroutines interacting with the trie
- pend := new(sync.WaitGroup)
- pend.Add(threads)
- for i := 0; i < threads; i++ {
- go func(index int) {
- defer pend.Done()
-
- for j := byte(0); j < 255; j++ {
- // Map the same data under multiple keys
- key, val := common.LeftPadBytes([]byte{byte(index), 1, j}, 32), []byte{j}
- tries[index].MustUpdate(key, val)
-
- key, val = common.LeftPadBytes([]byte{byte(index), 2, j}, 32), []byte{j}
- tries[index].MustUpdate(key, val)
-
- // Add some other data to inflate the trie
- for k := byte(3); k < 13; k++ {
- key, val = common.LeftPadBytes([]byte{byte(index), k, j}, 32), []byte{k, j}
- tries[index].MustUpdate(key, val)
- }
- }
- tries[index].Commit(false)
- }(i)
- }
- // Wait for all threads to finish
- pend.Wait()
-}
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
deleted file mode 100644
index a972dcccaf..0000000000
--- a/trie/stacktrie.go
+++ /dev/null
@@ -1,489 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "errors"
- "sync"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-var (
- stPool = sync.Pool{New: func() any { return new(stNode) }}
- _ = types.TrieHasher((*StackTrie)(nil))
-)
-
-// StackTrieOptions contains the configured options for manipulating the stackTrie.
-type StackTrieOptions struct {
- Writer func(path []byte, hash common.Hash, blob []byte) // The function to commit the dirty nodes
- Cleaner func(path []byte) // The function to clean up dangling nodes
-
- SkipLeftBoundary bool // Flag whether the nodes on the left boundary are skipped for committing
- SkipRightBoundary bool // Flag whether the nodes on the right boundary are skipped for committing
- boundaryGauge metrics.Gauge // Gauge to track how many boundary nodes are met
-}
-
-// NewStackTrieOptions initializes an empty options for stackTrie.
-func NewStackTrieOptions() *StackTrieOptions { return &StackTrieOptions{} }
-
-// WithWriter configures trie node writer within the options.
-func (o *StackTrieOptions) WithWriter(writer func(path []byte, hash common.Hash, blob []byte)) *StackTrieOptions {
- o.Writer = writer
- return o
-}
-
-// WithCleaner configures the cleaner in the option for removing dangling nodes.
-func (o *StackTrieOptions) WithCleaner(cleaner func(path []byte)) *StackTrieOptions {
- o.Cleaner = cleaner
- return o
-}
-
-// WithSkipBoundary configures whether the left and right boundary nodes are
-// filtered for committing, along with a gauge metrics to track how many
-// boundary nodes are met.
-func (o *StackTrieOptions) WithSkipBoundary(skipLeft, skipRight bool, gauge metrics.Gauge) *StackTrieOptions {
- o.SkipLeftBoundary = skipLeft
- o.SkipRightBoundary = skipRight
- o.boundaryGauge = gauge
- return o
-}
-
-// StackTrie is a trie implementation that expects keys to be inserted
-// in order. Once it determines that a subtree will no longer be inserted
-// into, it will hash it and free up the memory it uses.
-type StackTrie struct {
- options *StackTrieOptions
- root *stNode
- h *hasher
-
- first []byte // The (hex-encoded without terminator) key of first inserted entry, tracked as left boundary.
- last []byte // The (hex-encoded without terminator) key of last inserted entry, tracked as right boundary.
-}
-
-// NewStackTrie allocates and initializes an empty trie.
-func NewStackTrie(options *StackTrieOptions) *StackTrie {
- if options == nil {
- options = NewStackTrieOptions()
- }
- return &StackTrie{
- options: options,
- root: stPool.Get().(*stNode),
- h: newHasher(false),
- }
-}
-
-// Update inserts a (key, value) pair into the stack trie.
-func (t *StackTrie) Update(key, value []byte) error {
- if len(value) == 0 {
- return errors.New("trying to insert empty (deletion)")
- }
- k := keybytesToHex(key)
- k = k[:len(k)-1] // chop the termination flag
- if bytes.Compare(t.last, k) >= 0 {
- return errors.New("non-ascending key order")
- }
- // track the first and last inserted entries.
- if t.first == nil {
- t.first = append([]byte{}, k...)
- }
- if t.last == nil {
- t.last = append([]byte{}, k...) // allocate key slice
- } else {
- t.last = append(t.last[:0], k...) // reuse key slice
- }
- t.insert(t.root, k, value, nil)
- return nil
-}
-
-// MustUpdate is a wrapper of Update and will omit any encountered error but
-// just print out an error message.
-func (t *StackTrie) MustUpdate(key, value []byte) {
- if err := t.Update(key, value); err != nil {
- log.Error("Unhandled trie error in StackTrie.Update", "err", err)
- }
-}
-
-// Reset resets the stack trie object to empty state.
-func (t *StackTrie) Reset() {
- t.options = NewStackTrieOptions()
- t.root = stPool.Get().(*stNode)
- t.first = nil
- t.last = nil
-}
-
-// stNode represents a node within a StackTrie
-type stNode struct {
- typ uint8 // node type (as in branch, ext, leaf)
- key []byte // key chunk covered by this (leaf|ext) node
- val []byte // value contained by this node if it's a leaf
- children [16]*stNode // list of children (for branch and exts)
-}
-
-// newLeaf constructs a leaf node with provided node key and value. The key
-// will be deep-copied in the function and safe to modify afterwards, but
-// value is not.
-func newLeaf(key, val []byte) *stNode {
- st := stPool.Get().(*stNode)
- st.typ = leafNode
- st.key = append(st.key, key...)
- st.val = val
- return st
-}
-
-// newExt constructs an extension node with provided node key and child. The
-// key will be deep-copied in the function and safe to modify afterwards.
-func newExt(key []byte, child *stNode) *stNode {
- st := stPool.Get().(*stNode)
- st.typ = extNode
- st.key = append(st.key, key...)
- st.children[0] = child
- return st
-}
-
-// List all values that stNode#nodeType can hold
-const (
- emptyNode = iota
- branchNode
- extNode
- leafNode
- hashedNode
-)
-
-func (n *stNode) reset() *stNode {
- n.key = n.key[:0]
- n.val = nil
- for i := range n.children {
- n.children[i] = nil
- }
- n.typ = emptyNode
- return n
-}
-
-// Helper function that, given a full key, determines the index
-// at which the chunk pointed by st.keyOffset is different from
-// the same chunk in the full key.
-func (n *stNode) getDiffIndex(key []byte) int {
- for idx, nibble := range n.key {
- if nibble != key[idx] {
- return idx
- }
- }
- return len(n.key)
-}
-
-// Helper function to that inserts a (key, value) pair into
-// the trie.
-func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
- switch st.typ {
- case branchNode: /* Branch */
- idx := int(key[0])
-
- // Unresolve elder siblings
- for i := idx - 1; i >= 0; i-- {
- if st.children[i] != nil {
- if st.children[i].typ != hashedNode {
- t.hash(st.children[i], append(path, byte(i)))
- }
- break
- }
- }
-
- // Add new child
- if st.children[idx] == nil {
- st.children[idx] = newLeaf(key[1:], value)
- } else {
- t.insert(st.children[idx], key[1:], value, append(path, key[0]))
- }
-
- case extNode: /* Ext */
- // Compare both key chunks and see where they differ
- diffidx := st.getDiffIndex(key)
-
- // Check if chunks are identical. If so, recurse into
- // the child node. Otherwise, the key has to be split
- // into 1) an optional common prefix, 2) the fullnode
- // representing the two differing path, and 3) a leaf
- // for each of the differentiated subtrees.
- if diffidx == len(st.key) {
- // Ext key and key segment are identical, recurse into
- // the child node.
- t.insert(st.children[0], key[diffidx:], value, append(path, key[:diffidx]...))
- return
- }
- // Save the original part. Depending if the break is
- // at the extension's last byte or not, create an
- // intermediate extension or use the extension's child
- // node directly.
- var n *stNode
- if diffidx < len(st.key)-1 {
- // Break on the non-last byte, insert an intermediate
- // extension. The path prefix of the newly-inserted
- // extension should also contain the different byte.
- n = newExt(st.key[diffidx+1:], st.children[0])
- t.hash(n, append(path, st.key[:diffidx+1]...))
- } else {
- // Break on the last byte, no need to insert
- // an extension node: reuse the current node.
- // The path prefix of the original part should
- // still be same.
- n = st.children[0]
- t.hash(n, append(path, st.key...))
- }
- var p *stNode
- if diffidx == 0 {
- // the break is on the first byte, so
- // the current node is converted into
- // a branch node.
- st.children[0] = nil
- p = st
- st.typ = branchNode
- } else {
- // the common prefix is at least one byte
- // long, insert a new intermediate branch
- // node.
- st.children[0] = stPool.Get().(*stNode)
- st.children[0].typ = branchNode
- p = st.children[0]
- }
- // Create a leaf for the inserted part
- o := newLeaf(key[diffidx+1:], value)
-
- // Insert both child leaves where they belong:
- origIdx := st.key[diffidx]
- newIdx := key[diffidx]
- p.children[origIdx] = n
- p.children[newIdx] = o
- st.key = st.key[:diffidx]
-
- case leafNode: /* Leaf */
- // Compare both key chunks and see where they differ
- diffidx := st.getDiffIndex(key)
-
- // Overwriting a key isn't supported, which means that
- // the current leaf is expected to be split into 1) an
- // optional extension for the common prefix of these 2
- // keys, 2) a fullnode selecting the path on which the
- // keys differ, and 3) one leaf for the differentiated
- // component of each key.
- if diffidx >= len(st.key) {
- panic("Trying to insert into existing key")
- }
-
- // Check if the split occurs at the first nibble of the
- // chunk. In that case, no prefix extnode is necessary.
- // Otherwise, create that
- var p *stNode
- if diffidx == 0 {
- // Convert current leaf into a branch
- st.typ = branchNode
- p = st
- st.children[0] = nil
- } else {
- // Convert current node into an ext,
- // and insert a child branch node.
- st.typ = extNode
- st.children[0] = stPool.Get().(*stNode)
- st.children[0].typ = branchNode
- p = st.children[0]
- }
-
- // Create the two child leaves: one containing the original
- // value and another containing the new value. The child leaf
- // is hashed directly in order to free up some memory.
- origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val)
- t.hash(p.children[origIdx], append(path, st.key[:diffidx+1]...))
-
- newIdx := key[diffidx]
- p.children[newIdx] = newLeaf(key[diffidx+1:], value)
-
- // Finally, cut off the key part that has been passed
- // over to the children.
- st.key = st.key[:diffidx]
- st.val = nil
-
- case emptyNode: /* Empty */
- st.typ = leafNode
- st.key = key
- st.val = value
-
- case hashedNode:
- panic("trying to insert into hash")
-
- default:
- panic("invalid type")
- }
-}
-
-// hash converts st into a 'hashedNode', if possible. Possible outcomes:
-//
-// 1. The rlp-encoded value was >= 32 bytes:
-// - Then the 32-byte `hash` will be accessible in `st.val`.
-// - And the 'st.type' will be 'hashedNode'
-//
-// 2. The rlp-encoded value was < 32 bytes
-// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
-// - And the 'st.type' will be 'hashedNode' AGAIN
-//
-// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
-func (t *StackTrie) hash(st *stNode, path []byte) {
- var (
- blob []byte // RLP-encoded node blob
- internal [][]byte // List of node paths covered by the extension node
- )
- switch st.typ {
- case hashedNode:
- return
-
- case emptyNode:
- st.val = types.EmptyRootHash.Bytes()
- st.key = st.key[:0]
- st.typ = hashedNode
- return
-
- case branchNode:
- var nodes fullNode
- for i, child := range st.children {
- if child == nil {
- nodes.Children[i] = nilValueNode
- continue
- }
- t.hash(child, append(path, byte(i)))
-
- if len(child.val) < 32 {
- nodes.Children[i] = rawNode(child.val)
- } else {
- nodes.Children[i] = hashNode(child.val)
- }
- st.children[i] = nil
- stPool.Put(child.reset()) // Release child back to pool.
- }
- nodes.encode(t.h.encbuf)
- blob = t.h.encodedBytes()
-
- case extNode:
- // recursively hash and commit child as the first step
- t.hash(st.children[0], append(path, st.key...))
-
- // Collect the path of internal nodes between shortNode and its **in disk**
- // child. This is essential in the case of path mode scheme to avoid leaving
- // danging nodes within the range of this internal path on disk, which would
- // break the guarantee for state healing.
- if len(st.children[0].val) >= 32 && t.options.Cleaner != nil {
- for i := 1; i < len(st.key); i++ {
- internal = append(internal, append(path, st.key[:i]...))
- }
- }
- // encode the extension node
- n := shortNode{Key: hexToCompactInPlace(st.key)}
- if len(st.children[0].val) < 32 {
- n.Val = rawNode(st.children[0].val)
- } else {
- n.Val = hashNode(st.children[0].val)
- }
- n.encode(t.h.encbuf)
- blob = t.h.encodedBytes()
-
- stPool.Put(st.children[0].reset()) // Release child back to pool.
- st.children[0] = nil
-
- case leafNode:
- st.key = append(st.key, byte(16))
- n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)}
-
- n.encode(t.h.encbuf)
- blob = t.h.encodedBytes()
-
- default:
- panic("invalid node type")
- }
-
- st.typ = hashedNode
- st.key = st.key[:0]
-
- // Skip committing the non-root node if the size is smaller than 32 bytes.
- if len(blob) < 32 && len(path) > 0 {
- st.val = common.CopyBytes(blob)
- return
- }
- // Write the hash to the 'val'. We allocate a new val here to not mutate
- // input values.
- st.val = t.h.hashData(blob)
-
- // Short circuit if the stack trie is not configured for writing.
- if t.options.Writer == nil {
- return
- }
- // Skip committing if the node is on the left boundary and stackTrie is
- // configured to filter the boundary.
- if t.options.SkipLeftBoundary && bytes.HasPrefix(t.first, path) {
- if t.options.boundaryGauge != nil {
- t.options.boundaryGauge.Inc(1)
- }
- return
- }
- // Skip committing if the node is on the right boundary and stackTrie is
- // configured to filter the boundary.
- if t.options.SkipRightBoundary && bytes.HasPrefix(t.last, path) {
- if t.options.boundaryGauge != nil {
- t.options.boundaryGauge.Inc(1)
- }
- return
- }
- // Clean up the internal dangling nodes covered by the extension node.
- // This should be done before writing the node to adhere to the committing
- // order from bottom to top.
- for _, path := range internal {
- t.options.Cleaner(path)
- }
- t.options.Writer(path, common.BytesToHash(st.val), blob)
-}
-
-// Hash will firstly hash the entire trie if it's still not hashed and then commit
-// all nodes to the associated database. Actually most of the trie nodes have been
-// committed already. The main purpose here is to commit the nodes on right boundary.
-//
-// For stack trie, Hash and Commit are functionally identical.
-func (t *StackTrie) Hash() common.Hash {
- n := t.root
- t.hash(n, nil)
- return common.BytesToHash(n.val)
-}
-
-// Commit will firstly hash the entire trie if it's still not hashed and then commit
-// all nodes to the associated database. Actually most of the trie nodes have been
-// committed already. The main purpose here is to commit the nodes on right boundary.
-//
-// For stack trie, Hash and Commit are functionally identical.
-func (t *StackTrie) Commit() common.Hash {
- return t.Hash()
-}
diff --git a/trie/stacktrie_fuzzer_test.go b/trie/stacktrie_fuzzer_test.go
deleted file mode 100644
index 391c0a6c83..0000000000
--- a/trie/stacktrie_fuzzer_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "golang.org/x/crypto/sha3"
- "golang.org/x/exp/slices"
-)
-
-func FuzzStackTrie(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- fuzz(data, false)
- })
-}
-
-func fuzz(data []byte, debugging bool) {
- // This spongeDb is used to check the sequence of disk-db-writes
- var (
- input = bytes.NewReader(data)
- spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme)
- trieA = NewEmpty(dbA)
- spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme)
-
- options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
- })
- trieB = NewStackTrie(options)
- vals []*kv
- maxElements = 10000
- // operate on unique keys only
- keys = make(map[string]struct{})
- )
- // Fill the trie with elements
- for i := 0; input.Len() > 0 && i < maxElements; i++ {
- k := make([]byte, 32)
- input.Read(k)
- var a uint16
- binary.Read(input, binary.LittleEndian, &a)
- a = 1 + a%100
- v := make([]byte, a)
- input.Read(v)
- if input.Len() == 0 {
- // If it was exhausted while reading, the value may be all zeroes,
- // thus 'deletion' which is not supported on stacktrie
- break
- }
- if _, present := keys[string(k)]; present {
- // This key is a duplicate, ignore it
- continue
- }
- keys[string(k)] = struct{}{}
- vals = append(vals, &kv{k: k, v: v})
- trieA.MustUpdate(k, v)
- }
- if len(vals) == 0 {
- return
- }
- // Flush trie -> database
- rootA, nodes, err := trieA.Commit(false)
- if err != nil {
- panic(err)
- }
- if nodes != nil {
- dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- }
- // Flush memdb -> disk (sponge)
- dbA.Commit(rootA)
-
- // Stacktrie requires sorted insertion
- slices.SortFunc(vals, (*kv).cmp)
-
- for _, kv := range vals {
- if debugging {
- fmt.Printf("{\"%#x\" , \"%#x\"} // stacktrie.Update\n", kv.k, kv.v)
- }
- trieB.MustUpdate(kv.k, kv.v)
- }
- rootB := trieB.Hash()
- trieB.Commit()
- if rootA != rootB {
- panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootB))
- }
- sumA := spongeA.sponge.Sum(nil)
- sumB := spongeB.sponge.Sum(nil)
- if !bytes.Equal(sumA, sumB) {
- panic(fmt.Sprintf("sequence differ: (trie) %x != %x (stacktrie)", sumA, sumB))
- }
-
- // Ensure all the nodes are persisted correctly
- var (
- nodeset = make(map[string][]byte) // path -> blob
- optionsC = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- if crypto.Keccak256Hash(blob) != hash {
- panic("invalid node blob")
- }
- nodeset[string(path)] = common.CopyBytes(blob)
- })
- trieC = NewStackTrie(optionsC)
- checked int
- )
- for _, kv := range vals {
- trieC.MustUpdate(kv.k, kv.v)
- }
- rootC := trieC.Commit()
- if rootA != rootC {
- panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
- }
- trieA, _ = New(TrieID(rootA), dbA)
- iterA := trieA.MustNodeIterator(nil)
- for iterA.Next(true) {
- if iterA.Hash() == (common.Hash{}) {
- if _, present := nodeset[string(iterA.Path())]; present {
- panic("unexpected tiny node")
- }
- continue
- }
- nodeBlob, present := nodeset[string(iterA.Path())]
- if !present {
- panic("missing node")
- }
- if !bytes.Equal(nodeBlob, iterA.NodeBlob()) {
- panic("node blob is not matched")
- }
- checked += 1
- }
- if checked != len(nodeset) {
- panic("node number is not matched")
- }
-}
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
deleted file mode 100644
index 9131b2fea2..0000000000
--- a/trie/stacktrie_test.go
+++ /dev/null
@@ -1,497 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "math/big"
- "math/rand"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/stretchr/testify/assert"
- "golang.org/x/exp/slices"
-)
-
-func TestStackTrieInsertAndHash(t *testing.T) {
- type KeyValueHash struct {
- K string // Hex string for key.
- V string // Value, directly converted to bytes.
- H string // Expected root hash after insert of (K, V) to an existing trie.
- }
- tests := [][]KeyValueHash{
- { // {0:0, 7:0, f:0}
- {"00", "v_______________________0___0", "5cb26357b95bb9af08475be00243ceb68ade0b66b5cd816b0c18a18c612d2d21"},
- {"70", "v_______________________0___1", "8ff64309574f7a437a7ad1628e690eb7663cfde10676f8a904a8c8291dbc1603"},
- {"f0", "v_______________________0___2", "9e3a01bd8d43efb8e9d4b5506648150b8e3ed1caea596f84ee28e01a72635470"},
- },
- { // {1:0cc, e:{1:fc, e:fc}}
- {"10cc", "v_______________________1___0", "233e9b257843f3dfdb1cce6676cdaf9e595ac96ee1b55031434d852bc7ac9185"},
- {"e1fc", "v_______________________1___1", "39c5e908ae83d0c78520c7c7bda0b3782daf594700e44546e93def8f049cca95"},
- {"eefc", "v_______________________1___2", "d789567559fd76fe5b7d9cc42f3750f942502ac1c7f2a466e2f690ec4b6c2a7c"},
- },
- { // {b:{a:ac, b:ac}, d:acc}
- {"baac", "v_______________________2___0", "8be1c86ba7ec4c61e14c1a9b75055e0464c2633ae66a055a24e75450156a5d42"},
- {"bbac", "v_______________________2___1", "8495159b9895a7d88d973171d737c0aace6fe6ac02a4769fff1bc43bcccce4cc"},
- {"dacc", "v_______________________2___2", "9bcfc5b220a27328deb9dc6ee2e3d46c9ebc9c69e78acda1fa2c7040602c63ca"},
- },
- { // {0:0cccc, 2:456{0:0, 2:2}
- {"00cccc", "v_______________________3___0", "e57dc2785b99ce9205080cb41b32ebea7ac3e158952b44c87d186e6d190a6530"},
- {"245600", "v_______________________3___1", "0335354adbd360a45c1871a842452287721b64b4234dfe08760b243523c998db"},
- {"245622", "v_______________________3___2", "9e6832db0dca2b5cf81c0e0727bfde6afc39d5de33e5720bccacc183c162104e"},
- },
- { // {1:4567{1:1c, 3:3c}, 3:0cccccc}
- {"1456711c", "v_______________________4___0", "f2389e78d98fed99f3e63d6d1623c1d4d9e8c91cb1d585de81fbc7c0e60d3529"},
- {"1456733c", "v_______________________4___1", "101189b3fab852be97a0120c03d95eefcf984d3ed639f2328527de6def55a9c0"},
- {"30cccccc", "v_______________________4___2", "3780ce111f98d15751dfde1eb21080efc7d3914b429e5c84c64db637c55405b3"},
- },
- { // 8800{1:f, 2:e, 3:d}
- {"88001f", "v_______________________5___0", "e817db50d84f341d443c6f6593cafda093fc85e773a762421d47daa6ac993bd5"},
- {"88002e", "v_______________________5___1", "d6e3e6047bdc110edd296a4d63c030aec451bee9d8075bc5a198eee8cda34f68"},
- {"88003d", "v_______________________5___2", "b6bdf8298c703342188e5f7f84921a402042d0e5fb059969dd53a6b6b1fb989e"},
- },
- { // 0{1:fc, 2:ec, 4:dc}
- {"01fc", "v_______________________6___0", "693268f2ca80d32b015f61cd2c4dba5a47a6b52a14c34f8e6945fad684e7a0d5"},
- {"02ec", "v_______________________6___1", "e24ddd44469310c2b785a2044618874bf486d2f7822603a9b8dce58d6524d5de"},
- {"04dc", "v_______________________6___2", "33fc259629187bbe54b92f82f0cd8083b91a12e41a9456b84fc155321e334db7"},
- },
- { // f{0:fccc, f:ff{0:f, f:f}}
- {"f0fccc", "v_______________________7___0", "b0966b5aa469a3e292bc5fcfa6c396ae7a657255eef552ea7e12f996de795b90"},
- {"ffff0f", "v_______________________7___1", "3b1ca154ec2a3d96d8d77bddef0abfe40a53a64eb03cecf78da9ec43799fa3d0"},
- {"ffffff", "v_______________________7___2", "e75463041f1be8252781be0ace579a44ea4387bf5b2739f4607af676f7719678"},
- },
- { // ff{0:f{0:f, f:f}, f:fcc}
- {"ff0f0f", "v_______________________8___0", "0928af9b14718ec8262ab89df430f1e5fbf66fac0fed037aff2b6767ae8c8684"},
- {"ff0fff", "v_______________________8___1", "d870f4d3ce26b0bf86912810a1960693630c20a48ba56be0ad04bc3e9ddb01e6"},
- {"ffffcc", "v_______________________8___2", "4239f10dd9d9915ecf2e047d6a576bdc1733ed77a30830f1bf29deaf7d8e966f"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"123f", "x___________________________2", "1164d7299964e74ac40d761f9189b2a3987fae959800d0f7e29d3aaf3eae9e15"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"124a", "x___________________________2", "661a96a669869d76b7231380da0649d013301425fbea9d5c5fae6405aa31cfce"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"13aa", "x___________________________2", "6590120e1fd3ffd1a90e8de5bb10750b61079bb0776cca4414dd79a24e4d4356"},
- },
- {
- {"123d", "x___________________________0", "fc453d88b6f128a77c448669710497380fa4588abbea9f78f4c20c80daa797d0"},
- {"123e", "x___________________________1", "5af48f2d8a9a015c1ff7fa8b8c7f6b676233bd320e8fb57fd7933622badd2cec"},
- {"2aaa", "x___________________________2", "f869b40e0c55eace1918332ef91563616fbf0755e2b946119679f7ef8e44b514"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"1234fa", "x___________________________2", "4f4e368ab367090d5bc3dbf25f7729f8bd60df84de309b4633a6b69ab66142c0"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"1235aa", "x___________________________2", "21840121d11a91ac8bbad9a5d06af902a5c8d56a47b85600ba813814b7bfcb9b"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"124aaa", "x___________________________2", "ea4040ddf6ae3fbd1524bdec19c0ab1581015996262006632027fa5cf21e441e"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"13aaaa", "x___________________________2", "e4beb66c67e44f2dd8ba36036e45a44ff68f8d52942472b1911a45f886a34507"},
- },
- {
- {"1234da", "x___________________________0", "1c4b4462e9f56a80ca0f5d77c0d632c41b0102290930343cf1791e971a045a79"},
- {"1234ea", "x___________________________1", "2f502917f3ba7d328c21c8b45ee0f160652e68450332c166d4ad02d1afe31862"},
- {"2aaaaa", "x___________________________2", "5f5989b820ff5d76b7d49e77bb64f26602294f6c42a1a3becc669cd9e0dc8ec9"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"1234fa", "x___________________________3", "65bb3aafea8121111d693ffe34881c14d27b128fd113fa120961f251fe28428d"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"1235aa", "x___________________________3", "f670e4d2547c533c5f21e0045442e2ecb733f347ad6d29ef36e0f5ba31bb11a8"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"124aaa", "x___________________________3", "c17464123050a9a6f29b5574bb2f92f6d305c1794976b475b7fb0316b6335598"},
- },
- {
- {"000000", "x___________________________0", "3b32b7af0bddc7940e7364ee18b5a59702c1825e469452c8483b9c4e0218b55a"},
- {"1234da", "x___________________________1", "3ab152a1285dca31945566f872c1cc2f17a770440eda32aeee46a5e91033dde2"},
- {"1234ea", "x___________________________2", "0cccc87f96ddef55563c1b3be3c64fff6a644333c3d9cd99852cb53b6412b9b8"},
- {"13aaaa", "x___________________________3", "aa8301be8cb52ea5cd249f5feb79fb4315ee8de2140c604033f4b3fff78f0105"},
- },
- {
- {"0000", "x___________________________0", "cb8c09ad07ae882136f602b3f21f8733a9f5a78f1d2525a8d24d1c13258000b2"},
- {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
- {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
- {"123f", "x___________________________3", "80f7bad1893ca57e3443bb3305a517723a74d3ba831bcaca22a170645eb7aafb"},
- },
- {
- {"0000", "x___________________________0", "cb8c09ad07ae882136f602b3f21f8733a9f5a78f1d2525a8d24d1c13258000b2"},
- {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
- {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
- {"124a", "x___________________________3", "383bc1bb4f019e6bc4da3751509ea709b58dd1ac46081670834bae072f3e9557"},
- },
- {
- {"0000", "x___________________________0", "cb8c09ad07ae882136f602b3f21f8733a9f5a78f1d2525a8d24d1c13258000b2"},
- {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
- {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
- {"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"},
- },
- { // branch node with short values
- {"01", "a", "b48605025f5f4b129d40a420e721aa7d504487f015fce85b96e52126365ef7dc"},
- {"80", "b", "2dc6b680daf74db067cb7aeaad73265ded93d96fce190fcbf64f498d475672ab"},
- {"ee", "c", "017dc705a54ac5328dd263fa1bae68d655310fb3e3f7b7bc57e9a43ddf99c4bf"},
- {"ff", "d", "bd5a3584d271d459bd4eb95247b2fc88656b3671b60c1125ffe7bc0b689470d0"},
- },
- { // ext node with short branch node, then becoming long
- {"a0", "a", "a83e028cb1e4365935661a9fd36a5c65c30b9ab416eaa877424146ca2a69d088"},
- {"a1", "b", "f586a4639b07b01798ca65e05c253b75d51135ebfbf6f8d6e87c0435089e65f0"},
- {"a2", "c", "63e297c295c008e09a8d531e18d57f270b6bc403e23179b915429db948cd62e3"},
- {"a3", "d", "94a7b721535578e9381f1f4e4b6ec29f8bdc5f0458a30320684c562f5d47b4b5"},
- {"a4", "e", "4b7e66d1c81965cdbe8fab8295ef56bc57fefdc5733d4782d2f8baf630f083c6"},
- {"a5", "f", "2997e7b502198ce1783b5277faacf52b25844fb55a99b63e88bdbbafac573106"},
- {"a6", "g", "bee629dd27a40772b2e1a67ec6db270d26acdf8d3b674dfae27866ad6ae1f48b"},
- },
- { // branch node with short values, then long ones
- {"a001", "v1", "b9cc982d995392b51e6787f1915f0b88efd4ad8b30f138da0a3e2242f2323e35"},
- {"b002", "v2", "a7b474bc77ef5097096fa0ee6298fdae8928c0bc3724e7311cd0fa9ed1942fc7"},
- {"c003", "v___________________________3", "dceb5bb7c92b0e348df988a8d9fc36b101397e38ebd405df55ba6ee5f14a264a"},
- {"d004", "v___________________________4", "36e60ecb86b9626165e1c6543c42ecbe4d83bca58e8e1124746961511fce362a"},
- },
- { // ext node to branch node with short values, then long ones
- {"8002", "v1", "3258fcb3e9e7d7234ecd3b8d4743999e4ab3a21592565e0a5ca64c141e8620d9"},
- {"8004", "v2", "b6cb95b7024a83c17624a3c9bed09b4b5e8ed426f49f54b8ad13c39028b1e75a"},
- {"8008", "v___________________________3", "c769d82963abe6f0900bf69754738eeb2f84559777cfa87a44f54e1aab417871"},
- {"800d", "v___________________________4", "1cad1fdaab1a6fa95d7b780fd680030e423eb76669971368ba04797a8d9cdfc9"},
- },
- { // ext node with a child of size 31 (Y) and branch node with a child of size 31 (X)
- {"000001", "ZZZZZZZZZ", "cef154b87c03c563408520ff9b26923c360cbc3ddb590c079bedeeb25a8c9c77"},
- {"000002", "Y", "2130735e600f612f6e657a32bd7be64ddcaec6512c5694844b19de713922895d"},
- {"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"},
- },
- }
- for i, test := range tests {
- // The StackTrie does not allow Insert(), Hash(), Insert(), ...
- // so we will create new trie for every sequence length of inserts.
- for l := 1; l <= len(test); l++ {
- st := NewStackTrie(nil)
- for j := 0; j < l; j++ {
- kv := &test[j]
- if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil {
- t.Fatal(err)
- }
- }
- expected := common.HexToHash(test[l-1].H)
- if h := st.Hash(); h != expected {
- t.Errorf("%d(%d): root hash mismatch: %x, expected %x", i, l, h, expected)
- }
- }
- }
-}
-
-func TestSizeBug(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
- value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
-
- nt.Update(leaf, value)
- st.Update(leaf, value)
-
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-func TestEmptyBug(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
- //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
- kvs := []struct {
- K string
- V string
- }{
- {K: "405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace", V: "9496f4ec2bf9dab484cac6be589e8417d84781be08"},
- {K: "40edb63a35fcf86c08022722aa3287cdd36440d671b4918131b2514795fefa9c", V: "01"},
- {K: "b10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6", V: "947a30f7736e48d6599356464ba4c150d8da0302ff"},
- {K: "c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b", V: "02"},
- }
-
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
-
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-func TestValLength56(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
- //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
- kvs := []struct {
- K string
- V string
- }{
- {K: "405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace", V: "1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"},
- }
-
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
-
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-// TestUpdateSmallNodes tests a case where the leaves are small (both key and value),
-// which causes a lot of node-within-node. This case was found via fuzzing.
-func TestUpdateSmallNodes(t *testing.T) {
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- kvs := []struct {
- K string
- V string
- }{
- {"63303030", "3041"}, // stacktrie.Update
- {"65", "3000"}, // stacktrie.Update
- }
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-// TestUpdateVariableKeys contains a case which stacktrie fails: when keys of different
-// sizes are used, and the second one has the same prefix as the first, then the
-// stacktrie fails, since it's unable to 'expand' on an already added leaf.
-// For all practical purposes, this is fine, since keys are fixed-size length
-// in account and storage tries.
-//
-// The test is marked as 'skipped', and exists just to have the behaviour documented.
-// This case was found via fuzzing.
-func TestUpdateVariableKeys(t *testing.T) {
- t.SkipNow()
- st := NewStackTrie(nil)
- nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- kvs := []struct {
- K string
- V string
- }{
- {"0x33303534636532393561313031676174", "303030"},
- {"0x3330353463653239356131303167617430", "313131"},
- }
- for _, kv := range kvs {
- nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
- }
- if nt.Hash() != st.Hash() {
- t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
- }
-}
-
-// TestStacktrieNotModifyValues checks that inserting blobs of data into the
-// stacktrie does not mutate the blobs
-func TestStacktrieNotModifyValues(t *testing.T) {
- st := NewStackTrie(nil)
- { // Test a very small trie
- // Give it the value as a slice with large backing alloc,
- // so if the stacktrie tries to append, it won't have to realloc
- value := make([]byte, 1, 100)
- value[0] = 0x2
- want := common.CopyBytes(value)
- st.Update([]byte{0x01}, value)
- st.Hash()
- if have := value; !bytes.Equal(have, want) {
- t.Fatalf("tiny trie: have %#x want %#x", have, want)
- }
- st = NewStackTrie(nil)
- }
- // Test with a larger trie
- keyB := big.NewInt(1)
- keyDelta := big.NewInt(1)
- var vals [][]byte
- getValue := func(i int) []byte {
- if i%2 == 0 { // large
- return crypto.Keccak256(big.NewInt(int64(i)).Bytes())
- } else { //small
- return big.NewInt(int64(i)).Bytes()
- }
- }
- for i := 0; i < 1000; i++ {
- key := common.BigToHash(keyB)
- value := getValue(i)
- st.Update(key.Bytes(), value)
- vals = append(vals, value)
- keyB = keyB.Add(keyB, keyDelta)
- keyDelta.Add(keyDelta, common.Big1)
- }
- st.Hash()
- for i := 0; i < 1000; i++ {
- want := getValue(i)
-
- have := vals[i]
- if !bytes.Equal(have, want) {
- t.Fatalf("item %d, have %#x want %#x", i, have, want)
- }
- }
-}
-
-func buildPartialTree(entries []*kv, t *testing.T) map[string]common.Hash {
- var (
- options = NewStackTrieOptions()
- nodes = make(map[string]common.Hash)
- )
- var (
- first int
- last = len(entries) - 1
-
- noLeft bool
- noRight bool
- )
- // Enter split mode if there are at least two elements
- if rand.Intn(5) != 0 {
- for {
- first = rand.Intn(len(entries))
- last = rand.Intn(len(entries))
- if first <= last {
- break
- }
- }
- if first != 0 {
- noLeft = true
- }
- if last != len(entries)-1 {
- noRight = true
- }
- }
- options = options.WithSkipBoundary(noLeft, noRight, nil)
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- nodes[string(path)] = hash
- })
- tr := NewStackTrie(options)
-
- for i := first; i <= last; i++ {
- tr.MustUpdate(entries[i].k, entries[i].v)
- }
- tr.Commit()
- return nodes
-}
-
-func TestPartialStackTrie(t *testing.T) {
- for round := 0; round < 100; round++ {
- var (
- n = rand.Intn(100) + 1
- entries []*kv
- )
- for i := 0; i < n; i++ {
- var val []byte
- if rand.Intn(3) == 0 {
- val = testutil.RandBytes(3)
- } else {
- val = testutil.RandBytes(32)
- }
- entries = append(entries, &kv{
- k: testutil.RandBytes(32),
- v: val,
- })
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var (
- nodes = make(map[string]common.Hash)
- options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- nodes[string(path)] = hash
- })
- )
- tr := NewStackTrie(options)
-
- for i := 0; i < len(entries); i++ {
- tr.MustUpdate(entries[i].k, entries[i].v)
- }
- tr.Commit()
-
- for j := 0; j < 100; j++ {
- for path, hash := range buildPartialTree(entries, t) {
- if nodes[path] != hash {
- t.Errorf("%v, want %x, got %x", []byte(path), nodes[path], hash)
- }
- }
- }
- }
-}
-
-func TestStackTrieErrors(t *testing.T) {
- s := NewStackTrie(nil)
- // Deletion
- if err := s.Update(nil, nil); err == nil {
- t.Fatal("expected error")
- }
- if err := s.Update(nil, []byte{}); err == nil {
- t.Fatal("expected error")
- }
- if err := s.Update([]byte{0xa}, []byte{}); err == nil {
- t.Fatal("expected error")
- }
- // Non-ascending keys (going backwards or repeating)
- assert.Nil(t, s.Update([]byte{0xaa}, []byte{0xa}))
- assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xa}), "repeat insert same key")
- assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key")
- assert.Nil(t, s.Update([]byte{0xab}, []byte{0xa}))
- assert.NotNil(t, s.Update([]byte{0x10}, []byte{0xb}), "out of order insert")
- assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key")
-}
diff --git a/trie/sync_test.go b/trie/sync_test.go
deleted file mode 100644
index ca4fcc9c26..0000000000
--- a/trie/sync_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "fmt"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-// makeTestTrie create a sample test trie to test node-wise reconstruction.
-func makeTestTrie(scheme string) (ethdb.Database, *testDb, *StateTrie, map[string][]byte) {
- // Create an empty trie
- db := rawdb.NewMemoryDatabase()
- triedb := newTestDatabase(db, scheme)
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
-
- // Fill it with some arbitrary data
- content := make(map[string][]byte)
- for i := byte(0); i < 255; i++ {
- // Map the same data under multiple keys
- key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
-
- // Add some other data to inflate the trie
- for j := byte(3); j < 13; j++ {
- key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
- content[string(key)] = val
- trie.MustUpdate(key, val)
- }
- }
- root, nodes, _ := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
- panic(fmt.Errorf("failed to commit db %v", err))
- }
- if err := triedb.Commit(root); err != nil {
- panic(err)
- }
- // Re-create the trie based on the new state
- trie, _ = NewStateTrie(TrieID(root), triedb)
- return db, triedb, trie, content
-}
-
-// checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error {
- ndb := newTestDatabase(db, scheme)
- var it NodeIterator
- if rawTrie {
- trie, err := New(TrieID(root), ndb)
- if err != nil {
- return nil // Consider a non existent state consistent
- }
- it = trie.MustNodeIterator(nil)
- } else {
- trie, err := NewStateTrie(TrieID(root), ndb)
- if err != nil {
- return nil // Consider a non existent state consistent
- }
- it = trie.MustNodeIterator(nil)
- }
- for it.Next(true) {
- }
- return it.Error()
-}
diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go
deleted file mode 100644
index f3166c1ca9..0000000000
--- a/trie/testutil/utils.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package testutil
-
-import (
- crand "crypto/rand"
- "encoding/binary"
- mrand "math/rand"
-
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
-)
-
-// Prng is a pseudo random number generator seeded by strong randomness.
-// The randomness is printed on startup in order to make failures reproducible.
-var prng = initRand()
-
-func initRand() *mrand.Rand {
- var seed [8]byte
- crand.Read(seed[:])
- rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
- return rnd
-}
-
-// RandBytes generates a random byte slice with specified length.
-func RandBytes(n int) []byte {
- r := make([]byte, n)
- prng.Read(r)
- return r
-}
-
-// RandomHash generates a random blob of data and returns it as a hash.
-func RandomHash() common.Hash {
- return common.BytesToHash(RandBytes(common.HashLength))
-}
-
-// RandomAddress generates a random blob of data and returns it as an address.
-func RandomAddress() common.Address {
- return common.BytesToAddress(RandBytes(common.AddressLength))
-}
-
-// RandomNode generates a random node.
-func RandomNode() *trienode.Node {
- val := RandBytes(100)
- return trienode.New(crypto.Keccak256Hash(val), val)
-}
diff --git a/trie/tracer.go b/trie/tracer.go
deleted file mode 100644
index c2b88699a7..0000000000
--- a/trie/tracer.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/libevm/common"
-)
-
-// tracer tracks the changes of trie nodes. During the trie operations,
-// some nodes can be deleted from the trie, while these deleted nodes
-// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted
-// nodes won't be removed from the disk at all. Tracer is an auxiliary tool
-// used to track all insert and delete operations of trie and capture all
-// deleted nodes eventually.
-//
-// The changed nodes can be mainly divided into two categories: the leaf
-// node and intermediate node. The former is inserted/deleted by callers
-// while the latter is inserted/deleted in order to follow the rule of trie.
-// This tool can track all of them no matter the node is embedded in its
-// parent or not, but valueNode is never tracked.
-//
-// Besides, it's also used for recording the original value of the nodes
-// when they are resolved from the disk. The pre-value of the nodes will
-// be used to construct trie history in the future.
-//
-// Note tracer is not thread-safe, callers should be responsible for handling
-// the concurrency issues by themselves.
-type tracer struct {
- inserts map[string]struct{}
- deletes map[string]struct{}
- accessList map[string][]byte
-}
-
-// newTracer initializes the tracer for capturing trie changes.
-func newTracer() *tracer {
- return &tracer{
- inserts: make(map[string]struct{}),
- deletes: make(map[string]struct{}),
- accessList: make(map[string][]byte),
- }
-}
-
-// onRead tracks the newly loaded trie node and caches the rlp-encoded
-// blob internally. Don't change the value outside of function since
-// it's not deep-copied.
-func (t *tracer) onRead(path []byte, val []byte) {
- t.accessList[string(path)] = val
-}
-
-// onInsert tracks the newly inserted trie node. If it's already
-// in the deletion set (resurrected node), then just wipe it from
-// the deletion set as it's "untouched".
-func (t *tracer) onInsert(path []byte) {
- if _, present := t.deletes[string(path)]; present {
- delete(t.deletes, string(path))
- return
- }
- t.inserts[string(path)] = struct{}{}
-}
-
-// onDelete tracks the newly deleted trie node. If it's already
-// in the addition set, then just wipe it from the addition set
-// as it's untouched.
-func (t *tracer) onDelete(path []byte) {
- if _, present := t.inserts[string(path)]; present {
- delete(t.inserts, string(path))
- return
- }
- t.deletes[string(path)] = struct{}{}
-}
-
-// reset clears the content tracked by tracer.
-func (t *tracer) reset() {
- t.inserts = make(map[string]struct{})
- t.deletes = make(map[string]struct{})
- t.accessList = make(map[string][]byte)
-}
-
-// copy returns a deep copied tracer instance.
-func (t *tracer) copy() *tracer {
- var (
- inserts = make(map[string]struct{})
- deletes = make(map[string]struct{})
- accessList = make(map[string][]byte)
- )
- for path := range t.inserts {
- inserts[path] = struct{}{}
- }
- for path := range t.deletes {
- deletes[path] = struct{}{}
- }
- for path, blob := range t.accessList {
- accessList[path] = common.CopyBytes(blob)
- }
- return &tracer{
- inserts: inserts,
- deletes: deletes,
- accessList: accessList,
- }
-}
-
-// deletedNodes returns a list of node paths which are deleted from the trie.
-func (t *tracer) deletedNodes() []string {
- var paths []string
- for path := range t.deletes {
- // It's possible a few deleted nodes were embedded
- // in their parent before, the deletions can be no
- // effect by deleting nothing, filter them out.
- _, ok := t.accessList[path]
- if !ok {
- continue
- }
- paths = append(paths, path)
- }
- return paths
-}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
deleted file mode 100644
index f0c1cfc88d..0000000000
--- a/trie/tracer_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
-)
-
-var (
- tiny = []struct{ k, v string }{
- {"k1", "v1"},
- {"k2", "v2"},
- {"k3", "v3"},
- }
- nonAligned = []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- standard = []struct{ k, v string }{
- {string(randBytes(32)), "verb"},
- {string(randBytes(32)), "wookiedoo"},
- {string(randBytes(32)), "stallion"},
- {string(randBytes(32)), "horse"},
- {string(randBytes(32)), "coin"},
- {string(randBytes(32)), "puppy"},
- {string(randBytes(32)), "myothernodedata"},
- }
-)
-
-func TestTrieTracer(t *testing.T) {
- testTrieTracer(t, tiny)
- testTrieTracer(t, nonAligned)
- testTrieTracer(t, standard)
-}
-
-// Tests if the trie diffs are tracked correctly. Tracer should capture
-// all non-leaf dirty nodes, no matter the node is embedded or not.
-func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
-
- // Determine all new nodes are tracked
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- insertSet := copySet(trie.tracer.inserts) // copy before commit
- deleteSet := copySet(trie.tracer.deletes) // copy before commit
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- seen := setKeys(iterNodes(db, root))
- if !compareSet(insertSet, seen) {
- t.Fatal("Unexpected insertion set")
- }
- if !compareSet(deleteSet, nil) {
- t.Fatal("Unexpected deletion set")
- }
-
- // Determine all deletions are tracked
- trie, _ = New(TrieID(root), db)
- for _, val := range vals {
- trie.MustDelete([]byte(val.k))
- }
- insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes)
- if !compareSet(insertSet, nil) {
- t.Fatal("Unexpected insertion set")
- }
- if !compareSet(deleteSet, seen) {
- t.Fatal("Unexpected deletion set")
- }
-}
-
-// Test that after inserting a new batch of nodes and deleting them immediately,
-// the trie tracer should be cleared normally as no operation happened.
-func TestTrieTracerNoop(t *testing.T) {
- testTrieTracerNoop(t, tiny)
- testTrieTracerNoop(t, nonAligned)
- testTrieTracerNoop(t, standard)
-}
-
-func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- for _, val := range vals {
- trie.MustDelete([]byte(val.k))
- }
- if len(trie.tracer.inserts) != 0 {
- t.Fatal("Unexpected insertion set")
- }
- if len(trie.tracer.deletes) != 0 {
- t.Fatal("Unexpected deletion set")
- }
-}
-
-// Tests if the accessList is correctly tracked.
-func TestAccessList(t *testing.T) {
- testAccessList(t, tiny)
- testAccessList(t, nonAligned)
- testAccessList(t, standard)
-}
-
-func testAccessList(t *testing.T, vals []struct{ k, v string }) {
- var (
- db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie = NewEmpty(db)
- orig = trie.Copy()
- )
- // Create trie from scratch
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Update trie
- parent := root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), randBytes(32))
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Add more new nodes
- parent = root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- var keys []string
- for i := 0; i < 30; i++ {
- key := randBytes(32)
- keys = append(keys, string(key))
- trie.MustUpdate(key, randBytes(32))
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Partial deletions
- parent = root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- for _, key := range keys {
- trie.MustUpdate([]byte(key), nil)
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-
- // Delete all
- parent = root
- trie, _ = New(TrieID(root), db)
- orig = trie.Copy()
- for _, val := range vals {
- trie.MustUpdate([]byte(val.k), nil)
- }
- root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, nodes); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-}
-
-// Tests origin values won't be tracked in Iterator or Prover
-func TestAccessListLeak(t *testing.T) {
- var (
- db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie = NewEmpty(db)
- )
- // Create trie from scratch
- for _, val := range standard {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- var cases = []struct {
- op func(tr *Trie)
- }{
- {
- func(tr *Trie) {
- it := tr.MustNodeIterator(nil)
- for it.Next(true) {
- }
- },
- },
- {
- func(tr *Trie) {
- it := NewIterator(tr.MustNodeIterator(nil))
- for it.Next() {
- }
- },
- },
- {
- func(tr *Trie) {
- for _, val := range standard {
- tr.Prove([]byte(val.k), rawdb.NewMemoryDatabase())
- }
- },
- },
- }
- for _, c := range cases {
- trie, _ = New(TrieID(root), db)
- n1 := len(trie.tracer.accessList)
- c.op(trie)
- n2 := len(trie.tracer.accessList)
-
- if n1 != n2 {
- t.Fatalf("AccessList is leaked, prev %d after %d", n1, n2)
- }
- }
-}
-
-// Tests whether the original tree node is correctly deleted after being embedded
-// in its parent due to the smaller size of the original tree node.
-func TestTinyTree(t *testing.T) {
- var (
- db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie = NewEmpty(db)
- )
- for _, val := range tiny {
- trie.MustUpdate([]byte(val.k), randBytes(32))
- }
- root, set, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
-
- parent := root
- trie, _ = New(TrieID(root), db)
- orig := trie.Copy()
- for _, val := range tiny {
- trie.MustUpdate([]byte(val.k), []byte(val.v))
- }
- root, set, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(set))
-
- trie, _ = New(TrieID(root), db)
- if err := verifyAccessList(orig, trie, set); err != nil {
- t.Fatalf("Invalid accessList %v", err)
- }
-}
-
-func compareSet(setA, setB map[string]struct{}) bool {
- if len(setA) != len(setB) {
- return false
- }
- for key := range setA {
- if _, ok := setB[key]; !ok {
- return false
- }
- }
- return true
-}
-
-func forNodes(tr *Trie) map[string][]byte {
- var (
- it = tr.MustNodeIterator(nil)
- nodes = make(map[string][]byte)
- )
- for it.Next(true) {
- if it.Leaf() {
- continue
- }
- nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob())
- }
- return nodes
-}
-
-func iterNodes(db *testDb, root common.Hash) map[string][]byte {
- tr, _ := New(TrieID(root), db)
- return forNodes(tr)
-}
-
-func forHashedNodes(tr *Trie) map[string][]byte {
- var (
- it = tr.MustNodeIterator(nil)
- nodes = make(map[string][]byte)
- )
- for it.Next(true) {
- if it.Hash() == (common.Hash{}) {
- continue
- }
- nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob())
- }
- return nodes
-}
-
-func diffTries(trieA, trieB *Trie) (map[string][]byte, map[string][]byte, map[string][]byte) {
- var (
- nodesA = forHashedNodes(trieA)
- nodesB = forHashedNodes(trieB)
- inA = make(map[string][]byte) // hashed nodes in trie a but not b
- inB = make(map[string][]byte) // hashed nodes in trie b but not a
- both = make(map[string][]byte) // hashed nodes in both tries but different value
- )
- for path, blobA := range nodesA {
- if blobB, ok := nodesB[path]; ok {
- if bytes.Equal(blobA, blobB) {
- continue
- }
- both[path] = blobA
- continue
- }
- inA[path] = blobA
- }
- for path, blobB := range nodesB {
- if _, ok := nodesA[path]; ok {
- continue
- }
- inB[path] = blobB
- }
- return inA, inB, both
-}
-
-func setKeys(set map[string][]byte) map[string]struct{} {
- keys := make(map[string]struct{})
- for k := range set {
- keys[k] = struct{}{}
- }
- return keys
-}
-
-func copySet(set map[string]struct{}) map[string]struct{} {
- copied := make(map[string]struct{})
- for k := range set {
- copied[k] = struct{}{}
- }
- return copied
-}
diff --git a/trie/trie.go b/trie/trie.go
deleted file mode 100644
index c3927cc517..0000000000
--- a/trie/trie.go
+++ /dev/null
@@ -1,683 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Package trie implements Merkle Patricia Tries.
-package trie
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
-// top of a database. Whenever trie performs a commit operation, the generated
-// nodes will be gathered and returned in a set. Once the trie is committed,
-// it's not usable anymore. Callers have to re-create the trie with new root
-// based on the updated trie database.
-//
-// Trie is not safe for concurrent use.
-type Trie struct {
- root node
- owner common.Hash
-
- // Flag whether the commit operation is already performed. If so the
- // trie is not usable(latest states is invisible).
- committed bool
-
- // Keep track of the number leaves which have been inserted since the last
- // hashing operation. This number will not directly map to the number of
- // actually unhashed nodes.
- unhashed int
-
- // reader is the handler trie can retrieve nodes from.
- reader *trieReader
-
- // tracer is the tool to track the trie changes.
- // It will be reset after each commit operation.
- tracer *tracer
-}
-
-// newFlag returns the cache flag value for a newly created node.
-func (t *Trie) newFlag() nodeFlag {
- return nodeFlag{dirty: true}
-}
-
-// Copy returns a copy of Trie.
-func (t *Trie) Copy() *Trie {
- return &Trie{
- root: t.root,
- owner: t.owner,
- committed: t.committed,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
- }
-}
-
-// New creates the trie instance with provided trie id and the read-only
-// database. The state specified by trie id must be available, otherwise
-// an error will be returned. The trie root specified by trie id can be
-// zero hash or the sha3 hash of an empty string, then trie is initially
-// empty, otherwise, the root node must be present in database or returns
-// a MissingNodeError if not.
-func New(id *ID, db database.Database) (*Trie, error) {
- reader, err := newTrieReader(id.StateRoot, id.Owner, db)
- if err != nil {
- return nil, err
- }
- trie := &Trie{
- owner: id.Owner,
- reader: reader,
- tracer: newTracer(),
- }
- if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash {
- rootnode, err := trie.resolveAndTrack(id.Root[:], nil)
- if err != nil {
- return nil, err
- }
- trie.root = rootnode
- }
- return trie, nil
-}
-
-// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
-func NewEmpty(db database.Database) *Trie {
- tr, _ := New(TrieID(types.EmptyRootHash), db)
- return tr
-}
-
-// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
-// error but just print out an error message.
-func (t *Trie) MustNodeIterator(start []byte) NodeIterator {
- it, err := t.NodeIterator(start)
- if err != nil {
- log.Error("Unhandled trie error in Trie.NodeIterator", "err", err)
- }
- return it
-}
-
-// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
-// the key after the given start key.
-func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return nil, ErrCommitted
- }
- return newNodeIterator(t, start), nil
-}
-
-// MustGet is a wrapper of Get and will omit any encountered error but just
-// print out an error message.
-func (t *Trie) MustGet(key []byte) []byte {
- res, err := t.Get(key)
- if err != nil {
- log.Error("Unhandled trie error in Trie.Get", "err", err)
- }
- return res
-}
-
-// Get returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) Get(key []byte) ([]byte, error) {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return nil, ErrCommitted
- }
- value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0)
- if err == nil && didResolve {
- t.root = newroot
- }
- return value, err
-}
-
-func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) {
- switch n := (origNode).(type) {
- case nil:
- return nil, nil, false, nil
- case valueNode:
- return n, n, false, nil
- case *shortNode:
- if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) {
- // key not found in trie
- return nil, n, false, nil
- }
- value, newnode, didResolve, err = t.get(n.Val, key, pos+len(n.Key))
- if err == nil && didResolve {
- n = n.copy()
- n.Val = newnode
- }
- return value, n, didResolve, err
- case *fullNode:
- value, newnode, didResolve, err = t.get(n.Children[key[pos]], key, pos+1)
- if err == nil && didResolve {
- n = n.copy()
- n.Children[key[pos]] = newnode
- }
- return value, n, didResolve, err
- case hashNode:
- child, err := t.resolveAndTrack(n, key[:pos])
- if err != nil {
- return nil, n, true, err
- }
- value, newnode, _, err := t.get(child, key, pos)
- return value, newnode, true, err
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
- }
-}
-
-// MustGetNode is a wrapper of GetNode and will omit any encountered error but
-// just print out an error message.
-func (t *Trie) MustGetNode(path []byte) ([]byte, int) {
- item, resolved, err := t.GetNode(path)
- if err != nil {
- log.Error("Unhandled trie error in Trie.GetNode", "err", err)
- }
- return item, resolved
-}
-
-// GetNode retrieves a trie node by compact-encoded path. It is not possible
-// to use keybyte-encoding as the path might contain odd nibbles.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) GetNode(path []byte) ([]byte, int, error) {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return nil, 0, ErrCommitted
- }
- item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0)
- if err != nil {
- return nil, resolved, err
- }
- if resolved > 0 {
- t.root = newroot
- }
- if item == nil {
- return nil, resolved, nil
- }
- return item, resolved, nil
-}
-
-func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
- // If non-existent path requested, abort
- if origNode == nil {
- return nil, nil, 0, nil
- }
- // If we reached the requested path, return the current node
- if pos >= len(path) {
- // Although we most probably have the original node expanded, encoding
- // that into consensus form can be nasty (needs to cascade down) and
- // time consuming. Instead, just pull the hash up from disk directly.
- var hash hashNode
- if node, ok := origNode.(hashNode); ok {
- hash = node
- } else {
- hash, _ = origNode.cache()
- }
- if hash == nil {
- return nil, origNode, 0, errors.New("non-consensus node")
- }
- blob, err := t.reader.node(path, common.BytesToHash(hash))
- return blob, origNode, 1, err
- }
- // Path still needs to be traversed, descend into children
- switch n := (origNode).(type) {
- case valueNode:
- // Path prematurely ended, abort
- return nil, nil, 0, nil
-
- case *shortNode:
- if len(path)-pos < len(n.Key) || !bytes.Equal(n.Key, path[pos:pos+len(n.Key)]) {
- // Path branches off from short node
- return nil, n, 0, nil
- }
- item, newnode, resolved, err = t.getNode(n.Val, path, pos+len(n.Key))
- if err == nil && resolved > 0 {
- n = n.copy()
- n.Val = newnode
- }
- return item, n, resolved, err
-
- case *fullNode:
- item, newnode, resolved, err = t.getNode(n.Children[path[pos]], path, pos+1)
- if err == nil && resolved > 0 {
- n = n.copy()
- n.Children[path[pos]] = newnode
- }
- return item, n, resolved, err
-
- case hashNode:
- child, err := t.resolveAndTrack(n, path[:pos])
- if err != nil {
- return nil, n, 1, err
- }
- item, newnode, resolved, err := t.getNode(child, path, pos)
- return item, newnode, resolved + 1, err
-
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
- }
-}
-
-// MustUpdate is a wrapper of Update and will omit any encountered error but
-// just print out an error message.
-func (t *Trie) MustUpdate(key, value []byte) {
- if err := t.Update(key, value); err != nil {
- log.Error("Unhandled trie error in Trie.Update", "err", err)
- }
-}
-
-// Update associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) Update(key, value []byte) error {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return ErrCommitted
- }
- return t.update(key, value)
-}
-
-func (t *Trie) update(key, value []byte) error {
- t.unhashed++
- k := keybytesToHex(key)
- if len(value) != 0 {
- _, n, err := t.insert(t.root, nil, k, valueNode(value))
- if err != nil {
- return err
- }
- t.root = n
- } else {
- _, n, err := t.delete(t.root, nil, k)
- if err != nil {
- return err
- }
- t.root = n
- }
- return nil
-}
-
-func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) {
- if len(key) == 0 {
- if v, ok := n.(valueNode); ok {
- return !bytes.Equal(v, value.(valueNode)), value, nil
- }
- return true, value, nil
- }
- switch n := n.(type) {
- case *shortNode:
- matchlen := prefixLen(key, n.Key)
- // If the whole key matches, keep this short node as is
- // and only update the value.
- if matchlen == len(n.Key) {
- dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
- if !dirty || err != nil {
- return false, n, err
- }
- return true, &shortNode{n.Key, nn, t.newFlag()}, nil
- }
- // Otherwise branch out at the index where they differ.
- branch := &fullNode{flags: t.newFlag()}
- var err error
- _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
- if err != nil {
- return false, nil, err
- }
- _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
- if err != nil {
- return false, nil, err
- }
- // Replace this shortNode with the branch if it occurs at index 0.
- if matchlen == 0 {
- return true, branch, nil
- }
- // New branch node is created as a child of the original short node.
- // Track the newly inserted node in the tracer. The node identifier
- // passed is the path from the root node.
- t.tracer.onInsert(append(prefix, key[:matchlen]...))
-
- // Replace it with a short node leading up to the branch.
- return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil
-
- case *fullNode:
- dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value)
- if !dirty || err != nil {
- return false, n, err
- }
- n = n.copy()
- n.flags = t.newFlag()
- n.Children[key[0]] = nn
- return true, n, nil
-
- case nil:
- // New short node is created and track it in the tracer. The node identifier
- // passed is the path from the root node. Note the valueNode won't be tracked
- // since it's always embedded in its parent.
- t.tracer.onInsert(prefix)
-
- return true, &shortNode{key, value, t.newFlag()}, nil
-
- case hashNode:
- // We've hit a part of the trie that isn't loaded yet. Load
- // the node and insert into it. This leaves all child nodes on
- // the path to the value in the trie.
- rn, err := t.resolveAndTrack(n, prefix)
- if err != nil {
- return false, nil, err
- }
- dirty, nn, err := t.insert(rn, prefix, key, value)
- if !dirty || err != nil {
- return false, rn, err
- }
- return true, nn, nil
-
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", n, n))
- }
-}
-
-// MustDelete is a wrapper of Delete and will omit any encountered error but
-// just print out an error message.
-func (t *Trie) MustDelete(key []byte) {
- if err := t.Delete(key); err != nil {
- log.Error("Unhandled trie error in Trie.Delete", "err", err)
- }
-}
-
-// Delete removes any existing value for key from the trie.
-//
-// If the requested node is not present in trie, no error will be returned.
-// If the trie is corrupted, a MissingNodeError is returned.
-func (t *Trie) Delete(key []byte) error {
- // Short circuit if the trie is already committed and not usable.
- if t.committed {
- return ErrCommitted
- }
- t.unhashed++
- k := keybytesToHex(key)
- _, n, err := t.delete(t.root, nil, k)
- if err != nil {
- return err
- }
- t.root = n
- return nil
-}
-
-// delete returns the new root of the trie with key deleted.
-// It reduces the trie to minimal form by simplifying
-// nodes on the way up after deleting recursively.
-func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
- switch n := n.(type) {
- case *shortNode:
- matchlen := prefixLen(key, n.Key)
- if matchlen < len(n.Key) {
- return false, n, nil // don't replace n on mismatch
- }
- if matchlen == len(key) {
- // The matched short node is deleted entirely and track
- // it in the deletion set. The same the valueNode doesn't
- // need to be tracked at all since it's always embedded.
- t.tracer.onDelete(prefix)
-
- return true, nil, nil // remove n entirely for whole matches
- }
- // The key is longer than n.Key. Remove the remaining suffix
- // from the subtrie. Child can never be nil here since the
- // subtrie must contain at least two other values with keys
- // longer than n.Key.
- dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
- if !dirty || err != nil {
- return false, n, err
- }
- switch child := child.(type) {
- case *shortNode:
- // The child shortNode is merged into its parent, track
- // is deleted as well.
- t.tracer.onDelete(append(prefix, n.Key...))
-
- // Deleting from the subtrie reduced it to another
- // short node. Merge the nodes to avoid creating a
- // shortNode{..., shortNode{...}}. Use concat (which
- // always creates a new slice) instead of append to
- // avoid modifying n.Key since it might be shared with
- // other nodes.
- return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil
- default:
- return true, &shortNode{n.Key, child, t.newFlag()}, nil
- }
-
- case *fullNode:
- dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:])
- if !dirty || err != nil {
- return false, n, err
- }
- n = n.copy()
- n.flags = t.newFlag()
- n.Children[key[0]] = nn
-
- // Because n is a full node, it must've contained at least two children
- // before the delete operation. If the new child value is non-nil, n still
- // has at least two children after the deletion, and cannot be reduced to
- // a short node.
- if nn != nil {
- return true, n, nil
- }
- // Reduction:
- // Check how many non-nil entries are left after deleting and
- // reduce the full node to a short node if only one entry is
- // left. Since n must've contained at least two children
- // before deletion (otherwise it would not be a full node) n
- // can never be reduced to nil.
- //
- // When the loop is done, pos contains the index of the single
- // value that is left in n or -2 if n contains at least two
- // values.
- pos := -1
- for i, cld := range &n.Children {
- if cld != nil {
- if pos == -1 {
- pos = i
- } else {
- pos = -2
- break
- }
- }
- }
- if pos >= 0 {
- if pos != 16 {
- // If the remaining entry is a short node, it replaces
- // n and its key gets the missing nibble tacked to the
- // front. This avoids creating an invalid
- // shortNode{..., shortNode{...}}. Since the entry
- // might not be loaded yet, resolve it just for this
- // check.
- cnode, err := t.resolve(n.Children[pos], append(prefix, byte(pos)))
- if err != nil {
- return false, nil, err
- }
- if cnode, ok := cnode.(*shortNode); ok {
- // Replace the entire full node with the short node.
- // Mark the original short node as deleted since the
- // value is embedded into the parent now.
- t.tracer.onDelete(append(prefix, byte(pos)))
-
- k := append([]byte{byte(pos)}, cnode.Key...)
- return true, &shortNode{k, cnode.Val, t.newFlag()}, nil
- }
- }
- // Otherwise, n is replaced by a one-nibble short node
- // containing the child.
- return true, &shortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil
- }
- // n still contains at least two values and cannot be reduced.
- return true, n, nil
-
- case valueNode:
- return true, nil, nil
-
- case nil:
- return false, nil, nil
-
- case hashNode:
- // We've hit a part of the trie that isn't loaded yet. Load
- // the node and delete from it. This leaves all child nodes on
- // the path to the value in the trie.
- rn, err := t.resolveAndTrack(n, prefix)
- if err != nil {
- return false, nil, err
- }
- dirty, nn, err := t.delete(rn, prefix, key)
- if !dirty || err != nil {
- return false, rn, err
- }
- return true, nn, nil
-
- default:
- panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key))
- }
-}
-
-func concat(s1 []byte, s2 ...byte) []byte {
- r := make([]byte, len(s1)+len(s2))
- copy(r, s1)
- copy(r[len(s1):], s2)
- return r
-}
-
-func (t *Trie) resolve(n node, prefix []byte) (node, error) {
- if n, ok := n.(hashNode); ok {
- return t.resolveAndTrack(n, prefix)
- }
- return n, nil
-}
-
-// resolveAndTrack loads node from the underlying store with the given node hash
-// and path prefix and also tracks the loaded node blob in tracer treated as the
-// node's original value. The rlp-encoded blob is preferred to be loaded from
-// database because it's easy to decode node while complex to encode node to blob.
-func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
- blob, err := t.reader.node(prefix, common.BytesToHash(n))
- if err != nil {
- return nil, err
- }
- t.tracer.onRead(prefix, blob)
- return mustDecodeNode(n, blob), nil
-}
-
-// Hash returns the root hash of the trie. It does not write to the
-// database and can be used even if the trie doesn't have one.
-func (t *Trie) Hash() common.Hash {
- hash, cached := t.hashRoot()
- t.root = cached
- return common.BytesToHash(hash.(hashNode))
-}
-
-// Commit collects all dirty nodes in the trie and replaces them with the
-// corresponding node hash. All collected nodes (including dirty leaves if
-// collectLeaf is true) will be encapsulated into a nodeset for return.
-// The returned nodeset can be nil if the trie is clean (nothing to commit).
-// Once the trie is committed, it's not usable anymore. A new trie must
-// be created with new root and updated trie database for following usage
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
- defer t.tracer.reset()
- defer func() {
- t.committed = true
- }()
- // Trie is empty and can be classified into two types of situations:
- // (a) The trie was empty and no update happens => return nil
- // (b) The trie was non-empty and all nodes are dropped => return
- // the node set includes all deleted nodes
- if t.root == nil {
- paths := t.tracer.deletedNodes()
- if len(paths) == 0 {
- return types.EmptyRootHash, nil, nil // case (a)
- }
- nodes := trienode.NewNodeSet(t.owner)
- for _, path := range paths {
- nodes.AddNode([]byte(path), trienode.NewDeleted())
- }
- return types.EmptyRootHash, nodes, nil // case (b)
- }
- // Derive the hash for all dirty nodes first. We hold the assumption
- // in the following procedure that all nodes are hashed.
- rootHash := t.Hash()
-
- // Do a quick check if we really need to commit. This can happen e.g.
- // if we load a trie for reading storage values, but don't write to it.
- if hashedNode, dirty := t.root.cache(); !dirty {
- // Replace the root node with the origin hash in order to
- // ensure all resolved nodes are dropped after the commit.
- t.root = hashedNode
- return rootHash, nil, nil
- }
- nodes := trienode.NewNodeSet(t.owner)
- for _, path := range t.tracer.deletedNodes() {
- nodes.AddNode([]byte(path), trienode.NewDeleted())
- }
- t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
- return rootHash, nodes, nil
-}
-
-// hashRoot calculates the root hash of the given trie
-func (t *Trie) hashRoot() (node, node) {
- if t.root == nil {
- return hashNode(types.EmptyRootHash.Bytes()), nil
- }
- // If the number of changes is below 100, we let one thread handle it
- h := newHasher(t.unhashed >= 100)
- defer func() {
- returnHasherToPool(h)
- t.unhashed = 0
- }()
- hashed, cached := h.hash(t.root, true)
- return hashed, cached
-}
-
-// Reset drops the referenced root node and cleans all internal state.
-func (t *Trie) Reset() {
- t.root = nil
- t.owner = common.Hash{}
- t.unhashed = 0
- t.tracer.reset()
- t.committed = false
-}
diff --git a/trie/trie_id.go b/trie/trie_id.go
deleted file mode 100644
index 2cab016e4d..0000000000
--- a/trie/trie_id.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package trie
-
-import "github.com/ava-labs/libevm/common"
-
-// ID is the identifier for uniquely identifying a trie.
-type ID struct {
- StateRoot common.Hash // The root of the corresponding state(block.root)
- Owner common.Hash // The contract address hash which the trie belongs to
- Root common.Hash // The root hash of trie
-}
-
-// StateTrieID constructs an identifier for state trie with the provided state root.
-func StateTrieID(root common.Hash) *ID {
- return &ID{
- StateRoot: root,
- Owner: common.Hash{},
- Root: root,
- }
-}
-
-// StorageTrieID constructs an identifier for storage trie which belongs to a certain
-// state and contract specified by the stateRoot and owner.
-func StorageTrieID(stateRoot common.Hash, owner common.Hash, root common.Hash) *ID {
- return &ID{
- StateRoot: stateRoot,
- Owner: owner,
- Root: root,
- }
-}
-
-// TrieID constructs an identifier for a standard trie(not a second-layer trie)
-// with provided root. It's mostly used in tests and some other tries like CHT trie.
-func TrieID(root common.Hash) *ID {
- return &ID{
- StateRoot: root,
- Owner: common.Hash{},
- Root: root,
- }
-}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
deleted file mode 100644
index 1341b9fe33..0000000000
--- a/trie/trie_reader.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// (c) 2023, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/triestate"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/log"
-)
-
-// trieReader is a wrapper of the underlying node reader. It's not safe
-// for concurrent usage.
-type trieReader struct {
- owner common.Hash
- reader database.Reader
- banned map[string]struct{} // Marker to prevent node from being accessed, for tests
-}
-
-// newTrieReader initializes the trie reader with the given node reader.
-func newTrieReader(stateRoot, owner common.Hash, db database.Database) (*trieReader, error) {
- if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
- if stateRoot == (common.Hash{}) {
- log.Error("Zero state root hash!")
- }
- return &trieReader{owner: owner}, nil
- }
- reader, err := db.Reader(stateRoot)
- if err != nil {
- return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err}
- }
- return &trieReader{owner: owner, reader: reader}, nil
-}
-
-// newEmptyReader initializes the pure in-memory reader. All read operations
-// should be forbidden and returns the MissingNodeError.
-func newEmptyReader() *trieReader {
- return &trieReader{}
-}
-
-// node retrieves the rlp-encoded trie node with the provided trie node
-// information. An MissingNodeError will be returned in case the node is
-// not found or any error is encountered.
-func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
- // Perform the logics in tests for preventing trie node access.
- if r.banned != nil {
- if _, ok := r.banned[string(path)]; ok {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
- }
- }
- if r.reader == nil {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
- }
- blob, err := r.reader.Node(r.owner, path, hash)
- if err != nil || len(blob) == 0 {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
- }
- return blob, nil
-}
-
-// MerkleLoader implements triestate.TrieLoader for constructing tries.
-type MerkleLoader struct {
- db database.Database
-}
-
-// NewMerkleLoader creates the merkle trie loader.
-func NewMerkleLoader(db database.Database) *MerkleLoader {
- return &MerkleLoader{db: db}
-}
-
-// OpenTrie opens the main account trie.
-func (l *MerkleLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
- return New(TrieID(root), l.db)
-}
-
-// OpenStorageTrie opens the storage trie of an account.
-func (l *MerkleLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
- return New(StorageTrieID(stateRoot, addrHash, root), l.db)
-}
diff --git a/trie/trie_test.go b/trie/trie_test.go
deleted file mode 100644
index ba62047709..0000000000
--- a/trie/trie_test.go
+++ /dev/null
@@ -1,1224 +0,0 @@
-// (c) 2020-2021, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "hash"
- "io"
- "math/rand"
- "reflect"
- "sort"
- "testing"
- "testing/quick"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/rlp"
- "github.com/davecgh/go-spew/spew"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
- "golang.org/x/crypto/sha3"
-)
-
-func init() {
- spew.Config.Indent = " "
- spew.Config.DisableMethods = false
-}
-
-func TestEmptyTrie(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- res := trie.Hash()
- exp := types.EmptyRootHash
- if res != exp {
- t.Errorf("expected %x got %x", exp, res)
- }
-}
-
-func TestNull(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- key := make([]byte, 32)
- value := []byte("test")
- trie.MustUpdate(key, value)
- if !bytes.Equal(trie.MustGet(key), value) {
- t.Fatal("wrong value")
- }
-}
-
-func TestMissingRoot(t *testing.T) {
- testMissingRoot(t, rawdb.HashScheme)
- testMissingRoot(t, rawdb.PathScheme)
-}
-
-func testMissingRoot(t *testing.T, scheme string) {
- root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
- trie, err := New(TrieID(root), newTestDatabase(rawdb.NewMemoryDatabase(), scheme))
- if trie != nil {
- t.Error("New returned non-nil trie for invalid root")
- }
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("New returned wrong error: %v", err)
- }
-}
-
-func TestMissingNode(t *testing.T) {
- testMissingNode(t, false, rawdb.HashScheme)
- testMissingNode(t, false, rawdb.PathScheme)
- testMissingNode(t, true, rawdb.HashScheme)
- testMissingNode(t, true, rawdb.PathScheme)
-}
-
-func testMissingNode(t *testing.T, memonly bool, scheme string) {
- diskdb := rawdb.NewMemoryDatabase()
- triedb := newTestDatabase(diskdb, scheme)
-
- trie := NewEmpty(triedb)
- updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
- updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- if !memonly {
- require.NoError(t, triedb.Commit(root))
- }
-
- trie, _ = New(TrieID(root), triedb)
- _, err := trie.Get([]byte("120000"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- _, err = trie.Get([]byte("120099"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- _, err = trie.Get([]byte("123456"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- err = trie.Update([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- trie, _ = New(TrieID(root), triedb)
- err = trie.Delete([]byte("123456"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
-
- var (
- path []byte
- hash = common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
- )
- for p, n := range nodes.Nodes {
- if n.Hash == hash {
- path = common.CopyBytes([]byte(p))
- break
- }
- }
- trie, _ = New(TrieID(root), triedb)
- if memonly {
- trie.reader.banned = map[string]struct{}{string(path): {}}
- } else {
- rawdb.DeleteTrieNode(diskdb, common.Hash{}, path, hash, scheme)
- }
-
- _, err = trie.Get([]byte("120000"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
- _, err = trie.Get([]byte("120099"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
- _, err = trie.Get([]byte("123456"))
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- err = trie.Update([]byte("120099"), []byte("zxcv"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
- err = trie.Delete([]byte("123456"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
-}
-
-func TestInsert(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- updateString(trie, "doe", "reindeer")
- updateString(trie, "dog", "puppy")
- updateString(trie, "dogglesworth", "cat")
-
- exp := common.HexToHash("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")
- root := trie.Hash()
- if root != exp {
- t.Errorf("case 1: exp %x got %x", exp, root)
- }
-
- trie = NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
-
- exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
- root, _, _ = trie.Commit(false)
- if root != exp {
- t.Errorf("case 2: exp %x got %x", exp, root)
- }
-}
-
-func TestGet(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- updateString(trie, "doe", "reindeer")
- updateString(trie, "dog", "puppy")
- updateString(trie, "dogglesworth", "cat")
-
- for i := 0; i < 2; i++ {
- res := getString(trie, "dog")
- if !bytes.Equal(res, []byte("puppy")) {
- t.Errorf("expected puppy got %x", res)
- }
- unknown := getString(trie, "unknown")
- if unknown != nil {
- t.Errorf("expected nil got %x", unknown)
- }
- if i == 1 {
- return
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- trie, _ = New(TrieID(root), db)
- }
-}
-
-func TestDelete(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"ether", ""},
- {"dog", "puppy"},
- {"shaman", ""},
- }
- for _, val := range vals {
- if val.v != "" {
- updateString(trie, val.k, val.v)
- } else {
- deleteString(trie, val.k)
- }
- }
-
- hash := trie.Hash()
- exp := common.HexToHash("5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84")
- if hash != exp {
- t.Errorf("expected %x got %x", exp, hash)
- }
-}
-
-func TestEmptyValues(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
-
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"ether", ""},
- {"dog", "puppy"},
- {"shaman", ""},
- }
- for _, val := range vals {
- updateString(trie, val.k, val.v)
- }
-
- hash := trie.Hash()
- exp := common.HexToHash("5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84")
- if hash != exp {
- t.Errorf("expected %x got %x", exp, hash)
- }
-}
-
-func TestReplication(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(db)
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- for _, val := range vals {
- updateString(trie, val.k, val.v)
- }
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
-
- // create a new trie on top of the database and check that lookups work.
- trie2, err := New(TrieID(root), db)
- if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", root, err)
- }
- for _, kv := range vals {
- if string(getString(trie2, kv.k)) != kv.v {
- t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
- }
- }
- hash, nodes, _ := trie2.Commit(false)
- if hash != root {
- t.Errorf("root failure. expected %x got %x", root, hash)
- }
-
- // recreate the trie after commit
- if nodes != nil {
- db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- }
- trie2, err = New(TrieID(hash), db)
- if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", hash, err)
- }
- // perform some insertions on the new trie.
- vals2 := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- // {"shaman", "horse"},
- // {"doge", "coin"},
- // {"ether", ""},
- // {"dog", "puppy"},
- // {"somethingveryoddindeedthis is", "myothernodedata"},
- // {"shaman", ""},
- }
- for _, val := range vals2 {
- updateString(trie2, val.k, val.v)
- }
- if trie2.Hash() != hash {
- t.Errorf("root failure. expected %x got %x", hash, hash)
- }
-}
-
-func TestLargeValue(t *testing.T) {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99})
- trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32))
- trie.Hash()
-}
-
-// TestRandomCases tests some cases that were found via random fuzzing
-func TestRandomCases(t *testing.T) {
- var rt = []randTestStep{
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 1
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000002")}, // step 2
- {op: 2, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 3
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 4
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 5
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 6
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 7
- {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000008")}, // step 8
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000009")}, // step 9
- {op: 2, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 10
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 11
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 12
- {op: 0, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("000000000000000d")}, // step 13
- {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 14
- {op: 1, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 15
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 16
- {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000011")}, // step 17
- {op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 18
- {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 19
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000014")}, // step 20
- {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000015")}, // step 21
- {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000016")}, // step 22
- {op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 23
- {op: 1, key: common.Hex2Bytes("980c393656413a15c8da01978ed9f89feb80b502f58f2d640e3a2f5f7a99a7018f1b573befd92053ac6f78fca4a87268"), value: common.Hex2Bytes("")}, // step 24
- {op: 1, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 25
- }
- if err := runRandTest(rt); err != nil {
- t.Fatal(err)
- }
-}
-
-// randTest performs random trie operations.
-// Instances of this test are created by Generate.
-type randTest []randTestStep
-
-// compile-time interface check
-var _ quick.Generator = (randTest)(nil)
-
-type randTestStep struct {
- op int
- key []byte // for opUpdate, opDelete, opGet
- value []byte // for opUpdate
- err error // for debugging
-}
-
-const (
- opUpdate = iota
- opDelete
- opGet
- opHash
- opCommit
- opItercheckhash
- opNodeDiff
- opProve
- opMax // boundary value, not an actual op
-)
-
-func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
- var finishedFn = func() bool {
- size--
- return size == 0
- }
- return reflect.ValueOf(generateSteps(finishedFn, r))
-}
-
-func generateSteps(finished func() bool, r io.Reader) randTest {
- var allKeys [][]byte
- var one = []byte{0}
- genKey := func() []byte {
- r.Read(one)
- if len(allKeys) < 2 || one[0]%100 > 90 {
- // new key
- size := one[0] % 50
- key := make([]byte, size)
- r.Read(key)
- allKeys = append(allKeys, key)
- return key
- }
- // use existing key
- idx := int(one[0]) % len(allKeys)
- return allKeys[idx]
- }
- var steps randTest
- for !finished() {
- r.Read(one)
- step := randTestStep{op: int(one[0]) % opMax}
- switch step.op {
- case opUpdate:
- step.key = genKey()
- step.value = make([]byte, 8)
- binary.BigEndian.PutUint64(step.value, uint64(len(steps)))
- case opGet, opDelete, opProve:
- step.key = genKey()
- }
- steps = append(steps, step)
- }
- return steps
-}
-
-func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
- deletes, inserts, updates := diffTries(old, new)
-
- // Check insertion set
- for path := range inserts {
- n, ok := set.Nodes[path]
- if !ok || n.IsDeleted() {
- return errors.New("expect new node")
- }
- //if len(n.Prev) > 0 {
- // return errors.New("unexpected origin value")
- //}
- }
- // Check deletion set
- for path := range deletes {
- n, ok := set.Nodes[path]
- if !ok || !n.IsDeleted() {
- return errors.New("expect deleted node")
- }
- //if len(n.Prev) == 0 {
- // return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
- // return errors.New("invalid origin value")
- //}
- }
- // Check update set
- for path := range updates {
- n, ok := set.Nodes[path]
- if !ok || n.IsDeleted() {
- return errors.New("expect updated node")
- }
- //if len(n.Prev) == 0 {
- // return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
- // return errors.New("invalid origin value")
- //}
- }
- return nil
-}
-
-// runRandTestBool coerces error to boolean, for use in quick.Check
-func runRandTestBool(rt randTest) bool {
- return runRandTest(rt) == nil
-}
-
-func runRandTest(rt randTest) error {
- var scheme = rawdb.HashScheme
- if rand.Intn(2) == 0 {
- scheme = rawdb.PathScheme
- }
- var (
- origin = types.EmptyRootHash
- triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
- tr = NewEmpty(triedb)
- values = make(map[string]string) // tracks content of the trie
- origTrie = NewEmpty(triedb)
- )
- for i, step := range rt {
- // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
- // step.op, step.key, step.value, i)
- switch step.op {
- case opUpdate:
- tr.MustUpdate(step.key, step.value)
- values[string(step.key)] = string(step.value)
- case opDelete:
- tr.MustDelete(step.key)
- delete(values, string(step.key))
- case opGet:
- v := tr.MustGet(step.key)
- want := values[string(step.key)]
- if string(v) != want {
- rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
- }
- case opProve:
- hash := tr.Hash()
- if hash == types.EmptyRootHash {
- continue
- }
- proofDb := rawdb.NewMemoryDatabase()
- err := tr.Prove(step.key, proofDb)
- if err != nil {
- rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err)
- }
- _, err = VerifyProof(hash, step.key, proofDb)
- if err != nil {
- rt[i].err = fmt.Errorf("failed for verifying key %#x, %v", step.key, err)
- }
- case opHash:
- tr.Hash()
- case opCommit:
- root, nodes, _ := tr.Commit(true)
- if nodes != nil {
- triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
- }
- newtr, err := New(TrieID(root), triedb)
- if err != nil {
- rt[i].err = err
- return err
- }
- if nodes != nil {
- if err := verifyAccessList(origTrie, newtr, nodes); err != nil {
- rt[i].err = err
- return err
- }
- }
- tr = newtr
- origTrie = tr.Copy()
- origin = root
- case opItercheckhash:
- checktr := NewEmpty(triedb)
- it := NewIterator(tr.MustNodeIterator(nil))
- for it.Next() {
- checktr.MustUpdate(it.Key, it.Value)
- }
- if tr.Hash() != checktr.Hash() {
- rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
- }
- case opNodeDiff:
- var (
- origIter = origTrie.MustNodeIterator(nil)
- curIter = tr.MustNodeIterator(nil)
- origSeen = make(map[string]struct{})
- curSeen = make(map[string]struct{})
- )
- for origIter.Next(true) {
- if origIter.Leaf() {
- continue
- }
- origSeen[string(origIter.Path())] = struct{}{}
- }
- for curIter.Next(true) {
- if curIter.Leaf() {
- continue
- }
- curSeen[string(curIter.Path())] = struct{}{}
- }
- var (
- insertExp = make(map[string]struct{})
- deleteExp = make(map[string]struct{})
- )
- for path := range curSeen {
- _, present := origSeen[path]
- if !present {
- insertExp[path] = struct{}{}
- }
- }
- for path := range origSeen {
- _, present := curSeen[path]
- if !present {
- deleteExp[path] = struct{}{}
- }
- }
- if len(insertExp) != len(tr.tracer.inserts) {
- rt[i].err = fmt.Errorf("insert set mismatch")
- }
- if len(deleteExp) != len(tr.tracer.deletes) {
- rt[i].err = fmt.Errorf("delete set mismatch")
- }
- for insert := range tr.tracer.inserts {
- if _, present := insertExp[insert]; !present {
- rt[i].err = fmt.Errorf("missing inserted node")
- }
- }
- for del := range tr.tracer.deletes {
- if _, present := deleteExp[del]; !present {
- rt[i].err = fmt.Errorf("missing deleted node")
- }
- }
- }
- // Abort the test on error.
- if rt[i].err != nil {
- return rt[i].err
- }
- }
- return nil
-}
-
-func TestRandom(t *testing.T) {
- if err := quick.Check(runRandTestBool, nil); err != nil {
- if cerr, ok := err.(*quick.CheckError); ok {
- t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
- }
- t.Fatal(err)
- }
-}
-
-func BenchmarkGet(b *testing.B) { benchGet(b) }
-func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) }
-func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
-
-const benchElemCount = 20000
-
-func benchGet(b *testing.B) {
- triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
- trie := NewEmpty(triedb)
- k := make([]byte, 32)
- for i := 0; i < benchElemCount; i++ {
- binary.LittleEndian.PutUint64(k, uint64(i))
- v := make([]byte, 32)
- binary.LittleEndian.PutUint64(v, uint64(i))
- trie.MustUpdate(k, v)
- }
- binary.LittleEndian.PutUint64(k, benchElemCount/2)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- trie.MustGet(k)
- }
- b.StopTimer()
-}
-
-func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- k := make([]byte, 32)
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- v := make([]byte, 32)
- e.PutUint64(k, uint64(i))
- e.PutUint64(v, uint64(i))
- trie.MustUpdate(k, v)
- }
- return trie
-}
-
-// Benchmarks the trie hashing. Since the trie caches the result of any operation,
-// we cannot use b.N as the number of hashing rounds, since all rounds apart from
-// the first one will be NOOP. As such, we'll use b.N as the number of account to
-// insert into the trie before measuring the hashing.
-// BenchmarkHash-6 288680 4561 ns/op 682 B/op 9 allocs/op
-// BenchmarkHash-6 275095 4800 ns/op 685 B/op 9 allocs/op
-// pure hasher:
-// BenchmarkHash-6 319362 4230 ns/op 675 B/op 9 allocs/op
-// BenchmarkHash-6 257460 4674 ns/op 689 B/op 9 allocs/op
-// With hashing in-between and pure hasher:
-// BenchmarkHash-6 225417 7150 ns/op 982 B/op 12 allocs/op
-// BenchmarkHash-6 220378 6197 ns/op 983 B/op 12 allocs/op
-// same with old hasher
-// BenchmarkHash-6 229758 6437 ns/op 981 B/op 12 allocs/op
-// BenchmarkHash-6 212610 7137 ns/op 986 B/op 12 allocs/op
-func BenchmarkHash(b *testing.B) {
- // Create a realistic account trie to hash. We're first adding and hashing N
- // entries, then adding N more.
- addresses, accounts := makeAccounts(2 * b.N)
- // Insert the accounts into the trie and hash it
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- i := 0
- for ; i < len(addresses)/2; i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- trie.Hash()
- for ; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- b.ResetTimer()
- b.ReportAllocs()
- //trie.hashRoot(nil, nil)
- trie.Hash()
-}
-
-// Benchmarks the trie Commit following a Hash. Since the trie caches the result of any operation,
-// we cannot use b.N as the number of hashing rounds, since all rounds apart from
-// the first one will be NOOP. As such, we'll use b.N as the number of account to
-// insert into the trie before measuring the hashing.
-func BenchmarkCommitAfterHash(b *testing.B) {
- b.Run("no-onleaf", func(b *testing.B) {
- benchmarkCommitAfterHash(b, false)
- })
- b.Run("with-onleaf", func(b *testing.B) {
- benchmarkCommitAfterHash(b, true)
- })
-}
-
-func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
- // Make the random benchmark deterministic
- addresses, accounts := makeAccounts(b.N)
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- trie.Hash()
- b.ResetTimer()
- b.ReportAllocs()
- trie.Commit(collectLeaf)
-}
-
-func TestTinyTrie(t *testing.T) {
- // Create a realistic account trie to hash
- _, accounts := makeAccounts(5)
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
- if exp, root := common.HexToHash("dfb9311ba769a2bdb9d4126d0ae49046f9551063c738d10b9021343fb6550b3f"), trie.Hash(); exp != root {
- t.Errorf("1: got %x, exp %x", root, exp)
- }
- trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
- if exp, root := common.HexToHash("21d0f2f4c72fed985d1196993a784d36321a44085bbe60990cb65b7bc478f52b"), trie.Hash(); exp != root {
- t.Errorf("2: got %x, exp %x", root, exp)
- }
- trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
- if exp, root := common.HexToHash("e71f7f0bbcd0daf37bc03a3389408eced206e796ed6d76186387847e2193ac4e"), trie.Hash(); exp != root {
- t.Errorf("3: got %x, exp %x", root, exp)
- }
- checktr := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- it := NewIterator(trie.MustNodeIterator(nil))
- for it.Next() {
- checktr.MustUpdate(it.Key, it.Value)
- }
- if troot, itroot := trie.Hash(), checktr.Hash(); troot != itroot {
- t.Fatalf("hash mismatch in opItercheckhash, trie: %x, check: %x", troot, itroot)
- }
-}
-
-func TestCommitAfterHash(t *testing.T) {
- // Create a realistic account trie to hash
- addresses, accounts := makeAccounts(1000)
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- trie.Hash()
- trie.Commit(false)
- root := trie.Hash()
- exp := common.HexToHash("6dcf62a0c1575866467426b55e3acab075312b38c6b112457c3cd23ab9b94fc1")
- if exp != root {
- t.Errorf("got %x, exp %x", root, exp)
- }
- root, _, _ = trie.Commit(false)
- if exp != root {
- t.Errorf("got %x, exp %x", root, exp)
- }
-}
-
-func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) {
- // Make the random benchmark deterministic
- random := rand.New(rand.NewSource(0))
- // Create a realistic account trie to hash
- addresses = make([][20]byte, size)
- for i := 0; i < len(addresses); i++ {
- data := make([]byte, 20)
- random.Read(data)
- copy(addresses[i][:], data)
- }
- accounts = make([][]byte, len(addresses))
- for i := 0; i < len(accounts); i++ {
- var (
- nonce = uint64(random.Int63())
- root = types.EmptyRootHash
- code = crypto.Keccak256(nil)
- )
- // The big.Rand function is not deterministic with regards to 64 vs 32 bit systems,
- // and will consume different amount of data from the rand source.
- //balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
- // Therefore, we instead just read via byte buffer
- numBytes := random.Uint32() % 33 // [0, 32] bytes
- balanceBytes := make([]byte, numBytes)
- random.Read(balanceBytes)
- balance := new(uint256.Int).SetBytes(balanceBytes)
- data, _ := rlp.EncodeToBytes(&types.StateAccount{Nonce: nonce, Balance: balance, Root: root, CodeHash: code})
- accounts[i] = data
- }
- return addresses, accounts
-}
-
-// spongeDb is a dummy db backend which accumulates writes in a sponge
-type spongeDb struct {
- sponge hash.Hash
- id string
- journal []string
- keys []string
- values map[string]string
-}
-
-func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement me") }
-func (s *spongeDb) Get(key []byte) ([]byte, error) { return nil, errors.New("no such elem") }
-func (s *spongeDb) Delete(key []byte) error { panic("implement me") }
-func (s *spongeDb) NewBatch() ethdb.Batch { return &spongeBatch{s} }
-func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch { return &spongeBatch{s} }
-func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") }
-func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") }
-func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
-func (s *spongeDb) Close() error { return nil }
-func (s *spongeDb) Put(key []byte, value []byte) error {
- var (
- keybrief = key
- valbrief = value
- )
- if len(keybrief) > 8 {
- keybrief = keybrief[:8]
- }
- if len(valbrief) > 8 {
- valbrief = valbrief[:8]
- }
- s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief))
-
- if s.values == nil {
- s.sponge.Write(key)
- s.sponge.Write(value)
- } else {
- s.keys = append(s.keys, string(key))
- s.values[string(key)] = string(value)
- }
- return nil
-}
-func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }
-
-func (s *spongeDb) Flush() {
- // Bottom-up, the longest path first
- sort.Sort(sort.Reverse(sort.StringSlice(s.keys)))
- for _, key := range s.keys {
- s.sponge.Write([]byte(key))
- s.sponge.Write([]byte(s.values[key]))
- }
-}
-
-// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
-type spongeBatch struct {
- db *spongeDb
-}
-
-func (b *spongeBatch) Put(key, value []byte) error {
- b.db.Put(key, value)
- return nil
-}
-func (b *spongeBatch) Delete(key []byte) error { panic("implement me") }
-func (b *spongeBatch) ValueSize() int { return 100 }
-func (b *spongeBatch) Write() error { return nil }
-func (b *spongeBatch) Reset() {}
-func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
-
-// TestCommitSequence tests that the trie.Commit operation writes the elements of the trie
-// in the expected order.
-// The test data was based on the 'master' code, and is basically random. It can be used
-// to check whether changes to the trie modifies the write order or data in any way.
-func TestCommitSequence(t *testing.T) {
- for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- }{
- {20, common.FromHex("2e4ec8744409f17d6a3fe1540282e3ba0cf434b3a11974a2a033e3caa476a83c")},
- {200, common.FromHex("f7abb2c93e89e7e68696d855fa8982cb454190dcd7e4e7f4c7d60fd5c9f465f3")},
- {2000, common.FromHex("226f735a06e25b5306216d52ce0652ba9df17341bb0d1ae8be5484d691e8fe5c")},
- } {
- addresses, accounts := makeAccounts(tc.count)
- // This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
- // Fill the trie with elements
- for i := 0; i < tc.count; i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- // Flush memdb -> disk (sponge)
- db.Commit(root)
- if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
- t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
- }
- }
-}
-
-// TestCommitSequenceRandomBlobs is identical to TestCommitSequence
-// but uses random blobs instead of 'accounts'
-func TestCommitSequenceRandomBlobs(t *testing.T) {
- for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- }{
- {20, common.FromHex("8016650c7a50cf88485fd06cde52d634a89711051107f00d21fae98234f2f13d")},
- {200, common.FromHex("dde92ca9812e068e6982d04b40846dc65a61a9fd4996fc0f55f2fde172a8e13c")},
- {2000, common.FromHex("ab553a7f9aff82e3929c382908e30ef7dd17a332933e92ba3fe873fc661ef382")},
- } {
- prng := rand.New(rand.NewSource(int64(i)))
- // This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
- // Fill the trie with elements
- for i := 0; i < tc.count; i++ {
- key := make([]byte, 32)
- var val []byte
- // 50% short elements, 50% large elements
- if prng.Intn(2) == 0 {
- val = make([]byte, 1+prng.Intn(32))
- } else {
- val = make([]byte, 1+prng.Intn(4096))
- }
- prng.Read(key)
- prng.Read(val)
- trie.MustUpdate(key, val)
- }
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- // Flush memdb -> disk (sponge)
- db.Commit(root)
- if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
- t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
- }
- }
-}
-
-func TestCommitSequenceStackTrie(t *testing.T) {
- for count := 1; count < 200; count++ {
- prng := rand.New(rand.NewSource(int64(count)))
- // This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "a",
- values: make(map[string]string),
- }
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
-
- // Another sponge is used for the stacktrie commits
- stackTrieSponge := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "b",
- values: make(map[string]string),
- }
- options := NewStackTrieOptions()
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
- })
- stTrie := NewStackTrie(options)
-
- // Fill the trie with elements
- for i := 0; i < count; i++ {
- // For the stack trie, we need to do inserts in proper order
- key := make([]byte, 32)
- binary.BigEndian.PutUint64(key, uint64(i))
- var val []byte
- // 50% short elements, 50% large elements
- if prng.Intn(2) == 0 {
- val = make([]byte, 1+prng.Intn(32))
- } else {
- val = make([]byte, 1+prng.Intn(1024))
- }
- prng.Read(val)
- trie.Update(key, val)
- stTrie.Update(key, val)
- }
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- // Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- db.Commit(root)
- s.Flush()
-
- // And flush stacktrie -> disk
- stRoot := stTrie.Commit()
- if stRoot != root {
- t.Fatalf("root wrong, got %x exp %x", stRoot, root)
- }
- stackTrieSponge.Flush()
- if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
- // Show the journal
- t.Logf("Expected:")
- for i, v := range s.journal {
- t.Logf("op %d: %v", i, v)
- }
- t.Logf("Stacktrie:")
- for i, v := range stackTrieSponge.journal {
- t.Logf("op %d: %v", i, v)
- }
- t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", count, got, exp)
- }
- }
-}
-
-// TestCommitSequenceSmallRoot tests that a trie which is essentially only a
-// small (<32 byte) shortnode with an included value is properly committed to a
-// database.
-// This case might not matter, since in practice, all keys are 32 bytes, which means
-// that even a small trie which contains a leaf will have an extension making it
-// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
-func TestCommitSequenceSmallRoot(t *testing.T) {
- s := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "a",
- values: make(map[string]string),
- }
- db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
- trie := NewEmpty(db)
-
- // Another sponge is used for the stacktrie commits
- stackTrieSponge := &spongeDb{
- sponge: sha3.NewLegacyKeccak256(),
- id: "b",
- values: make(map[string]string),
- }
- options := NewStackTrieOptions()
- options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
- })
- stTrie := NewStackTrie(options)
-
- // Add a single small-element to the trie(s)
- key := make([]byte, 5)
- key[0] = 1
- trie.Update(key, []byte{0x1})
- stTrie.Update(key, []byte{0x1})
-
- // Flush trie -> database
- root, nodes, _ := trie.Commit(false)
- // Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
- db.Commit(root)
-
- // And flush stacktrie -> disk
- stRoot := stTrie.Commit()
- if stRoot != root {
- t.Fatalf("root wrong, got %x exp %x", stRoot, root)
- }
- t.Logf("root: %x\n", stRoot)
-
- s.Flush()
- stackTrieSponge.Flush()
- if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
- t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
- }
-}
-
-// BenchmarkCommitAfterHashFixedSize benchmarks the Commit (after Hash) of a fixed number of updates to a trie.
-// This benchmark is meant to capture the difference on efficiency of small versus large changes. Typically,
-// storage tries are small (a couple of entries), whereas the full post-block account trie update is large (a couple
-// of thousand entries)
-func BenchmarkHashFixedSize(b *testing.B) {
- b.Run("10", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(20)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
- b.Run("100", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
-
- b.Run("1K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(1000)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
- b.Run("10K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(10000)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
- b.Run("100K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100000)
- for i := 0; i < b.N; i++ {
- benchmarkHashFixedSize(b, acc, add)
- }
- })
-}
-
-func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
- b.ReportAllocs()
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- b.StartTimer()
- trie.Hash()
- b.StopTimer()
-}
-
-func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
- b.Run("10", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(20)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
- b.Run("100", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
-
- b.Run("1K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(1000)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
- b.Run("10K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(10000)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
- b.Run("100K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100000)
- for i := 0; i < b.N; i++ {
- benchmarkCommitAfterHashFixedSize(b, acc, add)
- }
- })
-}
-
-func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
- b.ReportAllocs()
- trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- // Insert the accounts into the trie and hash it
- trie.Hash()
- b.StartTimer()
- trie.Commit(false)
- b.StopTimer()
-}
-
-func getString(trie *Trie, k string) []byte {
- return trie.MustGet([]byte(k))
-}
-
-func updateString(trie *Trie, k, v string) {
- trie.MustUpdate([]byte(k), []byte(v))
-}
-
-func deleteString(trie *Trie, k string) {
- trie.MustDelete([]byte(k))
-}
-
-func TestDecodeNode(t *testing.T) {
- t.Parallel()
-
- var (
- hash = make([]byte, 20)
- elems = make([]byte, 20)
- )
- for i := 0; i < 5000000; i++ {
- prng.Read(hash)
- prng.Read(elems)
- decodeNode(hash, elems)
- }
-}
-
-func FuzzTrie(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- var steps = 500
- var input = bytes.NewReader(data)
- var finishedFn = func() bool {
- steps--
- return steps < 0 || input.Len() == 0
- }
- if err := runRandTest(generateSteps(finishedFn, input)); err != nil {
- t.Fatal(err)
- }
- })
-}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
deleted file mode 100644
index 8bd0a18ba3..0000000000
--- a/trie/trienode/node.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package trienode
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/ava-labs/libevm/common"
-)
-
-// Node is a wrapper which contains the encoded blob of the trie node and its
-// node hash. It is general enough that can be used to represent trie node
-// corresponding to different trie implementations.
-type Node struct {
- Hash common.Hash // Node hash, empty for deleted node
- Blob []byte // Encoded node blob, nil for the deleted node
-}
-
-// Size returns the total memory size used by this node.
-func (n *Node) Size() int {
- return len(n.Blob) + common.HashLength
-}
-
-// IsDeleted returns the indicator if the node is marked as deleted.
-func (n *Node) IsDeleted() bool {
- return len(n.Blob) == 0
-}
-
-// New constructs a node with provided node information.
-func New(hash common.Hash, blob []byte) *Node {
- return &Node{Hash: hash, Blob: blob}
-}
-
-// NewDeleted constructs a node which is deleted.
-func NewDeleted() *Node { return New(common.Hash{}, nil) }
-
-// leaf represents a trie leaf node
-type leaf struct {
- Blob []byte // raw blob of leaf
- Parent common.Hash // the hash of parent node
-}
-
-// NodeSet contains a set of nodes collected during the commit operation.
-// Each node is keyed by path. It's not thread-safe to use.
-type NodeSet struct {
- Owner common.Hash
- Leaves []*leaf
- Nodes map[string]*Node
- updates int // the count of updated and inserted nodes
- deletes int // the count of deleted nodes
-}
-
-// NewNodeSet initializes a node set. The owner is zero for the account trie and
-// the owning account address hash for storage tries.
-func NewNodeSet(owner common.Hash) *NodeSet {
- return &NodeSet{
- Owner: owner,
- Nodes: make(map[string]*Node),
- }
-}
-
-// ForEachWithOrder iterates the nodes with the order from bottom to top,
-// right to left, nodes with the longest path will be iterated first.
-func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
- var paths []string
- for path := range set.Nodes {
- paths = append(paths, path)
- }
- // Bottom-up, the longest path first
- sort.Sort(sort.Reverse(sort.StringSlice(paths)))
- for _, path := range paths {
- callback(path, set.Nodes[path])
- }
-}
-
-// AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *Node) {
- if n.IsDeleted() {
- set.deletes += 1
- } else {
- set.updates += 1
- }
- set.Nodes[string(path)] = n
-}
-
-// Merge adds a set of nodes into the set.
-func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
- if set.Owner != owner {
- return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
- }
- for path, node := range nodes {
- prev, ok := set.Nodes[path]
- if ok {
- // overwrite happens, revoke the counter
- if prev.IsDeleted() {
- set.deletes -= 1
- } else {
- set.updates -= 1
- }
- }
- set.AddNode([]byte(path), node)
- }
- return nil
-}
-
-// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
-// we get rid of it?
-func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
- set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent})
-}
-
-// Size returns the number of dirty nodes in set.
-func (set *NodeSet) Size() (int, int) {
- return set.updates, set.deletes
-}
-
-// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can
-// we get rid of it?
-func (set *NodeSet) Hashes() []common.Hash {
- var ret []common.Hash
- for _, node := range set.Nodes {
- ret = append(ret, node.Hash)
- }
- return ret
-}
-
-// Summary returns a string-representation of the NodeSet.
-func (set *NodeSet) Summary() string {
- var out = new(strings.Builder)
- fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
- if set.Nodes != nil {
- for path, n := range set.Nodes {
- // Deletion
- if n.IsDeleted() {
- fmt.Fprintf(out, " [-]: %x\n", path)
- continue
- }
- // Insertion or update
- fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
- }
- }
- for _, n := range set.Leaves {
- fmt.Fprintf(out, "[leaf]: %v\n", n)
- }
- return out.String()
-}
-
-// MergedNodeSet represents a merged node set for a group of tries.
-type MergedNodeSet struct {
- Sets map[common.Hash]*NodeSet
-}
-
-// NewMergedNodeSet initializes an empty merged set.
-func NewMergedNodeSet() *MergedNodeSet {
- return &MergedNodeSet{Sets: make(map[common.Hash]*NodeSet)}
-}
-
-// NewWithNodeSet constructs a merged nodeset with the provided single set.
-func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
- merged := NewMergedNodeSet()
- merged.Merge(set)
- return merged
-}
-
-// Merge merges the provided dirty nodes of a trie into the set. The assumption
-// is held that no duplicated set belonging to the same trie will be merged twice.
-func (set *MergedNodeSet) Merge(other *NodeSet) error {
- subset, present := set.Sets[other.Owner]
- if present {
- return subset.Merge(other.Owner, other.Nodes)
- }
- set.Sets[other.Owner] = other
- return nil
-}
-
-// Flatten returns a two-dimensional map for internal nodes.
-func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
- nodes := make(map[common.Hash]map[string]*Node)
- for owner, set := range set.Sets {
- nodes[owner] = set.Nodes
- }
- return nodes
-}
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
deleted file mode 100644
index 9bdc7bb9e8..0000000000
--- a/trie/triestate/state.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see
-
-package triestate
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/crypto"
- "github.com/ava-labs/libevm/rlp"
- "golang.org/x/crypto/sha3"
-)
-
-// Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia
-// tree or Verkle tree.
-type Trie interface {
- // Get returns the value for key stored in the trie.
- Get(key []byte) ([]byte, error)
-
- // Update associates key with value in the trie.
- Update(key, value []byte) error
-
- // Delete removes any existing value for key from the trie.
- Delete(key []byte) error
-
- // Commit the trie and returns a set of dirty nodes generated along with
- // the new root hash.
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
-}
-
-// TrieLoader wraps functions to load tries.
-type TrieLoader interface {
- // OpenTrie opens the main account trie.
- OpenTrie(root common.Hash) (Trie, error)
-
- // OpenStorageTrie opens the storage trie of an account.
- OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
-}
-
-// Set represents a collection of mutated states during a state transition.
-// The value refers to the original content of state before the transition
-// is made. Nil means that the state was not present previously.
-type Set struct {
- Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
- Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
- Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion
- size common.StorageSize // Approximate size of set
-}
-
-// New constructs the state set with provided data.
-func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
- return &Set{
- Accounts: accounts,
- Storages: storages,
- Incomplete: incomplete,
- }
-}
-
-// Size returns the approximate memory size occupied by the set.
-func (s *Set) Size() common.StorageSize {
- if s.size != 0 {
- return s.size
- }
- for _, account := range s.Accounts {
- s.size += common.StorageSize(common.AddressLength + len(account))
- }
- for _, slots := range s.Storages {
- for _, val := range slots {
- s.size += common.StorageSize(common.HashLength + len(val))
- }
- s.size += common.StorageSize(common.AddressLength)
- }
- s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
- return s.size
-}
-
-// context wraps all fields for executing state diffs.
-type context struct {
- prevRoot common.Hash
- postRoot common.Hash
- accounts map[common.Address][]byte
- storages map[common.Address]map[common.Hash][]byte
- accountTrie Trie
- nodes *trienode.MergedNodeSet
-}
-
-// Apply traverses the provided state diffs, apply them in the associated
-// post-state and return the generated dirty trie nodes. The state can be
-// loaded via the provided trie loader.
-func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
- tr, err := loader.OpenTrie(postRoot)
- if err != nil {
- return nil, err
- }
- ctx := &context{
- prevRoot: prevRoot,
- postRoot: postRoot,
- accounts: accounts,
- storages: storages,
- accountTrie: tr,
- nodes: trienode.NewMergedNodeSet(),
- }
- for addr, account := range accounts {
- var err error
- if len(account) == 0 {
- err = deleteAccount(ctx, loader, addr)
- } else {
- err = updateAccount(ctx, loader, addr)
- }
- if err != nil {
- return nil, fmt.Errorf("failed to revert state, err: %w", err)
- }
- }
- root, result, err := tr.Commit(false)
- if err != nil {
- return nil, err
- }
- if root != prevRoot {
- return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
- }
- if err := ctx.nodes.Merge(result); err != nil {
- return nil, err
- }
- return ctx.nodes.Flatten(), nil
-}
-
-// updateAccount the account was present in prev-state, and may or may not
-// existent in post-state. Apply the reverse diff and verify if the storage
-// root matches the one in prev-state account.
-func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
- // The account was present in prev-state, decode it from the
- // 'slim-rlp' format bytes.
- h := newHasher()
- defer h.release()
-
- addrHash := h.hash(addr.Bytes())
- prev, err := types.FullAccount(ctx.accounts[addr])
- if err != nil {
- return err
- }
- // The account may or may not existent in post-state, try to
- // load it and decode if it's found.
- blob, err := ctx.accountTrie.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- post := types.NewEmptyStateAccount()
- if len(blob) != 0 {
- if err := rlp.DecodeBytes(blob, &post); err != nil {
- return err
- }
- }
- // Apply all storage changes into the post-state storage trie.
- st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
- if err != nil {
- return err
- }
- for key, val := range ctx.storages[addr] {
- var err error
- if len(val) == 0 {
- err = st.Delete(key.Bytes())
- } else {
- err = st.Update(key.Bytes(), val)
- }
- if err != nil {
- return err
- }
- }
- root, result, err := st.Commit(false)
- if err != nil {
- return err
- }
- if root != prev.Root {
- return errors.New("failed to reset storage trie")
- }
- // The returned set can be nil if storage trie is not changed
- // at all.
- if result != nil {
- if err := ctx.nodes.Merge(result); err != nil {
- return err
- }
- }
- // Write the prev-state account into the main trie
- full, err := rlp.EncodeToBytes(prev)
- if err != nil {
- return err
- }
- return ctx.accountTrie.Update(addrHash.Bytes(), full)
-}
-
-// deleteAccount the account was not present in prev-state, and is expected
-// to be existent in post-state. Apply the reverse diff and verify if the
-// account and storage is wiped out correctly.
-func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
- // The account must be existent in post-state, load the account.
- h := newHasher()
- defer h.release()
-
- addrHash := h.hash(addr.Bytes())
- blob, err := ctx.accountTrie.Get(addrHash.Bytes())
- if err != nil {
- return err
- }
- if len(blob) == 0 {
- return fmt.Errorf("account is non-existent %#x", addrHash)
- }
- var post types.StateAccount
- if err := rlp.DecodeBytes(blob, &post); err != nil {
- return err
- }
- st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
- if err != nil {
- return err
- }
- for key, val := range ctx.storages[addr] {
- if len(val) != 0 {
- return errors.New("expect storage deletion")
- }
- if err := st.Delete(key.Bytes()); err != nil {
- return err
- }
- }
- root, result, err := st.Commit(false)
- if err != nil {
- return err
- }
- if root != types.EmptyRootHash {
- return errors.New("failed to clear storage trie")
- }
- // The returned set can be nil if storage trie is not changed
- // at all.
- if result != nil {
- if err := ctx.nodes.Merge(result); err != nil {
- return err
- }
- }
- // Delete the post-state account from the main trie.
- return ctx.accountTrie.Delete(addrHash.Bytes())
-}
-
-// hasher is used to compute the sha256 hash of the provided data.
-type hasher struct{ sha crypto.KeccakState }
-
-var hasherPool = sync.Pool{
- New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
-}
-
-func newHasher() *hasher {
- return hasherPool.Get().(*hasher)
-}
-
-func (h *hasher) hash(data []byte) common.Hash {
- return crypto.HashData(h.sha, data)
-}
-
-func (h *hasher) release() {
- hasherPool.Put(h)
-}
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go
deleted file mode 100644
index 0287a7d879..0000000000
--- a/trie/utils/verkle.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2023 go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package utils
-
-import (
- "encoding/binary"
- "sync"
-
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/libevm/common/lru"
- "github.com/crate-crypto/go-ipa/bandersnatch/fr"
- "github.com/gballet/go-verkle"
- "github.com/holiman/uint256"
-)
-
-const (
- // The spec of verkle key encoding can be found here.
- // https://notes.ethereum.org/@vbuterin/verkle_tree_eip#Tree-embedding
- VersionLeafKey = 0
- BalanceLeafKey = 1
- NonceLeafKey = 2
- CodeKeccakLeafKey = 3
- CodeSizeLeafKey = 4
-)
-
-var (
- zero = uint256.NewInt(0)
- verkleNodeWidthLog2 = 8
- headerStorageOffset = uint256.NewInt(64)
- mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(256), 31-uint(verkleNodeWidthLog2))
- codeOffset = uint256.NewInt(128)
- verkleNodeWidth = uint256.NewInt(256)
- codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
-
- index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64]
-
- // cacheHitGauge is the metric to track how many cache hit occurred.
- cacheHitGauge = metrics.NewRegisteredGauge("trie/verkle/cache/hit", nil)
-
- // cacheMissGauge is the metric to track how many cache miss occurred.
- cacheMissGauge = metrics.NewRegisteredGauge("trie/verkle/cache/miss", nil)
-)
-
-func init() {
- // The byte array is the Marshalled output of the point computed as such:
- //
- // var (
- // config = verkle.GetConfig()
- // fr verkle.Fr
- // )
- // verkle.FromLEBytes(&fr, []byte{2, 64})
- // point := config.CommitToPoly([]verkle.Fr{fr}, 1)
- index0Point = new(verkle.Point)
- err := index0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191})
- if err != nil {
- panic(err)
- }
-}
-
-// PointCache is the LRU cache for storing evaluated address commitment.
-type PointCache struct {
- lru lru.BasicLRU[string, *verkle.Point]
- lock sync.RWMutex
-}
-
-// NewPointCache returns the cache with specified size.
-func NewPointCache(maxItems int) *PointCache {
- return &PointCache{
- lru: lru.NewBasicLRU[string, *verkle.Point](maxItems),
- }
-}
-
-// Get returns the cached commitment for the specified address, or computing
-// it on the flight.
-func (c *PointCache) Get(addr []byte) *verkle.Point {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- p, ok := c.lru.Get(string(addr))
- if ok {
- cacheHitGauge.Inc(1)
- return p
- }
- cacheMissGauge.Inc(1)
- p = evaluateAddressPoint(addr)
- c.lru.Add(string(addr), p)
- return p
-}
-
-// GetStem returns the first 31 bytes of the tree key as the tree stem. It only
-// works for the account metadata whose treeIndex is 0.
-func (c *PointCache) GetStem(addr []byte) []byte {
- p := c.Get(addr)
- return pointToHash(p, 0)[:31]
-}
-
-// GetTreeKey performs both the work of the spec's get_tree_key function, and that
-// of pedersen_hash: it builds the polynomial in pedersen_hash without having to
-// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte
-// array. Since at most the first 5 coefficients of the polynomial will be non-zero,
-// these 5 coefficients are created directly.
-func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte {
- if len(address) < 32 {
- var aligned [32]byte
- address = append(aligned[:32-len(address)], address...)
- }
- // poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high]
- var poly [5]fr.Element
-
- // 32-byte address, interpreted as two little endian
- // 16-byte numbers.
- verkle.FromLEBytes(&poly[1], address[:16])
- verkle.FromLEBytes(&poly[2], address[16:])
-
- // treeIndex must be interpreted as a 32-byte aligned little-endian integer.
- // e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00.
- // poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes).
- //
- // To avoid unnecessary endianness conversions for go-ipa, we do some trick:
- // - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of
- // 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})).
- // - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of
- // the 32-byte aligned big-endian representation (BE({00,00,...}).
- trieIndexBytes := treeIndex.Bytes32()
- verkle.FromBytes(&poly[3], trieIndexBytes[16:])
- verkle.FromBytes(&poly[4], trieIndexBytes[:16])
-
- cfg := verkle.GetConfig()
- ret := cfg.CommitToPoly(poly[:], 0)
-
- // add a constant point corresponding to poly[0]=[2+256*64].
- ret.Add(ret, index0Point)
-
- return pointToHash(ret, subIndex)
-}
-
-// GetTreeKeyWithEvaluatedAddress is basically identical to GetTreeKey, the only
-// difference is a part of polynomial is already evaluated.
-//
-// Specifically, poly = [2+256*64, address_le_low, address_le_high] is already
-// evaluated.
-func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
- var poly [5]fr.Element
-
- poly[0].SetZero()
- poly[1].SetZero()
- poly[2].SetZero()
-
- // little-endian, 32-byte aligned treeIndex
- var index [32]byte
- for i := 0; i < len(treeIndex); i++ {
- binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i])
- }
- verkle.FromLEBytes(&poly[3], index[:16])
- verkle.FromLEBytes(&poly[4], index[16:])
-
- cfg := verkle.GetConfig()
- ret := cfg.CommitToPoly(poly[:], 0)
-
- // add the pre-evaluated address
- ret.Add(ret, evaluated)
-
- return pointToHash(ret, subIndex)
-}
-
-// VersionKey returns the verkle tree key of the version field for the specified account.
-func VersionKey(address []byte) []byte {
- return GetTreeKey(address, zero, VersionLeafKey)
-}
-
-// BalanceKey returns the verkle tree key of the balance field for the specified account.
-func BalanceKey(address []byte) []byte {
- return GetTreeKey(address, zero, BalanceLeafKey)
-}
-
-// NonceKey returns the verkle tree key of the nonce field for the specified account.
-func NonceKey(address []byte) []byte {
- return GetTreeKey(address, zero, NonceLeafKey)
-}
-
-// CodeKeccakKey returns the verkle tree key of the code keccak field for
-// the specified account.
-func CodeKeccakKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeKeccakLeafKey)
-}
-
-// CodeSizeKey returns the verkle tree key of the code size field for the
-// specified account.
-func CodeSizeKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeSizeLeafKey)
-}
-
-func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) {
- var (
- chunkOffset = new(uint256.Int).Add(codeOffset, chunk)
- treeIndex = new(uint256.Int).Div(chunkOffset, verkleNodeWidth)
- subIndexMod = new(uint256.Int).Mod(chunkOffset, verkleNodeWidth)
- )
- var subIndex byte
- if len(subIndexMod) != 0 {
- subIndex = byte(subIndexMod[0])
- }
- return treeIndex, subIndex
-}
-
-// CodeChunkKey returns the verkle tree key of the code chunk for the
-// specified account.
-func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
- treeIndex, subIndex := codeChunkIndex(chunk)
- return GetTreeKey(address, treeIndex, subIndex)
-}
-
-func storageIndex(bytes []byte) (*uint256.Int, byte) {
- // If the storage slot is in the header, we need to add the header offset.
- var key uint256.Int
- key.SetBytes(bytes)
- if key.Cmp(codeStorageDelta) < 0 {
- // This addition is always safe; it can't ever overflow since pos
-
-package utils
-
-import (
- "bytes"
- "testing"
-
- "github.com/gballet/go-verkle"
- "github.com/holiman/uint256"
-)
-
-func TestTreeKey(t *testing.T) {
- var (
- address = []byte{0x01}
- addressEval = evaluateAddressPoint(address)
- smallIndex = uint256.NewInt(1)
- largeIndex = uint256.NewInt(10000)
- smallStorage = []byte{0x1}
- largeStorage = bytes.Repeat([]byte{0xff}, 16)
- )
- if !bytes.Equal(VersionKey(address), VersionKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched version key")
- }
- if !bytes.Equal(BalanceKey(address), BalanceKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched balance key")
- }
- if !bytes.Equal(NonceKey(address), NonceKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched nonce key")
- }
- if !bytes.Equal(CodeKeccakKey(address), CodeKeccakKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code keccak key")
- }
- if !bytes.Equal(CodeSizeKey(address), CodeSizeKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code size key")
- }
- if !bytes.Equal(CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)) {
- t.Fatal("Unmatched code chunk key")
- }
- if !bytes.Equal(CodeChunkKey(address, largeIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, largeIndex)) {
- t.Fatal("Unmatched code chunk key")
- }
- if !bytes.Equal(StorageSlotKey(address, smallStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, smallStorage)) {
- t.Fatal("Unmatched storage slot key")
- }
- if !bytes.Equal(StorageSlotKey(address, largeStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, largeStorage)) {
- t.Fatal("Unmatched storage slot key")
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkTreeKey
-// BenchmarkTreeKey-8 398731 2961 ns/op 32 B/op 1 allocs/op
-func BenchmarkTreeKey(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- BalanceKey([]byte{0x01})
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkTreeKeyWithEvaluation
-// BenchmarkTreeKeyWithEvaluation-8 513855 2324 ns/op 32 B/op 1 allocs/op
-func BenchmarkTreeKeyWithEvaluation(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- addr := []byte{0x01}
- eval := evaluateAddressPoint(addr)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- BalanceKeyWithEvaluatedAddress(eval)
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkStorageKey
-// BenchmarkStorageKey-8 230516 4584 ns/op 96 B/op 3 allocs/op
-func BenchmarkStorageKey(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- StorageSlotKey([]byte{0x01}, bytes.Repeat([]byte{0xff}, 32))
- }
-}
-
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/ava-labs/coreth/trie/utils
-// cpu: VirtualApple @ 2.50GHz
-// BenchmarkStorageKeyWithEvaluation
-// BenchmarkStorageKeyWithEvaluation-8 320125 3753 ns/op 96 B/op 3 allocs/op
-func BenchmarkStorageKeyWithEvaluation(b *testing.B) {
- // Initialize the IPA settings which can be pretty expensive.
- verkle.GetConfig()
-
- addr := []byte{0x01}
- eval := evaluateAddressPoint(addr)
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- StorageSlotKeyWithEvaluatedAddress(eval, bytes.Repeat([]byte{0xff}, 32))
- }
-}
diff --git a/trie/verkle.go b/trie/verkle.go
deleted file mode 100644
index 7c6eed0f43..0000000000
--- a/trie/verkle.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2023 go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/utils"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/gballet/go-verkle"
- "github.com/holiman/uint256"
-)
-
-var (
- zero [32]byte
- errInvalidRootType = errors.New("invalid node type for root")
-)
-
-// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie
-// interface so that Verkle trees can be reused verbatim.
-type VerkleTrie struct {
- root verkle.VerkleNode
- cache *utils.PointCache
- reader *trieReader
-}
-
-// NewVerkleTrie constructs a verkle tree based on the specified root hash.
-func NewVerkleTrie(root common.Hash, db database.Database, cache *utils.PointCache) (*VerkleTrie, error) {
- reader, err := newTrieReader(root, common.Hash{}, db)
- if err != nil {
- return nil, err
- }
- // Parse the root verkle node if it's not empty.
- node := verkle.New()
- if root != types.EmptyVerkleHash && root != types.EmptyRootHash {
- blob, err := reader.node(nil, common.Hash{})
- if err != nil {
- return nil, err
- }
- node, err = verkle.ParseNode(blob, 0)
- if err != nil {
- return nil, err
- }
- }
- return &VerkleTrie{
- root: node,
- cache: cache,
- reader: reader,
- }, nil
-}
-
-// GetKey returns the sha3 preimage of a hashed key that was previously used
-// to store a value.
-func (t *VerkleTrie) GetKey(key []byte) []byte {
- return key
-}
-
-// GetAccount implements state.Trie, retrieving the account with the specified
-// account address. If the specified account is not in the verkle tree, nil will
-// be returned. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) {
- var (
- acc = &types.StateAccount{}
- values [][]byte
- err error
- )
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- values, err = n.GetValuesAtStem(t.cache.GetStem(addr[:]), t.nodeResolver)
- if err != nil {
- return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err)
- }
- default:
- return nil, errInvalidRootType
- }
- if values == nil {
- return nil, nil
- }
- // Decode nonce in little-endian
- if len(values[utils.NonceLeafKey]) > 0 {
- acc.Nonce = binary.LittleEndian.Uint64(values[utils.NonceLeafKey])
- }
- // Decode balance in little-endian
- var balance [32]byte
- copy(balance[:], values[utils.BalanceLeafKey])
- for i := 0; i < len(balance)/2; i++ {
- balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1]
- }
- acc.Balance = new(uint256.Int).SetBytes32(balance[:])
-
- // Decode codehash
- acc.CodeHash = values[utils.CodeKeccakLeafKey]
-
- // TODO account.Root is leave as empty. How should we handle the legacy account?
- return acc, nil
-}
-
-// GetStorage implements state.Trie, retrieving the storage slot with the specified
-// account address and storage key. If the specified slot is not in the verkle tree,
-// nil will be returned. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
- val, err := t.root.Get(k, t.nodeResolver)
- if err != nil {
- return nil, err
- }
- return common.TrimLeftZeroes(val), nil
-}
-
-// UpdateAccount implements state.Trie, writing the provided account into the tree.
-// If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) error {
- var (
- err error
- nonce, balance [32]byte
- values = make([][]byte, verkle.NodeWidth)
- )
- values[utils.VersionLeafKey] = zero[:]
- values[utils.CodeKeccakLeafKey] = acc.CodeHash[:]
-
- // Encode nonce in little-endian
- binary.LittleEndian.PutUint64(nonce[:], acc.Nonce)
- values[utils.NonceLeafKey] = nonce[:]
-
- // Encode balance in little-endian
- bytes := acc.Balance.Bytes()
- if len(bytes) > 0 {
- for i, b := range bytes {
- balance[len(bytes)-i-1] = b
- }
- }
- values[utils.BalanceLeafKey] = balance[:]
-
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- err = n.InsertValuesAtStem(t.cache.GetStem(addr[:]), values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err)
- }
- default:
- return errInvalidRootType
- }
- // TODO figure out if the code size needs to be updated, too
- return nil
-}
-
-// UpdateStorage implements state.Trie, writing the provided storage slot into
-// the tree. If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error {
- // Left padding the slot value to 32 bytes.
- var v [32]byte
- if len(value) >= 32 {
- copy(v[:], value[:32])
- } else {
- copy(v[32-len(value):], value[:])
- }
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(address.Bytes()), key)
- return t.root.Insert(k, v[:], t.nodeResolver)
-}
-
-// DeleteAccount implements state.Trie, deleting the specified account from the
-// trie. If the account was not existent in the trie, no error will be returned.
-// If the trie is corrupted, an error will be returned.
-func (t *VerkleTrie) DeleteAccount(addr common.Address) error {
- var (
- err error
- values = make([][]byte, verkle.NodeWidth)
- )
- for i := 0; i < verkle.NodeWidth; i++ {
- values[i] = zero[:]
- }
- switch n := t.root.(type) {
- case *verkle.InternalNode:
- err = n.InsertValuesAtStem(t.cache.GetStem(addr.Bytes()), values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("DeleteAccount (%x) error: %v", addr, err)
- }
- default:
- return errInvalidRootType
- }
- return nil
-}
-
-// DeleteStorage implements state.Trie, deleting the specified storage slot from
-// the trie. If the storage slot was not existent in the trie, no error will be
-// returned. If the trie is corrupted, an error will be returned.
-func (t *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error {
- var zero [32]byte
- k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
- return t.root.Insert(k, zero[:], t.nodeResolver)
-}
-
-// Hash returns the root hash of the tree. It does not write to the database and
-// can be used even if the tree doesn't have one.
-func (t *VerkleTrie) Hash() common.Hash {
- return t.root.Commit().Bytes()
-}
-
-// Commit writes all nodes to the tree's memory database.
-func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) {
- root, ok := t.root.(*verkle.InternalNode)
- if !ok {
- return common.Hash{}, nil, errors.New("unexpected root node type")
- }
- nodes, err := root.BatchSerialize()
- if err != nil {
- return common.Hash{}, nil, fmt.Errorf("serializing tree nodes: %s", err)
- }
- nodeset := trienode.NewNodeSet(common.Hash{})
- for _, node := range nodes {
- // hash parameter is not used in pathdb
- nodeset.AddNode(node.Path, trienode.New(common.Hash{}, node.SerializedBytes))
- }
- // Serialize root commitment form
- return t.Hash(), nodeset, nil
-}
-
-// NodeIterator implements state.Trie, returning an iterator that returns
-// nodes of the trie. Iteration starts at the key after the given start key.
-//
-// TODO(gballet, rjl493456442) implement it.
-func (t *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
- panic("not implemented")
-}
-
-// Prove implements state.Trie, constructing a Merkle proof for key. The result
-// contains all encoded nodes on the path to the value at key. The value itself
-// is also included in the last node and can be retrieved by verifying the proof.
-//
-// If the trie does not contain a value for key, the returned proof contains all
-// nodes of the longest existing prefix of the key (at least the root), ending
-// with the node that proves the absence of the key.
-//
-// TODO(gballet, rjl493456442) implement it.
-func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
- panic("not implemented")
-}
-
-// Copy returns a deep-copied verkle tree.
-func (t *VerkleTrie) Copy() *VerkleTrie {
- return &VerkleTrie{
- root: t.root.Copy(),
- cache: t.cache,
- reader: t.reader,
- }
-}
-
-// IsVerkle indicates if the trie is a Verkle trie.
-func (t *VerkleTrie) IsVerkle() bool {
- return true
-}
-
-// ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which
-// are actual code, and 1 byte is the pushdata offset).
-type ChunkedCode []byte
-
-// Copy the values here so as to avoid an import cycle
-const (
- PUSH1 = byte(0x60)
- PUSH32 = byte(0x7f)
-)
-
-// ChunkifyCode generates the chunked version of an array representing EVM bytecode
-func ChunkifyCode(code []byte) ChunkedCode {
- var (
- chunkOffset = 0 // offset in the chunk
- chunkCount = len(code) / 31
- codeOffset = 0 // offset in the code
- )
- if len(code)%31 != 0 {
- chunkCount++
- }
- chunks := make([]byte, chunkCount*32)
- for i := 0; i < chunkCount; i++ {
- // number of bytes to copy, 31 unless the end of the code has been reached.
- end := 31 * (i + 1)
- if len(code) < end {
- end = len(code)
- }
- copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself
-
- // chunk offset = taken from the last chunk.
- if chunkOffset > 31 {
- // skip offset calculation if push data covers the whole chunk
- chunks[i*32] = 31
- chunkOffset = 1
- continue
- }
- chunks[32*i] = byte(chunkOffset)
- chunkOffset = 0
-
- // Check each instruction and update the offset it should be 0 unless
- // a PUSH-N overflows.
- for ; codeOffset < end; codeOffset++ {
- if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 {
- codeOffset += int(code[codeOffset] - PUSH1 + 1)
- if codeOffset+1 >= 31*(i+1) {
- codeOffset++
- chunkOffset = codeOffset - 31*(i+1)
- break
- }
- }
- }
- }
- return chunks
-}
-
-// UpdateContractCode implements state.Trie, writing the provided contract code
-// into the trie.
-func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
- var (
- chunks = ChunkifyCode(code)
- values [][]byte
- key []byte
- err error
- )
- for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 {
- groupOffset := (chunknr + 128) % 256
- if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ {
- values = make([][]byte, verkle.NodeWidth)
- key = utils.CodeChunkKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), uint256.NewInt(chunknr))
- }
- values[groupOffset] = chunks[i : i+32]
-
- // Reuse the calculated key to also update the code size.
- if i == 0 {
- cs := make([]byte, 32)
- binary.LittleEndian.PutUint64(cs, uint64(len(code)))
- values[utils.CodeSizeLeafKey] = cs
- }
- if groupOffset == 255 || len(chunks)-i <= 32 {
- switch root := t.root.(type) {
- case *verkle.InternalNode:
- err = root.InsertValuesAtStem(key[:31], values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err)
- }
- default:
- return errInvalidRootType
- }
- }
- }
- return nil
-}
-
-func (t *VerkleTrie) ToDot() string {
- return verkle.ToDot(t.root)
-}
-
-func (t *VerkleTrie) nodeResolver(path []byte) ([]byte, error) {
- return t.reader.node(path, common.Hash{})
-}
diff --git a/trie/verkle_test.go b/trie/verkle_test.go
deleted file mode 100644
index cd21b57d15..0000000000
--- a/trie/verkle_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2023 go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/utils"
- "github.com/ava-labs/libevm/common"
- "github.com/holiman/uint256"
-)
-
-var (
- accounts = map[common.Address]*types.StateAccount{
- {1}: {
- Nonce: 100,
- Balance: uint256.NewInt(100),
- CodeHash: common.Hash{0x1}.Bytes(),
- },
- {2}: {
- Nonce: 200,
- Balance: uint256.NewInt(200),
- CodeHash: common.Hash{0x2}.Bytes(),
- },
- }
- storages = map[common.Address]map[common.Hash][]byte{
- {1}: {
- common.Hash{10}: []byte{10},
- common.Hash{11}: []byte{11},
- common.MaxHash: []byte{0xff},
- },
- {2}: {
- common.Hash{20}: []byte{20},
- common.Hash{21}: []byte{21},
- common.MaxHash: []byte{0xff},
- },
- }
-)
-
-func TestVerkleTreeReadWrite(t *testing.T) {
- db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
- tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
-
- for addr, acct := range accounts {
- if err := tr.UpdateAccount(addr, acct); err != nil {
- t.Fatalf("Failed to update account, %v", err)
- }
- for key, val := range storages[addr] {
- if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
- t.Fatalf("Failed to update account, %v", err)
- }
- }
- }
-
- for addr, acct := range accounts {
- stored, err := tr.GetAccount(addr)
- if err != nil {
- t.Fatalf("Failed to get account, %v", err)
- }
- if !reflect.DeepEqual(stored, acct) {
- t.Fatal("account is not matched")
- }
- for key, val := range storages[addr] {
- stored, err := tr.GetStorage(addr, key.Bytes())
- if err != nil {
- t.Fatalf("Failed to get storage, %v", err)
- }
- if !bytes.Equal(stored, val) {
- t.Fatal("storage is not matched")
- }
- }
- }
-}
diff --git a/triedb/database.go b/triedb/database.go
deleted file mode 100644
index 88d6add3a8..0000000000
--- a/triedb/database.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package triedb
-
-import (
- "errors"
-
- "github.com/ava-labs/coreth/trie"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
- "github.com/ava-labs/coreth/triedb/database"
- "github.com/ava-labs/coreth/triedb/hashdb"
- "github.com/ava-labs/coreth/triedb/pathdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
- "github.com/ava-labs/libevm/log"
-)
-
-// Config defines all necessary options for database.
-type Config struct {
- Preimages bool // Flag whether the preimage of node key is recorded
- IsVerkle bool // Flag whether the db is holding a verkle tree
- HashDB *hashdb.Config // Configs for hash-based scheme
- PathDB *pathdb.Config // Configs for experimental path-based scheme
-}
-
-// HashDefaults represents a config for using hash-based scheme with
-// default settings.
-var HashDefaults = &Config{
- Preimages: false,
- HashDB: hashdb.Defaults,
-}
-
-// backend defines the methods needed to access/update trie nodes in different
-// state scheme.
-type backend interface {
- // Scheme returns the identifier of used storage scheme.
- Scheme() string
-
- // Initialized returns an indicator if the state data is already initialized
- // according to the state scheme.
- Initialized(genesisRoot common.Hash) bool
-
- // Size returns the current storage size of the diff layers on top of the
- // disk layer and the storage size of the nodes cached in the disk layer.
- //
- // For hash scheme, there is no differentiation between diff layer nodes
- // and dirty disk layer nodes, so both are merged into the second return.
- Size() (common.StorageSize, common.StorageSize)
-
- // Update performs a state transition by committing dirty nodes contained
- // in the given set in order to update state from the specified parent to
- // the specified root.
- //
- // The passed in maps(nodes, states) will be retained to avoid copying
- // everything. Therefore, these maps must not be changed afterwards.
- Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
-
- // Commit writes all relevant trie nodes belonging to the specified state
- // to disk. Report specifies whether logs will be displayed in info level.
- Commit(root common.Hash, report bool) error
-
- // Close closes the trie database backend and releases all held resources.
- Close() error
-}
-
-// Database is the wrapper of the underlying backend which is shared by different
-// types of node backend as an entrypoint. It's responsible for all interactions
-// relevant with trie nodes and node preimages.
-type Database struct {
- config *Config // Configuration for trie database
- diskdb ethdb.Database // Persistent database to store the snapshot
- preimages *preimageStore // The store for caching preimages
- backend backend // The backend for managing trie nodes
-}
-
-// NewDatabase initializes the trie database with default settings, note
-// the legacy hash-based scheme is used by default.
-func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
- // Sanitize the config and use the default one if it's not specified.
- if config == nil {
- config = HashDefaults
- }
- var preimages *preimageStore
- if config.Preimages {
- preimages = newPreimageStore(diskdb)
- }
- db := &Database{
- config: config,
- diskdb: diskdb,
- preimages: preimages,
- }
- if config.HashDB != nil && config.PathDB != nil {
- log.Crit("Both 'hash' and 'path' mode are configured")
- }
- if config.PathDB != nil {
- db.backend = pathdb.New(diskdb, config.PathDB)
- } else {
- var resolver hashdb.ChildResolver
- if config.IsVerkle {
- // TODO define verkle resolver
- log.Crit("Verkle node resolver is not defined")
- } else {
- resolver = trie.MerkleResolver{}
- }
- db.backend = hashdb.New(diskdb, config.HashDB, resolver)
- }
- return db
-}
-
-// Reader returns a reader for accessing all trie nodes with provided state root.
-// An error will be returned if the requested state is not available.
-func (db *Database) Reader(blockRoot common.Hash) (database.Reader, error) {
- switch b := db.backend.(type) {
- case *hashdb.Database:
- return b.Reader(blockRoot)
- case *pathdb.Database:
- return b.Reader(blockRoot)
- }
- return nil, errors.New("unknown backend")
-}
-
-// Update performs a state transition by committing dirty nodes contained in the
-// given set in order to update state from the specified parent to the specified
-// root. The held pre-images accumulated up to this point will be flushed in case
-// the size exceeds the threshold.
-//
-// The passed in maps(nodes, states) will be retained to avoid copying everything.
-// Therefore, these maps must not be changed afterwards.
-func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- if db.preimages != nil {
- db.preimages.commit(false)
- }
- return db.backend.Update(root, parent, block, nodes, states)
-}
-
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- if db.preimages != nil {
- db.preimages.commit(false)
- }
- hdb, ok := db.backend.(*hashdb.Database)
- if ok {
- return hdb.UpdateAndReferenceRoot(root, parent, block, nodes, states)
- }
- return db.backend.Update(root, parent, block, nodes, states)
-}
-
-// Commit iterates over all the children of a particular node, writes them out
-// to disk. As a side effect, all pre-images accumulated up to this point are
-// also written.
-func (db *Database) Commit(root common.Hash, report bool) error {
- if db.preimages != nil {
- db.preimages.commit(true)
- }
- return db.backend.Commit(root, report)
-}
-
-// Size returns the storage size of diff layer nodes above the persistent disk
-// layer, the dirty nodes buffered within the disk layer, and the size of cached
-// preimages.
-func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
- var (
- diffs, nodes common.StorageSize
- preimages common.StorageSize
- )
- diffs, nodes = db.backend.Size()
- if db.preimages != nil {
- preimages = db.preimages.size()
- }
- return diffs, nodes, preimages
-}
-
-// Initialized returns an indicator if the state data is already initialized
-// according to the state scheme.
-func (db *Database) Initialized(genesisRoot common.Hash) bool {
- return db.backend.Initialized(genesisRoot)
-}
-
-// Scheme returns the node scheme used in the database.
-func (db *Database) Scheme() string {
- return db.backend.Scheme()
-}
-
-// Close flushes the dangling preimages to disk and closes the trie database.
-// It is meant to be called when closing the blockchain object, so that all
-// resources held can be released correctly.
-func (db *Database) Close() error {
- db.WritePreimages()
- return db.backend.Close()
-}
-
-// WritePreimages flushes all accumulated preimages to disk forcibly.
-func (db *Database) WritePreimages() {
- if db.preimages != nil {
- db.preimages.commit(true)
- }
-}
-
-// Preimage retrieves a cached trie node pre-image from preimage store.
-func (db *Database) Preimage(hash common.Hash) []byte {
- if db.preimages == nil {
- return nil
- }
- return db.preimages.preimage(hash)
-}
-
-// InsertPreimage writes pre-images of trie node to the preimage store.
-func (db *Database) InsertPreimage(preimages map[common.Hash][]byte) {
- if db.preimages == nil {
- return
- }
- db.preimages.insertPreimage(preimages)
-}
-
-// Cap iteratively flushes old but still referenced trie nodes until the total
-// memory usage goes below the given threshold. The held pre-images accumulated
-// up to this point will be flushed in case the size exceeds the threshold.
-//
-// It's only supported by hash-based database and will return an error for others.
-func (db *Database) Cap(limit common.StorageSize) error {
- hdb, ok := db.backend.(*hashdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- if db.preimages != nil {
- db.preimages.commit(false)
- }
- return hdb.Cap(limit)
-}
-
-// Reference adds a new reference from a parent node to a child node. This function
-// is used to add reference between internal trie node and external node(e.g. storage
-// trie root), all internal trie nodes are referenced together by database itself.
-//
-// It's only supported by hash-based database and will return an error for others.
-func (db *Database) Reference(root common.Hash, parent common.Hash) error {
- hdb, ok := db.backend.(*hashdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- hdb.Reference(root, parent)
- return nil
-}
-
-// Dereference removes an existing reference from a root node. It's only
-// supported by hash-based database and will return an error for others.
-func (db *Database) Dereference(root common.Hash) error {
- hdb, ok := db.backend.(*hashdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- hdb.Dereference(root)
- return nil
-}
-
-// Recover rollbacks the database to a specified historical point. The state is
-// supported as the rollback destination only if it's canonical state and the
-// corresponding trie histories are existent. It's only supported by path-based
-// database and will return an error for others.
-func (db *Database) Recover(target common.Hash) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- var loader triestate.TrieLoader
- if db.config.IsVerkle {
- // TODO define verkle loader
- log.Crit("Verkle loader is not defined")
- } else {
- loader = trie.NewMerkleLoader(db)
- }
- return pdb.Recover(target, loader)
-}
-
-// Recoverable returns the indicator if the specified state is enabled to be
-// recovered. It's only supported by path-based database and will return an
-// error for others.
-func (db *Database) Recoverable(root common.Hash) (bool, error) {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return false, errors.New("not supported")
- }
- return pdb.Recoverable(root), nil
-}
-
-// Disable deactivates the database and invalidates all available state layers
-// as stale to prevent access to the persistent state, which is in the syncing
-// stage.
-//
-// It's only supported by path-based database and will return an error for others.
-func (db *Database) Disable() error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.Disable()
-}
-
-// Enable activates database and resets the state tree with the provided persistent
-// state root once the state sync is finished.
-func (db *Database) Enable(root common.Hash) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.Enable(root)
-}
-
-// Journal commits an entire diff hierarchy to disk into a single journal entry.
-// This is meant to be used during shutdown to persist the snapshot without
-// flattening everything down (bad for reorgs). It's only supported by path-based
-// database and will return an error for others.
-func (db *Database) Journal(root common.Hash) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.Journal(root)
-}
-
-// SetBufferSize sets the node buffer size to the provided value(in bytes).
-// It's only supported by path-based database and will return an error for
-// others.
-func (db *Database) SetBufferSize(size int) error {
- pdb, ok := db.backend.(*pathdb.Database)
- if !ok {
- return errors.New("not supported")
- }
- return pdb.SetBufferSize(size)
-}
-
-// IsVerkle returns the indicator if the database is holding a verkle tree.
-func (db *Database) IsVerkle() bool {
- return db.config.IsVerkle
-}
diff --git a/triedb/database/database.go b/triedb/database/database.go
deleted file mode 100644
index 44d841016b..0000000000
--- a/triedb/database/database.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package database
-
-import (
- "github.com/ava-labs/libevm/common"
-)
-
-// Reader wraps the Node method of a backing trie reader.
-type Reader interface {
- // Node retrieves the trie node blob with the provided trie identifier,
- // node path and the corresponding node hash. No error will be returned
- // if the node is not found.
- Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
-}
-
-// PreimageStore wraps the methods of a backing store for reading and writing
-// trie node preimages.
-type PreimageStore interface {
- // Preimage retrieves the preimage of the specified hash.
- Preimage(hash common.Hash) []byte
-
- // InsertPreimage commits a set of preimages along with their hashes.
- InsertPreimage(preimages map[common.Hash][]byte)
-}
-
-// Database wraps the methods of a backing trie store.
-type Database interface {
- PreimageStore
-
- // Reader returns a node reader associated with the specific state.
- // An error will be returned if the specified state is not available.
- Reader(stateRoot common.Hash) (Reader, error)
-}
diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go
index 681675132a..1ee00fbba4 100644
--- a/triedb/hashdb/database.go
+++ b/triedb/hashdb/database.go
@@ -36,13 +36,17 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/coreth/utils"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
+ "github.com/ava-labs/libevm/triedb"
+ "github.com/ava-labs/libevm/triedb/database"
+ ethhashdb "github.com/ava-labs/libevm/triedb/hashdb"
)
const (
@@ -83,9 +87,7 @@ var (
// ChildResolver defines the required method to decode the provided
// trie node and iterate the children on top.
-type ChildResolver interface {
- ForEach(node []byte, onChild func(common.Hash))
-}
+type ChildResolver = ethhashdb.ChildResolver
type cache interface {
HasGet([]byte, []byte) ([]byte, bool)
@@ -99,6 +101,18 @@ type cache interface {
type Config struct {
CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
StatsPrefix string // Prefix for cache stats (disabled if empty)
+ ReferenceRoot bool // Whether to reference the root node on update
+}
+
+func (c Config) BackendConstructor(diskdb ethdb.Database, config *triedb.Config) triedb.DBOverride {
+ var resolver ChildResolver
+ if config.IsVerkle {
+ // TODO define verkle resolver
+ log.Crit("Verkle node resolver is not defined")
+ } else {
+ resolver = trie.MerkleResolver{}
+ }
+ return New(diskdb, &c, resolver)
}
// Defaults is the default setting for database if it's not specified.
@@ -137,6 +151,8 @@ type Database struct {
childrenSize common.StorageSize // Storage size of the external children tracking
lock sync.RWMutex
+
+ referenceRoot bool
}
// cachedNode is all the information we know about a single cached trie node
@@ -174,10 +190,11 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Databas
cleans = utils.NewMeteredCache(config.CleanCacheSize, config.StatsPrefix, cacheStatsUpdateFrequency)
}
return &Database{
- diskdb: diskdb,
- resolver: resolver,
- cleans: cleans,
- dirties: make(map[common.Hash]*cachedNode),
+ diskdb: diskdb,
+ resolver: resolver,
+ cleans: cleans,
+ dirties: make(map[common.Hash]*cachedNode),
+ referenceRoot: config.ReferenceRoot,
}
}
@@ -627,6 +644,8 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
// Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary.
+// If ReferenceRoot was enabled in the config, it will also add a reference from
+// the root to the metaroot while holding the db's lock.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
@@ -637,26 +656,13 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n
db.lock.Lock()
defer db.lock.Unlock()
- return db.update(root, parent, nodes)
-}
-
-// UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into
-// database and links the account trie with multiple storage tries if necessary,
-// then adds a reference [from] root to the metaroot while holding the db's lock.
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- // Ensure the parent state is present and signal a warning if not.
- if parent != types.EmptyRootHash {
- if blob, _ := db.node(parent); len(blob) == 0 {
- log.Error("parent state is not present")
- }
- }
- db.lock.Lock()
- defer db.lock.Unlock()
-
if err := db.update(root, parent, nodes); err != nil {
return err
}
- db.reference(root, common.Hash{})
+
+ if db.referenceRoot {
+ db.reference(root, common.Hash{})
+ }
return nil
}
@@ -733,7 +739,7 @@ func (db *Database) Scheme() string {
// Reader retrieves a node reader belonging to the given state root.
// An error will be returned if the requested state is not available.
-func (db *Database) Reader(root common.Hash) (*reader, error) {
+func (db *Database) Reader(root common.Hash) (database.Reader, error) {
if _, err := db.node(root); err != nil {
return nil, fmt.Errorf("state %#x is not available, %v", root, err)
}
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 88d63c8ec2..c38c54fca8 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -35,11 +35,13 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
+ "github.com/ava-labs/libevm/triedb"
+ "github.com/ava-labs/libevm/triedb/database"
)
const (
@@ -101,6 +103,10 @@ type Config struct {
ReadOnly bool // Flag whether the database is opened in read only mode.
}
+func (c Config) BackendConstructor(diskdb ethdb.Database, _ *triedb.Config) triedb.DBOverride {
+ return New(diskdb, &c)
+}
+
// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (c *Config) sanitize() *Config {
@@ -220,7 +226,7 @@ func New(diskdb ethdb.Database, config *Config) *Database {
}
// Reader retrieves a layer belonging to the given state root.
-func (db *Database) Reader(root common.Hash) (layer, error) {
+func (db *Database) Reader(root common.Hash) (database.Reader, error) {
l := db.tree.get(root)
if l == nil {
return nil, fmt.Errorf("state %#x is not available", root)
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index 83f32ff263..37be932816 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -35,12 +35,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
)
diff --git a/triedb/pathdb/difflayer.go b/triedb/pathdb/difflayer.go
index dbe8dd63fc..eea8dc4126 100644
--- a/triedb/pathdb/difflayer.go
+++ b/triedb/pathdb/difflayer.go
@@ -30,10 +30,10 @@ import (
"fmt"
"sync"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
)
// diffLayer represents a collection of modifications made to the in-memory tries
diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go
index 05ffca3314..d7f975ffad 100644
--- a/triedb/pathdb/difflayer_test.go
+++ b/triedb/pathdb/difflayer_test.go
@@ -31,9 +31,9 @@ import (
"testing"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/coreth/trie/trienode"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/trienode"
)
func emptyLayer() *diskLayer {
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index 2d645c3e40..c13b46e710 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -33,11 +33,11 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"golang.org/x/crypto/sha3"
)
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index ab72fcf958..14e53383a3 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -32,8 +32,8 @@ import (
"errors"
"fmt"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie/triestate"
"golang.org/x/exp/slices"
)
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
index 3bf5f7cf0e..4d30831c81 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_test.go
@@ -32,10 +32,10 @@ import (
"testing"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/testutil"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/triestate"
)
// randomStateSet generates a random state change set.
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index e7156157fa..a245e7f8c3 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -35,12 +35,12 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
)
var (
diff --git a/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go
index ec78876da1..a52eded0f6 100644
--- a/triedb/pathdb/layertree.go
+++ b/triedb/pathdb/layertree.go
@@ -32,9 +32,9 @@ import (
"sync"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
)
// layerTree is a group of state layers identified by the state root.
diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go
index 71dc809021..7a461b60ad 100644
--- a/triedb/pathdb/nodebuffer.go
+++ b/triedb/pathdb/nodebuffer.go
@@ -32,11 +32,11 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/trie/trienode"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/trie/trienode"
)
// nodebuffer is a collection of modified trie nodes to aggregate the disk
diff --git a/triedb/pathdb/testutils.go b/triedb/pathdb/testutils.go
index 27abed8aad..a7ada9947e 100644
--- a/triedb/pathdb/testutils.go
+++ b/triedb/pathdb/testutils.go
@@ -31,10 +31,10 @@ import (
"fmt"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/trie/trienode"
- "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/trie/triestate"
"golang.org/x/exp/slices"
)
diff --git a/triedb/preimages.go b/triedb/preimages.go
deleted file mode 100644
index 538c05163e..0000000000
--- a/triedb/preimages.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// (c) 2022, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package triedb
-
-import (
- "sync"
-
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/libevm/common"
- "github.com/ava-labs/libevm/ethdb"
-)
-
-const defaultPreimagesLimit = 4 * 1024 * 1024 // 4 MB
-
-// preimageStore is the store for caching preimages of node key.
-type preimageStore struct {
- lock sync.RWMutex
- disk ethdb.KeyValueStore
- preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
- preimagesSize common.StorageSize // Storage size of the preimages cache
-}
-
-// newPreimageStore initializes the store for caching preimages.
-func newPreimageStore(disk ethdb.KeyValueStore) *preimageStore {
- return &preimageStore{
- disk: disk,
- preimages: make(map[common.Hash][]byte),
- }
-}
-
-// insertPreimage writes a new trie node pre-image to the memory database if it's
-// yet unknown. The method will NOT make a copy of the slice, only use if the
-// preimage will NOT be changed later on.
-func (store *preimageStore) insertPreimage(preimages map[common.Hash][]byte) {
- store.lock.Lock()
- defer store.lock.Unlock()
-
- for hash, preimage := range preimages {
- if _, ok := store.preimages[hash]; ok {
- continue
- }
- store.preimages[hash] = preimage
- store.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
- }
-}
-
-// preimage retrieves a cached trie node pre-image from memory. If it cannot be
-// found cached, the method queries the persistent database for the content.
-func (store *preimageStore) preimage(hash common.Hash) []byte {
- store.lock.RLock()
- preimage := store.preimages[hash]
- store.lock.RUnlock()
-
- if preimage != nil {
- return preimage
- }
- return rawdb.ReadPreimage(store.disk, hash)
-}
-
-// commit flushes the cached preimages into the disk.
-func (store *preimageStore) commit(force bool) error {
- store.lock.Lock()
- defer store.lock.Unlock()
-
- if store.preimagesSize <= defaultPreimagesLimit && !force {
- return nil
- }
- batch := store.disk.NewBatch()
- rawdb.WritePreimages(batch, store.preimages)
- if err := batch.Write(); err != nil {
- return err
- }
- store.preimages, store.preimagesSize = make(map[common.Hash][]byte), 0
- return nil
-}
-
-// size returns the current storage size of accumulated preimages.
-func (store *preimageStore) size() common.StorageSize {
- store.lock.RLock()
- defer store.lock.RUnlock()
-
- return store.preimagesSize
-}