Compare commits
21 commits: master...verkle/onl

Commits:
a3437cc17c
fe75603d0b
5bac5b3262
fa753db9e8
86bdc3fb39
909049c5fe
7360d168c8
361a328cb7
41c2f754cc
7cb1add36a
03dbc0a210
6d40e11fe3
5ca990184f
15d98607f3
ef08e51e40
e1144745a7
bc06d2c740
97a79f50e8
9f9c03a94c
719bf47354
162780515a
.circleci/config.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
+# Use the latest 2.1 version of CircleCI pipeline process engine.
+# See: https://circleci.com/docs/2.0/configuration-reference
+version: 2.1
+
+# Define a job to be invoked later in a workflow.
+# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
+jobs:
+  build:
+    working_directory: ~/repo
+    # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
+    # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
+    docker:
+      - image: circleci/golang:1.16.10
+    # Add steps to the job
+    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - go-mod-v4-{{ checksum "go.sum" }}
+      - run:
+          name: Install Dependencies
+          command: go mod download
+      - save_cache:
+          key: go-mod-v4-{{ checksum "go.sum" }}
+          paths:
+            - "/go/pkg/mod"
+      #- run:
+      #    name: Run linter
+      #    command: |
+      #      go run build/ci.go lint
+      - run:
+          name: Run tests
+          command: |
+            go run build/ci.go test -coverage
+      - store_test_results:
+          path: /tmp/test-reports
+
+# Invoke jobs via workflows
+# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
+workflows:
+  sample: # This is the name of the workflow, feel free to change it to better match your workflow.
+    # Inside the workflow, you define the jobs you want to run.
+    jobs:
+      - build
@@ -220,7 +220,7 @@ func verifyState(ctx *cli.Context) error {
        log.Error("Failed to load head block")
        return errors.New("no head block")
    }
-   snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
+   snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false, false)
    if err != nil {
        log.Error("Failed to open snapshot tree", "err", err)
        return err
@@ -472,7 +472,7 @@ func dumpState(ctx *cli.Context) error {
    if err != nil {
        return err
    }
-   snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
+   snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false, false)
    if err != nil {
        return err
    }
@@ -34,6 +34,7 @@ import (
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/trie"
+   "github.com/ethereum/go-ethereum/trie/utils"
    "golang.org/x/crypto/sha3"
)

@@ -660,10 +661,14 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header
        r.Sub(r, header.Number)
        r.Mul(r, blockReward)
        r.Div(r, big8)
+       uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes())
+       state.Witness().TouchAddress(uncleCoinbase, state.GetBalance(uncle.Coinbase).Bytes())
        state.AddBalance(uncle.Coinbase, r)

        r.Div(blockReward, big32)
        reward.Add(reward, r)
    }
+   coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes())
+   state.Witness().TouchAddress(coinbase, state.GetBalance(header.Coinbase).Bytes())
    state.AddBalance(header.Coinbase, reward)
}
@@ -226,15 +226,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
    futureBlocks, _ := lru.New(maxFutureBlocks)

    bc := &BlockChain{
        chainConfig: chainConfig,
        cacheConfig: cacheConfig,
        db:          db,
        triegc:      prque.New(nil),
-       stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
-           Cache:     cacheConfig.TrieCleanLimit,
-           Journal:   cacheConfig.TrieCleanJournal,
-           Preimages: cacheConfig.Preimages,
-       }),
        quit:        make(chan struct{}),
        chainmu:     syncx.NewClosableMutex(),
        bodyCache:   bodyCache,
@@ -283,6 +278,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par

    // Make sure the state associated with the block is available
    head := bc.CurrentBlock()
+   bc.stateCache = state.NewDatabaseWithConfig(db, &trie.Config{
+       Cache:     cacheConfig.TrieCleanLimit,
+       Journal:   cacheConfig.TrieCleanJournal,
+       Preimages: cacheConfig.Preimages,
+       UseVerkle: chainConfig.IsCancun(head.Header().Number),
+   })
+
    if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
        // Head state is missing, before the state recovery, find out the
        // disk layer point of snapshot(if it's enabled). Make sure the
@@ -375,7 +377,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
            log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
            recover = true
        }
-       bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
+       bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover, chainConfig.IsCancun(head.Header().Number))
    }

    // Start future block processor.
@@ -1592,7 +1594,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)

        // Process block using the parent state as reference point
        substart := time.Now()
-       receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
+       var (
+           usedGas  uint64
+           receipts types.Receipts
+           logs     []*types.Log
+       )
+       receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig)
        if err != nil {
            bc.reportBlock(block, receipts, err)
            atomic.StoreUint32(&followupInterrupt, 1)
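Aside (illustrative, not part of the branch): the recurring chainConfig.IsCancun checks above are what gate verkle mode; a config whose Cancun block is zero is expected to enable it from genesis. A minimal sketch:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Verkle mode piggybacks on the Cancun fork block in this branch; this is
	// what feeds trie.Config.UseVerkle and the new snapshot.New argument above.
	cfg := &params.ChainConfig{CancunBlock: big.NewInt(0)}
	fmt.Println(cfg.IsCancun(big.NewInt(0))) // expected: true from block 0 onward
}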
@@ -28,6 +28,7 @@ import (
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/params"
+   "github.com/ethereum/go-ethereum/trie"
)

// BlockGen creates blocks for testing.
@@ -284,6 +285,91 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
    return blocks, receipts
}

+func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
+   if config == nil {
+       config = params.TestChainConfig
+   }
+   blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
+   chainreader := &fakeChainReader{config: config}
+   genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
+       b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
+       b.header = makeHeader(chainreader, parent, statedb, b.engine)
+
+       // Mutate the state and block according to any hard-fork specs
+       if daoBlock := config.DAOForkBlock; daoBlock != nil {
+           limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
+           if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 {
+               if config.DAOForkSupport {
+                   b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
+               }
+           }
+       }
+       if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 {
+           misc.ApplyDAOHardFork(statedb)
+       }
+       // Execute any user modifications to the block
+       if gen != nil {
+           gen(i, b)
+       }
+       if b.engine != nil {
+           // Finalize and seal the block
+           block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
+           if err != nil {
+               panic(err)
+           }
+
+           // Write state changes to db
+           root, err := statedb.Commit(config.IsEIP158(b.header.Number))
+           if err != nil {
+               panic(fmt.Sprintf("state write error: %v", err))
+           }
+           if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
+               panic(fmt.Sprintf("trie write error: %v", err))
+           }
+
+           // Generate an associated verkle proof
+           if tr := statedb.GetTrie(); tr.IsVerkle() {
+               vtr := tr.(*trie.VerkleTrie)
+               // Generate the proof if we are using a verkle tree
+               // WORKAROUND: make sure all keys are resolved
+               // before building the proof. Ultimately, node
+               // resolution can be done with a prefetcher or
+               // from GetCommitmentsAlongPath.
+
+               keys := statedb.Witness().Keys()
+               for _, key := range keys {
+                   out, err := vtr.TryGet(key)
+                   if err != nil {
+                       panic(err)
+                   }
+                   if len(out) == 0 {
+                       panic(fmt.Sprintf("%x should be present in the tree", key))
+                   }
+               }
+               vtr.Hash()
+               _, err := vtr.ProveAndSerialize(keys, statedb.Witness().KeyVals())
+               //block.SetVerkleProof(p)
+               if err != nil {
+                   panic(err)
+               }
+           }
+           return block, b.receipts
+       }
+       return nil, nil
+   }
+   for i := 0; i < n; i++ {
+       statedb, err := state.New(parent.Root(), state.NewDatabaseWithConfig(db, &trie.Config{UseVerkle: true}), nil)
+       if err != nil {
+           panic(err)
+       }
+       block, receipt := genblock(i, parent, statedb)
+       blocks[i] = block
+       receipts[i] = receipt
+       parent = block
+   }
+   return blocks, receipts
+}
+
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
    var time uint64
    if parent.Time() == 0 {
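Aside (illustrative sketch, not part of the branch): the new GenerateVerkleChain helper is driven the same way as GenerateChain. The fragment below mirrors the TestProcessStateless test further down; gspec is assumed to be a *Genesis whose config has CancunBlock set to 0 and db a rawdb.NewMemoryDatabase().

// Sketch only: commit the genesis, generate one verkle-backed block, import it.
genesis := gspec.MustCommit(db)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()

chain, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {
	// add signed transactions with gen.AddTx(...) here
})
if _, err := blockchain.InsertChain(chain); err != nil {
	panic(err)
}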
@@ -69,6 +69,7 @@ func NewEVMTxContext(msg Message) vm.TxContext {
    return vm.TxContext{
        Origin:   msg.From(),
        GasPrice: new(big.Int).Set(msg.GasPrice()),
+       Accesses: types.NewAccessWitness(),
    }
}

@@ -162,6 +162,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
    if genesis != nil && genesis.Config == nil {
        return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
    }

    // Just commit the new block if there is no stored genesis block.
    stored := rawdb.ReadCanonicalHash(db, 0)
    if (stored == common.Hash{}) {
@@ -177,13 +178,29 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
        }
        return genesis.Config, block.Hash(), nil
    }

    // We have the genesis block in database(perhaps in ancient database)
    // but the corresponding state is missing.
    header := rawdb.ReadHeader(db, stored, 0)
-   if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil {
-       if genesis == nil {
-           genesis = DefaultGenesisBlock()
+   var trieCfg *trie.Config
+
+   if genesis == nil {
+       storedcfg := rawdb.ReadChainConfig(db, stored)
+       if storedcfg == nil {
+           panic("this should never be reached: if genesis is nil, the config is already present or 'geth init' is being called which created it (in the code above, which means genesis != nil)")
        }
+
+       if storedcfg.CancunBlock != nil {
+           if storedcfg.CancunBlock.Cmp(big.NewInt(0)) != 0 {
+               panic("cancun block must be 0")
+           }
+
+           trieCfg = &trie.Config{UseVerkle: storedcfg.IsCancun(big.NewInt(header.Number.Int64()))}
+       }
+   }
+
+   if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, trieCfg), nil); err != nil {
        // Ensure the stored genesis matches with the given one.
        hash := genesis.ToBlock(nil).Hash()
        if hash != stored {
@@ -264,7 +281,11 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
    if db == nil {
        db = rawdb.NewMemoryDatabase()
    }
-   statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+   var trieCfg *trie.Config
+   if g.Config != nil {
+       trieCfg = &trie.Config{UseVerkle: g.Config.IsCancun(big.NewInt(int64(g.Number)))}
+   }
+   statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, trieCfg), nil)
    if err != nil {
        panic(err)
    }
@@ -306,6 +327,9 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
    }
    statedb.Commit(false)
    statedb.Database().TrieDB().Commit(root, true, nil)
+   if err := statedb.Cap(root); err != nil {
+       panic(err)
+   }

    return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
}
@@ -357,6 +381,20 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big
    return g.MustCommit(db)
}

+func DefaultVerkleGenesisBlock() *Genesis {
+   return &Genesis{
+       Config:     params.VerkleChainConfig,
+       Nonce:      86,
+       GasLimit:   0x2fefd8,
+       Difficulty: big.NewInt(1),
+       Alloc: map[common.Address]GenesisAccount{
+           common.BytesToAddress([]byte{97, 118, 97, 209, 72, 165, 43, 239, 81, 162, 104, 199, 40, 179, 162, 27, 88, 249, 67, 6}): {
+               Balance: big.NewInt(0).Lsh(big.NewInt(1), 27),
+           },
+       },
+   }
+}
+
// DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis {
    return &Genesis{
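Aside (illustrative, not from the branch): committing the new verkle genesis into a fresh database follows the usual genesis workflow; DefaultVerkleGenesisBlock carries params.VerkleChainConfig, whose Cancun block is expected to be 0 so that UseVerkle is active from the start.

// Sketch only: build and commit a verkle-enabled genesis in memory.
db := rawdb.NewMemoryDatabase()
gspec := DefaultVerkleGenesisBlock()
genesis := gspec.MustCommit(db) // genesis state is written through the verkle path in ToBlock
_ = genesis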
@@ -26,6 +26,7 @@ import (
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/trie"
+   "github.com/gballet/go-verkle"
    lru "github.com/hashicorp/golang-lru"
)

@@ -104,6 +105,9 @@ type Trie interface {
    // nodes of the longest existing prefix of the key (at least the root), ending
    // with the node that proves the absence of the key.
    Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
+
+   // IsVerkle returns true if the trie is verkle-tree based
+   IsVerkle() bool
}

// NewDatabase creates a backing store for state. The returned database is safe for
@@ -118,6 +122,13 @@ func NewDatabase(db ethdb.Database) Database {
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
    csc, _ := lru.New(codeSizeCacheSize)
+   if config != nil && config.UseVerkle {
+       return &VerkleDB{
+           db:            trie.NewDatabaseWithConfig(db, config),
+           codeSizeCache: csc,
+           codeCache:     fastcache.New(codeCacheSize),
+       }
+   }
    return &cachingDB{
        db:            trie.NewDatabaseWithConfig(db, config),
        codeSizeCache: csc,
@@ -202,3 +213,67 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro
func (db *cachingDB) TrieDB() *trie.Database {
    return db.db
}
+
+// VerkleDB implements state.Database for a verkle tree
+type VerkleDB struct {
+   db            *trie.Database
+   codeSizeCache *lru.Cache
+   codeCache     *fastcache.Cache
+}
+
+// OpenTrie opens the main account trie.
+func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) {
+   if root == (common.Hash{}) || root == emptyRoot {
+       return trie.NewVerkleTrie(verkle.New(), db.db), nil
+   }
+   payload, err := db.db.DiskDB().Get(root[:])
+   if err != nil {
+       return nil, err
+   }
+
+   r, err := verkle.ParseNode(payload, 0)
+   if err != nil {
+       panic(err)
+   }
+   return trie.NewVerkleTrie(r, db.db), err
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (db *VerkleDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
+   // alternatively, return accTrie
+   panic("should not be called")
+}
+
+// CopyTrie returns an independent copy of the given trie.
+func (db *VerkleDB) CopyTrie(tr Trie) Trie {
+   t, ok := tr.(*trie.VerkleTrie)
+   if ok {
+       return t.Copy(db.db)
+   }
+
+   panic("invalid tree type != VerkleTrie")
+}
+
+// ContractCode retrieves a particular contract's code.
+func (db *VerkleDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
+   if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
+       return code, nil
+   }
+   code := rawdb.ReadCode(db.db.DiskDB(), codeHash)
+   if len(code) > 0 {
+       db.codeCache.Set(codeHash.Bytes(), code)
+       db.codeSizeCache.Add(codeHash, len(code))
+       return code, nil
+   }
+   return nil, errors.New("not found")
+}
+
+// ContractCodeSize retrieves a particular contracts code's size.
+func (db *VerkleDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
+   panic("need to merge #31 for this to work")
+}
+
+// TrieDB retrieves the low level trie database used for data storage.
+func (db *VerkleDB) TrieDB() *trie.Database {
+   return db.db
+}
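Aside (illustrative, not from the branch): with UseVerkle set, NewDatabaseWithConfig now hands back the VerkleDB defined above instead of the regular cachingDB, and OpenTrie on an empty root yields a fresh verkle trie. A minimal sketch, assuming diskdb is e.g. rawdb.NewMemoryDatabase():

// Sketch only: open a verkle-backed state database and its account trie.
sdb := state.NewDatabaseWithConfig(diskdb, &trie.Config{UseVerkle: true})
tr, err := sdb.OpenTrie(common.Hash{}) // empty root -> fresh verkle trie
if err != nil {
	panic(err)
}
fmt.Println(tr.IsVerkle()) // expected: true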
@@ -76,6 +76,14 @@ func (it *NodeIterator) step() error {
    // Initialize the iterator if we've just started
    if it.stateIt == nil {
        it.stateIt = it.state.trie.NodeIterator(nil)
+
+       // If the trie is a verkle trie, then the data and state
+       // are the same tree, and as a result both iterators are
+       // the same. This is a hack meant for both tree types to
+       // work.
+       if _, ok := it.state.trie.(*trie.VerkleTrie); ok {
+           it.dataIt = it.stateIt
+       }
    }
    // If we had data nodes previously, we surely have at least state nodes
    if it.dataIt != nil {
@@ -100,10 +108,11 @@ func (it *NodeIterator) step() error {
        it.state, it.stateIt = nil, nil
        return nil
    }
-   // If the state trie node is an internal entry, leave as is
+   // If the state trie node is an internal entry, leave as is.
    if !it.stateIt.Leaf() {
        return nil
    }
+
    // Otherwise we've reached an account node, initiate data iteration
    var account types.StateAccount
    if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
@@ -89,7 +89,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6
    if headBlock == nil {
        return nil, errors.New("Failed to load head block")
    }
-   snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false)
+   snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false, false)
    if err != nil {
        return nil, err // The relevant snapshot(s) might not exist
    }
@@ -362,7 +362,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
    // - The state HEAD is rewound already because of multiple incomplete `prune-state`
    // In this case, even the state HEAD is not exactly matched with snapshot, it
    // still feasible to recover the pruning correctly.
-   snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true)
+   snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true, false)
    if err != nil {
        return err // The relevant snapshot(s) might not exist
    }
@@ -24,6 +24,7 @@ import (
    "sync"
    "sync/atomic"

+   "github.com/VictoriaMetrics/fastcache"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
@@ -183,7 +184,7 @@ type Tree struct {
// This case happens when the snapshot is 'ahead' of the state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool, useVerkle bool) (*Tree, error) {
    // Create a new, empty snapshot tree
    snap := &Tree{
        diskdb: diskdb,
@@ -202,6 +203,17 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
    }
    if err != nil {
        if rebuild {
+           if useVerkle {
+               snap.layers = map[common.Hash]snapshot{
+                   root: &diskLayer{
+                       diskdb: diskdb,
+                       triedb: triedb,
+                       root:   root,
+                       cache:  fastcache.New(cache * 1024 * 1024),
+                   },
+               }
+               return snap, nil
+           }
            log.Warn("Failed to load snapshot, regenerating", "err", err)
            snap.Rebuild(root)
            return snap, nil
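Aside (illustrative, not from the branch): every snapshot.New call site in this diff gains the trailing useVerkle flag; with it set and rebuild enabled, a bare disk layer is installed at the given root instead of regenerating the snapshot from the account trie. A minimal call sketch, with diskdb, triedb and root standing in for the caller's values:

// Sketch only: build a snapshot tree for a verkle state.
snaps, err := snapshot.New(diskdb, triedb, 256, root,
	false /* async */, true /* rebuild */, false /* recovery */, true /* useVerkle */)
if err != nil {
	panic(err)
}
_ = snaps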
@@ -28,6 +28,8 @@ import (
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/rlp"
+   trieUtils "github.com/ethereum/go-ethereum/trie/utils"
+   "github.com/holiman/uint256"
)

var emptyCodeHash = crypto.Keccak256(nil)
@@ -239,9 +241,13 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
        if metrics.EnabledExpensive {
            meter = &s.db.StorageReads
        }
-       if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
-           s.setError(err)
-           return common.Hash{}
+       if !s.db.trie.IsVerkle() {
+           if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
+               s.setError(err)
+               return common.Hash{}
+           }
+       } else {
+           panic("verkle trees use the snapshot")
        }
    }
    var value common.Hash
@@ -332,7 +338,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
    // The snapshot storage map for the object
    var storage map[common.Hash][]byte
    // Insert all the pending updates into the trie
-   tr := s.getTrie(db)
+   var tr Trie
+   if s.db.trie.IsVerkle() {
+       tr = s.db.trie
+   } else {
+       tr = s.getTrie(db)
+   }
    hasher := s.db.hasher

    usedStorage := make([][]byte, 0, len(s.pendingStorage))
@@ -345,12 +356,25 @@ func (s *stateObject) updateTrie(db Database) Trie {

        var v []byte
        if (value == common.Hash{}) {
-           s.setError(tr.TryDelete(key[:]))
+           if tr.IsVerkle() {
+               k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
+               s.setError(tr.TryDelete(k))
+               //s.db.db.TrieDB().DiskDB().Delete(append(s.address[:], key[:]...))
+           } else {
+               s.setError(tr.TryDelete(key[:]))
+           }
            s.db.StorageDeleted += 1
        } else {
            // Encoding []byte cannot fail, ok to ignore the error.
            v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
-           s.setError(tr.TryUpdate(key[:], v))
+           if !tr.IsVerkle() {
+               s.setError(tr.TryUpdate(key[:], v))
+           } else {
+               k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
+               // Update the trie, with v as a value
+               s.setError(tr.TryUpdate(k, v))
+           }
            s.db.StorageUpdated += 1
        }
        // If state snapshotting is active, cache the data til commit
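Aside (illustrative, not from the branch): under verkle, account fields and storage slots are addressed by derived 32-byte tree keys rather than hashed addresses and slot hashes; the helpers used in the hunks above live in trie/utils. A small sketch with placeholder values:

// Sketch only: derive the tree keys for an account's balance, nonce and a storage slot.
addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
slot := uint256.NewInt(1)
fmt.Printf("balance key: %x\n", trieUtils.GetTreeKeyBalance(addr.Bytes()))
fmt.Printf("nonce key:   %x\n", trieUtils.GetTreeKeyNonce(addr.Bytes()))
fmt.Printf("slot key:    %x\n", trieUtils.GetTreeKeyStorageSlot(addr[:], slot))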
@@ -18,6 +18,7 @@
package state

import (
+   "encoding/binary"
    "errors"
    "fmt"
    "math/big"
@@ -33,6 +34,8 @@ import (
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/trie"
+   trieUtils "github.com/ethereum/go-ethereum/trie/utils"
+   "github.com/holiman/uint256"
)

type revision struct {
@@ -99,6 +102,8 @@ type StateDB struct {
    // Per-transaction access list
    accessList *accessList

+   witness *types.AccessWitness
+
    // Journal of state modifications. This is the backbone of
    // Snapshot and RevertToSnapshot.
    journal *journal
@@ -143,6 +148,13 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
        journal:    newJournal(),
        accessList: newAccessList(),
        hasher:     crypto.NewKeccakState(),
+       witness:    types.NewAccessWitness(),
+   }
+   if sdb.snaps == nil && tr.IsVerkle() {
+       sdb.snaps, err = snapshot.New(db.TrieDB().DiskDB(), db.TrieDB(), 1, root, false, true, false, true)
+       if err != nil {
+           return nil, err
+       }
    }
    if sdb.snaps != nil {
        if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
@@ -154,6 +166,14 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
    return sdb, nil
}

+func (s *StateDB) Witness() *types.AccessWitness {
+   return s.witness
+}
+
+func (s *StateDB) SetWitness(aw *types.AccessWitness) {
+   s.witness = aw
+}
+
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
@@ -460,8 +480,26 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
    }
    // Encode the account and update the account trie
    addr := obj.Address()
+
    if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
-       s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
+       s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
+   }
+   if len(obj.code) > 0 && s.trie.IsVerkle() {
+       cs := make([]byte, 32)
+       binary.BigEndian.PutUint64(cs, uint64(len(obj.code)))
+       if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil {
+           s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
+       }
+
+       if obj.dirtyCode {
+           if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
+               for i := range chunks {
+                   s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
+               }
+           } else {
+               s.setError(err)
+           }
+       }
    }

    // If state snapshotting is active, cache the data til commit. Note, this
@@ -479,10 +517,19 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
    if metrics.EnabledExpensive {
        defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
    }

    // Delete the account from the trie
-   addr := obj.Address()
-   if err := s.trie.TryDelete(addr[:]); err != nil {
-       s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
+   if !s.trie.IsVerkle() {
+       addr := obj.Address()
+       if err := s.trie.TryDelete(addr[:]); err != nil {
+           s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
+       }
+   } else {
+       for i := byte(0); i <= 255; i++ {
+           if err := s.trie.TryDelete(trieUtils.GetTreeKeyAccountLeaf(obj.Address().Bytes(), i)); err != nil {
+               s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", obj.Address(), err))
+           }
+       }
    }
}

@@ -532,6 +579,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
                data.Root = emptyRoot
            }
        }
+
+       // NOTE: Do not touch the addresses here, kick the can down the
+       // road. That is because I don't want to change the interface
+       // to getDeletedStateObject at this stage, as the PR would then
+       // have a huge footprint.
+       // The alternative is to make accesses available via the state
+       // db instead of the evm. This requires a significant rewrite,
+       // that isn't currently warranted.
    }
    // If snapshot unavailable or reading from it failed, load from the database
    if s.snap == nil || err != nil {
@@ -658,6 +713,7 @@ func (s *StateDB) Copy() *StateDB {
        preimages:  make(map[common.Hash][]byte, len(s.preimages)),
        journal:    newJournal(),
        hasher:     crypto.NewKeccakState(),
+       witness:    s.witness.Copy(),
    }
    // Copy the dirty states, logs, and preimages
    for addr := range s.journal.dirties {
@@ -845,7 +901,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
    // to pull useful data from disk.
    for addr := range s.stateObjectsPending {
        if obj := s.stateObjects[addr]; !obj.deleted {
-           obj.updateRoot(s.db)
+           if s.trie.IsVerkle() {
+               obj.updateTrie(s.db)
+           } else {
+               obj.updateRoot(s.db)
+           }
        }
    }
    // Now we're about to start to write changes to the trie. The trie is so far
@@ -896,6 +956,20 @@ func (s *StateDB) clearJournalAndRefund() {
    s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires
}

+// GetTrie returns the account trie.
+func (s *StateDB) GetTrie() Trie {
+   return s.trie
+}
+
+func (s *StateDB) Cap(root common.Hash) error {
+   if s.snaps != nil {
+       return s.snaps.Cap(root, 0)
+   }
+   // pre-verkle path: noop if s.snaps hasn't been
+   // initialized.
+   return nil
+}
+
// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
    if s.dbErr != nil {
@@ -909,17 +983,27 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
    codeWriter := s.db.TrieDB().DiskDB().NewBatch()
    for addr := range s.stateObjectsDirty {
        if obj := s.stateObjects[addr]; !obj.deleted {
-           // Write any contract code associated with the state object
-           if obj.code != nil && obj.dirtyCode {
-               rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
-               obj.dirtyCode = false
-           }
            // Write any storage changes in the state object to its storage trie
            committed, err := obj.CommitTrie(s.db)
            if err != nil {
                return common.Hash{}, err
            }
            storageCommitted += committed
+           // Write any contract code associated with the state object
+           if obj.code != nil && obj.dirtyCode {
+               if s.trie.IsVerkle() {
+                   if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
+                       for i := range chunks {
+                           s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
+                       }
+                   } else {
+                       s.setError(err)
+                   }
+               } else {
+                   rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+               }
+               obj.dirtyCode = false
+           }
        }
    }
    if len(s.stateObjectsDirty) > 0 {
@@ -704,7 +704,10 @@ func TestMissingTrieNodes(t *testing.T) {
    memDb := rawdb.NewMemoryDatabase()
    db := NewDatabase(memDb)
    var root common.Hash
-   state, _ := New(common.Hash{}, db, nil)
+   state, err := New(common.Hash{}, db, nil)
+   if err != nil {
+       panic("nil stte")
+   }
    addr := common.BytesToAddress([]byte("so"))
    {
        state.SetBalance(addr, big.NewInt(1))
@@ -736,7 +739,7 @@ func TestMissingTrieNodes(t *testing.T) {
    }
    // Modify the state
    state.SetBalance(addr, big.NewInt(2))
-   root, err := state.Commit(false)
+   root, err = state.Commit(false)
    if err == nil {
        t.Fatalf("expected error, got root :%x", root)
    }
@@ -70,7 +70,10 @@ func makeTestState() (Database, common.Hash, []*testAccount) {
        state.updateStateObject(obj)
        accounts = append(accounts, acc)
    }
-   root, _ := state.Commit(false)
+   root, err := state.Commit(false)
+   if err != nil {
+       panic(err)
+   }

    // Return the generated state
    return db, root, accounts
@@ -128,6 +128,8 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon
        receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
    }

+   statedb.Witness().Merge(txContext.Accesses)
+
    // Set the receipt logs and create the bloom filter.
    receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash)
    receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
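Aside (illustrative, not from the branch): tying the pieces together, a transaction's access witness is created in NewEVMTxContext (earlier hunk), charged extra gas in TransitionDb and defined in core/types/access_witness.go (both further down), and folded into the block-level witness right here in applyTransaction. Rough sketch, with key and preValue standing in for a derived tree key and its prior value:

// Sketch only: per-transaction witness lifecycle.
aw := types.NewAccessWitness()                    // attached to vm.TxContext
gas := aw.TouchAddressAndChargeGas(key, preValue) // extra gas when a stem/chunk is new
_ = gas                                           // added to the intrinsic gas of the tx
statedb.Witness().Merge(aw)                       // accumulated into the block witness
fmt.Println(len(statedb.Witness().Keys()))        // these keys are proved in GenerateVerkleChain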
@@ -340,3 +340,55 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
    // Assemble and return the final block for sealing
    return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
}

+func TestProcessStateless(t *testing.T) {
+   var (
+       config = &params.ChainConfig{
+           ChainID:             big.NewInt(1),
+           HomesteadBlock:      big.NewInt(0),
+           EIP150Block:         big.NewInt(0),
+           EIP155Block:         big.NewInt(0),
+           EIP158Block:         big.NewInt(0),
+           ByzantiumBlock:      big.NewInt(0),
+           ConstantinopleBlock: big.NewInt(0),
+           PetersburgBlock:     big.NewInt(0),
+           IstanbulBlock:       big.NewInt(0),
+           MuirGlacierBlock:    big.NewInt(0),
+           BerlinBlock:         big.NewInt(0),
+           LondonBlock:         big.NewInt(0),
+           Ethash:              new(params.EthashConfig),
+           CancunBlock:         big.NewInt(0),
+       }
+       signer     = types.LatestSigner(config)
+       testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+       db         = rawdb.NewMemoryDatabase()
+       gspec      = &Genesis{
+           Config: config,
+           Alloc: GenesisAlloc{
+               common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+                   Balance: big.NewInt(1000000000000000000), // 1 ether
+                   Nonce:   0,
+               },
+           },
+       }
+   )
+   // Verkle trees use the snapshot, which must be enabled before the
+   // data is saved into the tree+database.
+   genesis := gspec.MustCommit(db)
+   blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+   defer blockchain.Stop()
+   chain, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(_ int, gen *BlockGen) {
+       tx, _ := types.SignTx(types.NewTransaction(0, common.Address{1, 2, 3}, big.NewInt(999), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
+       gen.AddTx(tx)
+       tx, _ = types.SignTx(types.NewTransaction(1, common.Address{}, big.NewInt(999), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
+       gen.AddTx(tx)
+       tx, _ = types.SignTx(types.NewTransaction(2, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
+       gen.AddTx(tx)
+   })
+
+   _, err := blockchain.InsertChain(chain)
+   if err != nil {
+       t.Fatalf("block imported with error: %v", err)
+   }
+}
||||||
|
@ -17,6 +17,7 @@
|
|||||||
package core
|
package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
@ -27,6 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
||||||
@ -115,7 +117,7 @@ func (result *ExecutionResult) Revert() []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
|
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
|
||||||
func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool) (uint64, error) {
|
func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028 bool) (uint64, error) {
|
||||||
// Set the starting gas for the raw transaction
|
// Set the starting gas for the raw transaction
|
||||||
var gas uint64
|
var gas uint64
|
||||||
if isContractCreation && isHomestead {
|
if isContractCreation && isHomestead {
|
||||||
@ -302,6 +304,27 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
|||||||
if st.gas < gas {
|
if st.gas < gas {
|
||||||
return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas)
|
return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas)
|
||||||
}
|
}
|
||||||
|
if st.evm.TxContext.Accesses != nil {
|
||||||
|
if msg.To() != nil {
|
||||||
|
toBalance := trieUtils.GetTreeKeyBalance(msg.To().Bytes())
|
||||||
|
pre := st.state.GetBalance(*msg.To())
|
||||||
|
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(toBalance, pre.Bytes())
|
||||||
|
|
||||||
|
// NOTE: Nonce also needs to be charged, because it is needed for execution
|
||||||
|
// on the statless side.
|
||||||
|
var preTN [8]byte
|
||||||
|
fromNonce := trieUtils.GetTreeKeyNonce(msg.To().Bytes())
|
||||||
|
binary.BigEndian.PutUint64(preTN[:], st.state.GetNonce(*msg.To()))
|
||||||
|
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(fromNonce, preTN[:])
|
||||||
|
}
|
||||||
|
fromBalance := trieUtils.GetTreeKeyBalance(msg.From().Bytes())
|
||||||
|
preFB := st.state.GetBalance(msg.From()).Bytes()
|
||||||
|
fromNonce := trieUtils.GetTreeKeyNonce(msg.From().Bytes())
|
||||||
|
var preFN [8]byte
|
||||||
|
binary.BigEndian.PutUint64(preFN[:], st.state.GetNonce(msg.From()))
|
||||||
|
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(fromNonce, preFN[:])
|
||||||
|
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(fromBalance, preFB[:])
|
||||||
|
}
|
||||||
st.gas -= gas
|
st.gas -= gas
|
||||||
|
|
||||||
// Check clause 6
|
// Check clause 6
|
||||||
|
144
core/types/access_witness.go
Normal file
144
core/types/access_witness.go
Normal file
@ -0,0 +1,144 @@
|
|||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccessWitness lists the locations of the state that are being accessed
|
||||||
|
// during the production of a block.
|
||||||
|
// TODO(@gballet) this doesn't fully support deletions
|
||||||
|
type AccessWitness struct {
|
||||||
|
// Branches flags if a given branch has been loaded
|
||||||
|
Branches map[[31]byte]struct{}
|
||||||
|
|
||||||
|
// Chunks contains the initial value of each address
|
||||||
|
Chunks map[common.Hash][]byte
|
||||||
|
|
||||||
|
// The initial value isn't always available at the time an
|
||||||
|
// address is touched, this map references addresses that
|
||||||
|
// were touched but can not yet be put in Chunks.
|
||||||
|
Undefined map[common.Hash]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAccessWitness() *AccessWitness {
|
||||||
|
return &AccessWitness{
|
||||||
|
Branches: make(map[[31]byte]struct{}),
|
||||||
|
Chunks: make(map[common.Hash][]byte),
|
||||||
|
Undefined: make(map[common.Hash]struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TouchAddress adds any missing addr to the witness and returns respectively
|
||||||
|
// true if the stem or the stub weren't arleady present.
|
||||||
|
func (aw *AccessWitness) TouchAddress(addr, value []byte) (bool, bool) {
|
||||||
|
var (
|
||||||
|
stem [31]byte
|
||||||
|
newStem bool
|
||||||
|
newSelector bool
|
||||||
|
)
|
||||||
|
copy(stem[:], addr[:31])
|
||||||
|
|
||||||
|
// Check for the presence of the stem
|
||||||
|
if _, newStem := aw.Branches[stem]; !newStem {
|
||||||
|
aw.Branches[stem] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for the presence of the selector
|
||||||
|
if _, newSelector := aw.Chunks[common.BytesToHash(addr)]; !newSelector {
|
||||||
|
if value == nil {
|
||||||
|
aw.Undefined[common.BytesToHash(addr)] = struct{}{}
|
||||||
|
} else {
|
||||||
|
if _, ok := aw.Undefined[common.BytesToHash(addr)]; !ok {
|
||||||
|
delete(aw.Undefined, common.BytesToHash(addr))
|
||||||
|
}
|
||||||
|
aw.Chunks[common.BytesToHash(addr)] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return newStem, newSelector
|
||||||
|
}
|
||||||
|
|
||||||
|
// TouchAddressAndChargeGas checks if a location has already been touched in
|
||||||
|
// the current witness, and charge extra gas if that isn't the case. This is
|
||||||
|
// meant to only be called on a tx-context access witness (i.e. before it is
|
||||||
|
// merged), not a block-context witness: witness costs are charged per tx.
|
||||||
|
func (aw *AccessWitness) TouchAddressAndChargeGas(addr, value []byte) uint64 {
|
||||||
|
var gas uint64
|
||||||
|
|
||||||
|
nstem, nsel := aw.TouchAddress(addr, value)
|
||||||
|
if nstem {
|
||||||
|
gas += params.WitnessBranchCost
|
||||||
|
}
|
||||||
|
if nsel {
|
||||||
|
gas += params.WitnessChunkCost
|
||||||
|
}
|
||||||
|
return gas
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge is used to merge the witness that got generated during the execution
|
||||||
|
// of a tx, with the accumulation of witnesses that were generated during the
|
||||||
|
// execution of all the txs preceding this one in a given block.
|
||||||
|
func (aw *AccessWitness) Merge(other *AccessWitness) {
|
||||||
|
for k := range other.Undefined {
|
||||||
|
if _, ok := aw.Undefined[k]; !ok {
|
||||||
|
aw.Undefined[k] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range other.Branches {
|
||||||
|
if _, ok := aw.Branches[k]; !ok {
|
||||||
|
aw.Branches[k] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, chunk := range other.Chunks {
|
||||||
|
if _, ok := aw.Chunks[k]; !ok {
|
||||||
|
aw.Chunks[k] = chunk
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key returns, predictably, the list of keys that were touched during the
|
||||||
|
// buildup of the access witness.
|
||||||
|
func (aw *AccessWitness) Keys() [][]byte {
|
||||||
|
keys := make([][]byte, 0, len(aw.Chunks))
|
||||||
|
for key := range aw.Chunks {
|
||||||
|
var k [32]byte
|
||||||
|
copy(k[:], key[:])
|
||||||
|
keys = append(keys, k[:])
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aw *AccessWitness) KeyVals() map[common.Hash][]byte {
|
||||||
|
return aw.Chunks
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aw *AccessWitness) Copy() *AccessWitness {
|
||||||
|
naw := &AccessWitness{
|
||||||
|
Branches: make(map[[31]byte]struct{}),
|
||||||
|
Chunks: make(map[common.Hash][]byte),
|
||||||
|
Undefined: make(map[common.Hash]struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
naw.Merge(aw)
|
||||||
|
|
||||||
|
return naw
|
||||||
|
}
|
@@ -86,6 +86,9 @@ type Header struct {
	// BaseFee was added by EIP-1559 and is ignored in legacy headers.
	BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`

+	// The verkle proof is ignored in legacy headers
+	VerkleProof []byte `json:"verkleProof" rlp:"optional"`
+
	/*
	TODO (MariusVanDerWijden) Add this field once needed
	// Random was added during the merge and contains the BeaconState randomness
@@ -337,6 +340,10 @@ func (b *Block) SanityCheck() error {
	return b.header.SanityCheck()
}

+func (b *Block) SetVerkleProof(vp []byte) {
+	b.header.VerkleProof = vp
+}
+
type writeCounter common.StorageSize

func (c *writeCounter) Write(b []byte) (int, error) {
@@ -93,12 +93,12 @@ func (c *Contract) validJumpdest(dest *uint256.Int) bool {
	if OpCode(c.Code[udest]) != JUMPDEST {
		return false
	}
-	return c.isCode(udest)
+	return c.IsCode(udest)
}

-// isCode returns true if the provided PC location is an actual opcode, as
+// IsCode returns true if the provided PC location is an actual opcode, as
// opposed to a data-segment following a PUSHN operation.
-func (c *Contract) isCode(udest uint64) bool {
+func (c *Contract) IsCode(udest uint64) bool {
	// Do we already have an analysis laying around?
	if c.analysis != nil {
		return c.analysis.codeSegment(udest)
@@ -17,13 +17,16 @@
package vm

import (
+	"encoding/binary"
	"math/big"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/trie/utils"
	"github.com/holiman/uint256"
)

@@ -83,6 +86,8 @@ type TxContext struct {
	// Message information
	Origin   common.Address // Provides information for ORIGIN
	GasPrice *big.Int       // Provides information for GASPRICE

+	Accesses *types.AccessWitness
}

// EVM is the Ethereum Virtual Machine base object and provides
@@ -120,11 +125,16 @@ type EVM struct {
	// available gas is calculated in gasCall* according to the 63/64 rule and later
	// applied in opCall*.
	callGasTemp uint64

+	accesses map[common.Hash]common.Hash
}

// NewEVM returns a new EVM. The returned EVM is not thread safe and should
// only ever be used *once*.
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM {
+	if txCtx.Accesses == nil {
+		txCtx.Accesses = types.NewAccessWitness()
+	}
	evm := &EVM{
		Context:   blockCtx,
		TxContext: txCtx,
@@ -222,6 +232,16 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
	if len(code) == 0 {
		ret, err = nil, nil // gas is unchanged
	} else {
+		// Touch the account data
+		var data [32]byte
+		evm.Accesses.TouchAddress(utils.GetTreeKeyVersion(addr.Bytes()), data[:])
+		binary.BigEndian.PutUint64(data[:], evm.StateDB.GetNonce(addr))
+		evm.Accesses.TouchAddress(utils.GetTreeKeyNonce(addr[:]), data[:])
+		evm.Accesses.TouchAddress(utils.GetTreeKeyBalance(addr[:]), evm.StateDB.GetBalance(addr).Bytes())
+		binary.BigEndian.PutUint64(data[:], uint64(len(code)))
+		evm.Accesses.TouchAddress(utils.GetTreeKeyCodeSize(addr[:]), data[:])
+		evm.Accesses.TouchAddress(utils.GetTreeKeyCodeKeccak(addr[:]), evm.StateDB.GetCodeHash(addr).Bytes())
+
		addrCopy := addr
		// If the account has no code, we can abort here
		// The depth-check is already done, and precompiles handled above
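The five touches above all land under a single stem, since the account-header leaves share tree index zero and differ only in their sub-index (the final key byte). A small sketch, assuming only the trie/utils helpers introduced in this branch, that makes this visible:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie/utils"
)

func main() {
	addr := make([]byte, 20) // some 20-byte account address

	// All five keys share the same first 31 bytes (the stem), so a call that
	// touches them pays one branch cost plus one chunk cost per leaf.
	keys := [][]byte{
		utils.GetTreeKeyVersion(addr),
		utils.GetTreeKeyBalance(addr),
		utils.GetTreeKeyNonce(addr),
		utils.GetTreeKeyCodeKeccak(addr),
		utils.GetTreeKeyCodeSize(addr),
	}
	for _, k := range keys {
		fmt.Printf("%x\n", k) // 32-byte keys differing only in the last byte
	}
}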
@@ -22,6 +22,8 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/params"
+	trieUtils "github.com/ethereum/go-ethereum/trie/utils"
+	"github.com/holiman/uint256"
)

// memoryGasCost calculates the quadratic gas for memory expansion. It does so
@@ -86,14 +88,102 @@ func memoryCopierGas(stackpos int) gasFunc {
		}
	}
}

+func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	usedGas := uint64(0)
+	slot := stack.Back(0)
+	if evm.accesses != nil {
+		index := trieUtils.GetTreeKeyCodeSize(slot.Bytes())
+		usedGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+	}
+
+	return usedGas, nil
+}
+
var (
	gasCallDataCopy   = memoryCopierGas(2)
-	gasCodeCopy       = memoryCopierGas(2)
+	gasCodeCopyStateful    = memoryCopierGas(2)
-	gasExtCodeCopy    = memoryCopierGas(3)
+	gasExtCodeCopyStateful = memoryCopierGas(3)
	gasReturnDataCopy = memoryCopierGas(2)
)

+func gasCodeCopy(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	var statelessGas uint64
+	if evm.accesses != nil {
+		var (
+			codeOffset = stack.Back(1)
+			length     = stack.Back(2)
+		)
+		uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
+		if overflow {
+			uint64CodeOffset = 0xffffffffffffffff
+		}
+		uint64CodeEnd, overflow := new(uint256.Int).Add(codeOffset, length).Uint64WithOverflow()
+		if overflow {
+			uint64CodeEnd = 0xffffffffffffffff
+		}
+		addr := contract.Address()
+		chunk := uint64CodeOffset / 31
+		endChunk := uint64CodeEnd / 31
+		// XXX uint64 overflow in condition check
+		for ; chunk < endChunk; chunk++ {
+			// TODO make a version of GetTreeKeyCodeChunk without the bigint
+			index := trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(chunk))
+			statelessGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+		}
+	}
+	usedGas, err := gasCodeCopyStateful(evm, contract, stack, mem, memorySize)
+	return usedGas + statelessGas, err
+}
+
+func gasExtCodeCopy(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	var statelessGas uint64
+	if evm.accesses != nil {
+		var (
+			a          = stack.Back(0)
+			codeOffset = stack.Back(2)
+			length     = stack.Back(3)
+		)
+		uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
+		if overflow {
+			uint64CodeOffset = 0xffffffffffffffff
+		}
+		uint64CodeEnd, overflow := new(uint256.Int).Add(codeOffset, length).Uint64WithOverflow()
+		if overflow {
+			uint64CodeEnd = 0xffffffffffffffff
+		}
+		addr := common.Address(a.Bytes20())
+		chunk := uint64CodeOffset / 31
+		endChunk := uint64CodeEnd / 31
+		// XXX uint64 overflow in condition check
+		for ; chunk < endChunk; chunk++ {
+			// TODO(@gballet) make a version of GetTreeKeyCodeChunk without the bigint
+			index := trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(chunk))
+			statelessGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+		}
+	}
+	usedGas, err := gasExtCodeCopyStateful(evm, contract, stack, mem, memorySize)
+	return usedGas + statelessGas, err
+}
+
+func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	usedGas := uint64(0)
+
+	if evm.accesses != nil {
+		where := stack.Back(0)
+		addr := contract.Address()
+		index := trieUtils.GetTreeKeyStorageSlot(addr[:], where)
+		usedGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+	}
+
+	return usedGas, nil
+}
+
func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	// Apply the witness access costs, err is nil
+	accessGas, _ := gasSLoad(evm, contract, stack, mem, memorySize)
	var (
		y, x    = stack.Back(1), stack.Back(0)
		current = evm.StateDB.GetState(contract.Address(), x.Bytes32())
@@ -109,14 +199,15 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
	// 3. From a non-zero to a non-zero (CHANGE)
	switch {
	case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0
-		return params.SstoreSetGas, nil
+		return params.SstoreSetGas + accessGas, nil
	case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0
		evm.StateDB.AddRefund(params.SstoreRefundGas)
-		return params.SstoreClearGas, nil
+		return params.SstoreClearGas + accessGas, nil
	default: // non 0 => non 0 (or 0 => 0)
-		return params.SstoreResetGas, nil
+		return params.SstoreResetGas + accessGas, nil
	}
}

// The new gas metering is based on net gas costs (EIP-1283):
//
// 1. If current value equals new value (this is a no-op), 200 gas is deducted.
@@ -331,6 +422,14 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
		transfersValue = !stack.Back(2).IsZero()
		address        = common.Address(stack.Back(1).Bytes20())
	)
+	if evm.accesses != nil {
+		// Charge witness costs
+		for i := trieUtils.VersionLeafKey; i <= trieUtils.CodeSizeLeafKey; i++ {
+			index := trieUtils.GetTreeKeyAccountLeaf(address[:], byte(i))
+			gas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+		}
+	}
+
	if evm.chainRules.IsEIP158 {
		if transfersValue && evm.StateDB.Empty(address) {
			gas += params.CallNewAccountGas
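As a rough illustration of what gasCodeCopy above charges (not authoritative numbers, just the 31-byte chunk arithmetic combined with the Witness* constants from this branch):

package main

import "fmt"

const (
	witnessBranchCost = 1900 // params.WitnessBranchCost in this branch
	witnessChunkCost  = 200  // params.WitnessChunkCost in this branch
)

func main() {
	offset, length := uint64(10), uint64(100) // CODECOPY offset and size
	firstChunk := offset / 31
	endChunk := (offset + length) / 31

	chunks := endChunk - firstChunk // mirrors the `chunk < endChunk` loop above
	fmt.Println(chunks)             // 3

	// Chunks 0..2 of a contract share a single stem with this branch's
	// VerkleNodeWidth of 8, so if everything is cold the witness surcharge is:
	fmt.Println(witnessBranchCost + chunks*witnessChunkCost) // 1900 + 3*200 = 2500
}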
@@ -20,6 +20,7 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
+	trieUtils "github.com/ethereum/go-ethereum/trie/utils"
	"github.com/holiman/uint256"
	"golang.org/x/crypto/sha3"
)
@@ -341,7 +342,12 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte

func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
	slot := scope.Stack.peek()
-	slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())))
+	cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))
+	if interpreter.evm.accesses != nil {
+		index := trieUtils.GetTreeKeyCodeSize(slot.Bytes())
+		interpreter.evm.TxContext.Accesses.TouchAddress(index, uint256.NewInt(cs).Bytes())
+	}
+	slot.SetUint64(cs)
	return nil, nil
}

@@ -362,12 +368,65 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
	if overflow {
		uint64CodeOffset = 0xffffffffffffffff
	}
-	codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64())
-	scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+	uint64CodeEnd, overflow := new(uint256.Int).Add(&codeOffset, &length).Uint64WithOverflow()
+	if overflow {
+		uint64CodeEnd = 0xffffffffffffffff
+	}
+	if interpreter.evm.accesses != nil {
+		copyCodeFromAccesses(scope.Contract.Address(), uint64CodeOffset, uint64CodeEnd, memOffset.Uint64(), interpreter, scope)
+	} else {
+		codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64())
+		scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+
+		touchEachChunks(uint64CodeOffset, uint64CodeEnd, codeCopy, scope.Contract, interpreter.evm)
+	}

	return nil, nil
}

+// touchEachChunks touches every code chunk in the range [start, end).
+func touchEachChunks(start, end uint64, code []byte, contract *Contract, evm *EVM) {
+	for chunk := start / 31; chunk <= end/31 && chunk <= uint64(len(code))/31; chunk++ {
+		index := trieUtils.GetTreeKeyCodeChunk(contract.Address().Bytes(), uint256.NewInt(chunk))
+		count := uint64(0)
+		end := (chunk + 1) * 31
+
+		// Look for the first code byte (i.e. no pushdata)
+		for ; count < 31 && end+count < uint64(len(contract.Code)) && !contract.IsCode(chunk*31+count); count++ {
+		}
+		var value [32]byte
+		value[0] = byte(count)
+		if end > uint64(len(code)) {
+			end = uint64(len(code))
+		}
+		copy(value[1:], code[chunk*31:end])
+		evm.Accesses.TouchAddress(index, value[:])
+	}
+}
+
+// copyCodeFromAccesses performs the code copy from the witness, not from the db.
+func copyCodeFromAccesses(addr common.Address, codeOffset, codeEnd, memOffset uint64, in *EVMInterpreter, scope *ScopeContext) {
+	chunk := codeOffset / 31
+	endChunk := codeEnd / 31
+	start := codeOffset % 31 // start inside the first code chunk
+	offset := uint64(0)      // memory offset to write to
+	// XXX uint64 overflow in condition check
+	for end := uint64(31); chunk < endChunk; chunk, start = chunk+1, 0 {
+		// case of the last chunk: figure out how many bytes need to
+		// be extracted from the last chunk.
+		if chunk+1 == endChunk {
+			end = codeEnd % 31
+		}
+
+		// TODO make a version of GetTreeKeyCodeChunk without the bigint
+		index := common.BytesToHash(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(chunk)))
+		h := in.evm.accesses[index]
+		//in.evm.Accesses.TouchAddress(index.Bytes(), h[1+start:1+end])
+		scope.Memory.Set(memOffset+offset, end-start, h[1+start:end])
+		offset += 31 - start
+	}
+}
+
func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
	var (
		stack = scope.Stack
@@ -380,9 +439,19 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
	if overflow {
		uint64CodeOffset = 0xffffffffffffffff
	}
+	uint64CodeEnd, overflow := new(uint256.Int).Add(&codeOffset, &length).Uint64WithOverflow()
+	if overflow {
+		uint64CodeEnd = 0xffffffffffffffff
+	}
	addr := common.Address(a.Bytes20())
-	codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64())
-	scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+	if interpreter.evm.accesses != nil {
+		copyCodeFromAccesses(addr, uint64CodeOffset, uint64CodeEnd, memOffset.Uint64(), interpreter, scope)
+	} else {
+		codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64())
+		scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+
+		touchEachChunks(uint64CodeOffset, uint64CodeEnd, codeCopy, scope.Contract, interpreter.evm)
+	}

	return nil, nil
}
@@ -510,6 +579,10 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
	hash := common.Hash(loc.Bytes32())
	val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash)
	loc.SetBytes(val.Bytes())
+	// Get the initial value as it might not be present
+
+	index := trieUtils.GetTreeKeyStorageSlot(scope.Contract.Address().Bytes(), loc)
+	interpreter.evm.TxContext.Accesses.TouchAddress(index, val.Bytes())
	return nil, nil
}

@@ -834,6 +907,25 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
	*pc += 1
	if *pc < codeLen {
		scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc])))
+
+		// touch next chunk if PUSH1 is at the boundary. if so, *pc has
+		// advanced past this boundary.
+		if *pc%31 == 0 {
+			// touch push data by adding the last byte of the pushdata
+			var value [32]byte
+			chunk := *pc / 31
+			count := uint64(0)
+			// Look for the first code byte (i.e. no pushdata)
+			for ; count < 31 && !scope.Contract.IsCode(chunk*31+count); count++ {
+			}
+			value[0] = byte(count)
+			endMin := (chunk + 1) * 31
+			if endMin > uint64(len(scope.Contract.Code)) {
+				endMin = uint64(len(scope.Contract.Code))
+			}
+			copy(value[1:], scope.Contract.Code[chunk*31:endMin])
+			index := trieUtils.GetTreeKeyCodeChunk(scope.Contract.Address().Bytes(), uint256.NewInt(chunk))
+			interpreter.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+		}
	} else {
		scope.Stack.push(integer.Clear())
	}
@@ -859,6 +951,39 @@ func makePush(size uint64, pushByteSize int) executionFunc {
		scope.Stack.push(integer.SetBytes(common.RightPadBytes(
			scope.Contract.Code[startMin:endMin], pushByteSize)))
+
+		// touch push data by adding the last byte of the pushdata
+		var value [32]byte
+		chunk := uint64(endMin-1) / 31
+		count := uint64(0)
+		// Look for the first code byte (i.e. no pushdata)
+		for ; count < 31 && !scope.Contract.IsCode(chunk*31+count); count++ {
+		}
+		value[0] = byte(count)
+		copy(value[1:], scope.Contract.Code[chunk*31:endMin])
+		index := trieUtils.GetTreeKeyCodeChunk(scope.Contract.Address().Bytes(), uint256.NewInt(chunk))
+		interpreter.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+
+		// in the case of PUSH32, the end data might be two chunks away,
+		// so also get the middle chunk. There is a boundary condition
+		// check (endMin > 2) in the case the code is a single PUSH32
+		// instruction, whose immediates are just 0s.
+		if pushByteSize == 32 && endMin > 2 {
+			chunk = uint64(endMin-2) / 31
+			count = uint64(0)
+			// Look for the first code byte (i.e. no pushdata)
+			for ; count < 31 && !scope.Contract.IsCode(chunk*31+count); count++ {
+			}
+			value[0] = byte(count)
+			end := (chunk + 1) * 31
+			if end > uint64(len(scope.Contract.Code)) {
+				end = uint64(len(scope.Contract.Code))
+			}
+			copy(value[1:], scope.Contract.Code[chunk*31:end])
+			index := trieUtils.GetTreeKeyCodeChunk(scope.Contract.Address().Bytes(), uint256.NewInt(chunk))
+			interpreter.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
+		}

		*pc += size
		return nil, nil
	}
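For readers unfamiliar with the 31-byte code chunking used by touchEachChunks, opCodeCopy and the PUSH handlers above, here is a simplified, standalone model of the chunk encoding; the isCode callback stands in for Contract.IsCode, and the exact boundary handling of the diff is not reproduced:

package main

import "fmt"

// chunkify splits code into 32-byte chunk values: byte 0 counts how many of
// the chunk's leading bytes are PUSH data (not opcodes), bytes 1..31 hold up
// to 31 bytes of the code itself.
func chunkify(code []byte, isCode func(pc uint64) bool) [][32]byte {
	var chunks [][32]byte
	for start := uint64(0); start < uint64(len(code)); start += 31 {
		var value [32]byte
		count := uint64(0)
		// Count leading bytes of this chunk that are push data.
		for ; count < 31 && start+count < uint64(len(code)) && !isCode(start+count); count++ {
		}
		value[0] = byte(count)
		end := start + 31
		if end > uint64(len(code)) {
			end = uint64(len(code))
		}
		copy(value[1:], code[start:end])
		chunks = append(chunks, value)
	}
	return chunks
}

func main() {
	// PUSH2 0xaabb, ADD: byte 0 is an opcode, bytes 1-2 are push data.
	code := []byte{0x61, 0xaa, 0xbb, 0x01}
	isCode := func(pc uint64) bool { return pc == 0 || pc == 3 } // toy analysis
	for _, c := range chunkify(code, isCode) {
		fmt.Printf("count=%d data=%x\n", c[0], c[1:])
	}
}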
@@ -74,6 +74,9 @@ type StateDB interface {
	AddPreimage(common.Hash, []byte)

	ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error

+	Witness() *types.AccessWitness
+	SetWitness(*types.AccessWitness)
}

// CallContext provides a basic interface for the EVM calling conventions. The EVM
@@ -17,12 +17,15 @@
package vm

import (
+	"errors"
	"hash"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/log"
+	trieUtils "github.com/ethereum/go-ethereum/trie/utils"
+	"github.com/holiman/uint256"
)

// Config are the configuration options for the Interpreter
@@ -191,9 +194,53 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
			logged, pcCopy, gasCopy = false, pc, contract.Gas
		}

+		// if the PC ends up in a new "page" of verkleized code, charge the
+		// associated witness costs.
+		inWitness := false
+		var codePage common.Hash
+		if in.evm.chainRules.IsCancun {
+			index := trieUtils.GetTreeKeyCodeChunk(contract.Address().Bytes(), uint256.NewInt(pc/31))
+
+			var value [32]byte
+			if in.evm.accesses != nil {
+				codePage, inWitness = in.evm.accesses[common.BytesToHash(index)]
+				// Return an error if we're in stateless mode
+				// and the code isn't in the witness. It means
+				// that if code is read beyond the actual code
+				// size, pages of 0s need to be added to the
+				// witness.
+				if !inWitness {
+					return nil, errors.New("code chunk missing from proof")
+				}
+				copy(value[:], codePage[:])
+			} else {
+				// Calculate the chunk
+				chunk := pc / 31
+				end := (chunk + 1) * 31
+				if end >= uint64(len(contract.Code)) {
+					end = uint64(len(contract.Code))
+				}
+				count := uint64(0)
+				// Look for the first code byte (i.e. no pushdata)
+				for ; chunk*31+count < end && count < 31 && !contract.IsCode(chunk*31+count); count++ {
+				}
+				value[0] = byte(count)
+				copy(value[1:], contract.Code[chunk*31:end])
+			}
+			contract.Gas -= in.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, value[:])
+		}

+		if inWitness {
+			// Get the op from the tree, skipping the header byte
+			op = OpCode(codePage[1+pc%31])
+		} else {
+			// If we are in witness mode, then raise an error
+			op = contract.GetOp(pc)
+		}

		// Get the operation from the jump table and validate the stack to ensure there are
		// enough stack items available to perform the operation.
-		op = contract.GetOp(pc)
		operation := in.cfg.JumpTable[op]
		if operation == nil {
			return nil, &ErrInvalidOpCode{opcode: op}
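A toy illustration of the code-page lookup the interpreter loop above performs in stateless mode; a plain map keyed by chunk number stands in for evm.accesses, which in the diff is keyed by the 32-byte tree key:

package main

import "fmt"

// opFromPages reads the opcode for program counter pc out of 32-byte code
// pages: each page stores 31 code bytes after a one-byte header, so the byte
// for pc lives at offset 1 + pc%31 of page pc/31.
func opFromPages(pages map[uint64][32]byte, pc uint64) (byte, bool) {
	page, ok := pages[pc/31]
	if !ok {
		return 0, false // chunk missing from the proof
	}
	return page[1+pc%31], true
}

func main() {
	var page [32]byte
	page[0] = 0    // no leading push data in this chunk
	page[1] = 0x60 // PUSH1 at pc 0
	page[2] = 0x01 // its immediate at pc 1
	pages := map[uint64][32]byte{0: page}

	op, ok := opFromPages(pages, 0)
	fmt.Printf("%x %v\n", op, ok) // 60 true
}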
@@ -433,6 +433,7 @@ func newFrontierInstructionSet() JumpTable {
		EXTCODESIZE: {
			execute:     opExtCodeSize,
			constantGas: params.ExtcodeSizeGasFrontier,
+			dynamicGas:  gasExtCodeSize,
			minStack:    minStack(1, 1),
			maxStack:    maxStack(1, 1),
		},
@@ -513,6 +514,7 @@ func newFrontierInstructionSet() JumpTable {
		SLOAD: {
			execute:     opSload,
			constantGas: params.SloadGasFrontier,
+			dynamicGas:  gasSLoad,
			minStack:    minStack(1, 1),
			maxStack:    maxStack(1, 1),
		},
@@ -197,7 +197,7 @@ func TestNoStepExec(t *testing.T) {
}

func TestIsPrecompile(t *testing.T) {
-	chaincfg := &params.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), TerminalTotalDifficulty: nil, Ethash: new(params.EthashConfig), Clique: nil}
+	chaincfg := &params.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), TerminalTotalDifficulty: nil, CancunBlock: nil, Ethash: new(params.EthashConfig), Clique: nil}
	chaincfg.ByzantiumBlock = big.NewInt(100)
	chaincfg.IstanbulBlock = big.NewInt(200)
	chaincfg.BerlinBlock = big.NewInt(300)
@@ -235,14 +235,17 @@ func TestEnterExit(t *testing.T) {
	if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context)); err != nil {
		t.Fatal(err)
	}

	// test that the enter and exit method are correctly invoked and the values passed
	tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context))
	if err != nil {
		t.Fatal(err)
	}

	scope := &vm.ScopeContext{
		Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0),
	}

	tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int))
	tracer.CaptureExit([]byte{}, 400, nil)
3 go.mod
@@ -25,6 +25,7 @@ require (
	github.com/fatih/color v1.7.0
	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
+	github.com/gballet/go-verkle v0.0.0-20211108173141-3d812b8722d3 // indirect
	github.com/go-ole/go-ole v1.2.1 // indirect
	github.com/go-stack/stack v1.8.0
	github.com/golang/protobuf v1.4.3
@@ -64,7 +65,7 @@ require (
	golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
	golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
+	golang.org/x/sys v0.0.0-20211112143042-c6105e7cf70d
	golang.org/x/text v0.3.6
	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
8 go.sum
@@ -104,6 +104,8 @@ github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/crate-crypto/go-ipa v0.0.0-20211107182441-1aeb67f49de7 h1:P6yxenBOOu4RF26bRLiWG+zrwQ1kPAaz71sDmX6b76I=
+github.com/crate-crypto/go-ipa v0.0.0-20211107182441-1aeb67f49de7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -142,6 +144,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/gballet/go-verkle v0.0.0-20211108173141-3d812b8722d3 h1:YuCllCO2PQ1U2RXeJBbWEd5LLJCk+Ha4FRo+j7reX78=
+github.com/gballet/go-verkle v0.0.0-20211108173141-3d812b8722d3/go.mod h1:BASlDXGXxlAP/xls4A48nC+QVn3jLiP0s4RTmMgh/IE=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -538,6 +542,10 @@ golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211112143042-c6105e7cf70d h1:jp6PtFmjL+vGsuzd86xYqaJGv6eXdLvmVGzVVLI6EPI=
+golang.org/x/sys v0.0.0-20211112143042-c6105e7cf70d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -163,6 +163,8 @@ func (t *odrTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter
	return errors.New("not implemented, needs client/server interface split")
}

+func (t *odrTrie) IsVerkle() bool { return false }
+
// do tries and retries to execute a function until it returns with no error or
// an error type other than MissingNodeError
func (t *odrTrie) do(key []byte, fn func() error) error {
@@ -1039,7 +1039,15 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
	if err != nil {
		return err
	}
+	if tr := s.GetTrie(); tr.IsVerkle() {
+		vtr := tr.(*trie.VerkleTrie)
+		// Generate the proof if we are using a verkle tree
+		p, err := vtr.ProveAndSerialize(s.Witness().Keys(), s.Witness().KeyVals())
+		if err != nil {
+			return err
+		}
+		w.current.header.VerkleProof = p
+	}
	if w.isRunning() && !w.merger.TDDReached() {
		if interval != nil {
			interval()
@@ -75,6 +75,26 @@ var (
		Ethash:              new(EthashConfig),
	}

+	VerkleChainConfig = &ChainConfig{
+		ChainID:             big.NewInt(86),
+		HomesteadBlock:      big.NewInt(0),
+		DAOForkBlock:        big.NewInt(0),
+		DAOForkSupport:      true,
+		EIP150Block:         big.NewInt(0),
+		EIP150Hash:          common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
+		EIP155Block:         big.NewInt(0),
+		EIP158Block:         big.NewInt(0),
+		ByzantiumBlock:      big.NewInt(0),
+		ConstantinopleBlock: big.NewInt(0),
+		PetersburgBlock:     big.NewInt(0),
+		IstanbulBlock:       big.NewInt(0),
+		MuirGlacierBlock:    big.NewInt(0),
+		BerlinBlock:         big.NewInt(0),
+		LondonBlock:         big.NewInt(0),
+		CancunBlock:         big.NewInt(0),
+		Ethash:              new(EthashConfig),
+	}
+
	// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
	MainnetTrustedCheckpoint = &TrustedCheckpoint{
		SectionIndex: 413,
@@ -257,16 +277,16 @@ var (
	//
	// This configuration is intentionally not using keyed fields to force anyone
	// adding flags to the config to also have to set these fields.
-	AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
+	AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}

	// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
	// and accepted by the Ethereum core developers into the Clique consensus.
	//
	// This configuration is intentionally not using keyed fields to force anyone
	// adding flags to the config to also have to set these fields.
-	AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+	AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}

-	TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
+	TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
	TestRules = TestChainConfig.Rules(new(big.Int))
)

@@ -346,6 +366,7 @@ type ChainConfig struct {
	BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin)
	LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london)
	ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // Eip-4345 (bomb delay) switch block (nil = no fork, 0 = already activated)
+	CancunBlock *big.Int `json:"cancunBlock,omitempty"`

	// TerminalTotalDifficulty is the amount of total difficulty reached by
	// the network that triggers the consensus upgrade.
@@ -386,7 +407,7 @@ func (c *ChainConfig) String() string {
	default:
		engine = "unknown"
	}
-	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Engine: %v}",
+	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Cancun: %v, Engine: %v}",
		c.ChainID,
		c.HomesteadBlock,
		c.DAOForkBlock,
@@ -402,6 +423,7 @@ func (c *ChainConfig) String() string {
		c.BerlinBlock,
		c.LondonBlock,
		c.ArrowGlacierBlock,
+		c.CancunBlock,
		engine,
	)
}
@@ -446,6 +468,10 @@ func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool {
	return isForked(c.MuirGlacierBlock, num)
}

+func (c *ChainConfig) IsCancun(num *big.Int) bool {
+	return isForked(c.CancunBlock, num)
+}
+
// IsPetersburg returns whether num is either
// - equal to or greater than the PetersburgBlock fork block,
// - OR is nil, and Constantinople is active
@@ -661,7 +687,7 @@ type Rules struct {
	ChainID *big.Int
	IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
	IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
-	IsBerlin, IsLondon bool
+	IsBerlin, IsLondon, IsCancun bool
}

// Rules ensures c's ChainID is not nil.
@@ -682,5 +708,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
		IsIstanbul: c.IsIstanbul(num),
		IsBerlin:   c.IsBerlin(num),
		IsLondon:   c.IsLondon(num),
+		IsCancun:   c.IsCancun(num),
	}
}
@@ -156,6 +156,10 @@ const (
	// up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529
	RefundQuotient        uint64 = 2
	RefundQuotientEIP3529 uint64 = 5

+	// Verkle tree EIP: costs associated with witness accesses
+	WitnessBranchCost = uint64(1900)
+	WitnessChunkCost  = uint64(200)
)

// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
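Under the charging logic in this branch, these constants work out as follows: the first access to a leaf under a previously untouched stem costs 1900 + 200 = 2100 gas, every additional leaf under the same stem costs another 200 gas, and re-accessing a leaf already in the transaction's witness costs nothing. For example, the five account-header leaves touched by a CALL to a cold account add up to 1900 + 5 * 200 = 2900 gas of witness surcharge.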
@@ -261,7 +261,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo

	var snaps *snapshot.Tree
	if snapshotter {
-		snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false)
+		snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false, false)
	}
	statedb, _ = state.New(root, sdb, snaps)
	return snaps, statedb
@@ -277,6 +277,7 @@ type Config struct {
	Cache     int    // Memory allowance (MB) to use for caching trie nodes in memory
	Journal   string // Journal of clean cache to survive node restarts
	Preimages bool   // Flag whether the preimage of trie key is recorded
+	UseVerkle bool   // Flag whether the data is stored in a verkle trie
}

// NewDatabase creates a new trie database to store ephemeral trie content before
@@ -217,3 +217,7 @@ func (t *SecureTrie) getSecKeyCache() map[string][]byte {
	}
	return t.secKeyCache
}

+func (t *SecureTrie) IsVerkle() bool {
+	return false
+}
107 trie/utils/verkle.go Normal file
@@ -0,0 +1,107 @@
// Copyright 2021 go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package utils

import (
	"crypto/sha256"

	"github.com/holiman/uint256"
)

const (
	VersionLeafKey    = 0
	BalanceLeafKey    = 1
	NonceLeafKey      = 2
	CodeKeccakLeafKey = 3
	CodeSizeLeafKey   = 4
)

var (
	zero                = uint256.NewInt(0)
	HeaderStorageOffset = uint256.NewInt(64)
	CodeOffset          = uint256.NewInt(128)
	MainStorageOffset   = new(uint256.Int).Lsh(uint256.NewInt(256), 31)
	VerkleNodeWidth     = uint256.NewInt(8)
	codeStorageDelta    = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset)
)

func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte {
	digest := sha256.New()
	digest.Write(address)
	treeIndexBytes := treeIndex.Bytes()
	var payload [32]byte
	copy(payload[:len(treeIndexBytes)], treeIndexBytes)
	digest.Write(payload[:])
	h := digest.Sum(nil)
	h[31] = subIndex
	return h
}

func GetTreeKeyAccountLeaf(address []byte, leaf byte) []byte {
	return GetTreeKey(address, zero, leaf)
}

func GetTreeKeyVersion(address []byte) []byte {
	return GetTreeKey(address, zero, VersionLeafKey)
}

func GetTreeKeyBalance(address []byte) []byte {
	return GetTreeKey(address, zero, BalanceLeafKey)
}

func GetTreeKeyNonce(address []byte) []byte {
	return GetTreeKey(address, zero, NonceLeafKey)
}

func GetTreeKeyCodeKeccak(address []byte) []byte {
	return GetTreeKey(address, zero, CodeKeccakLeafKey)
}

func GetTreeKeyCodeSize(address []byte) []byte {
	return GetTreeKey(address, zero, CodeSizeLeafKey)
}

func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte {
	chunkOffset := new(uint256.Int).Add(CodeOffset, chunk)
	treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth)
	subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth).Bytes()
	var subIndex byte
	if len(subIndexMod) != 0 {
		subIndex = subIndexMod[0]
	}
	return GetTreeKey(address, treeIndex, subIndex)
}

func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte {
	treeIndex := storageKey.Clone()
	if storageKey.Cmp(codeStorageDelta) < 0 {
		treeIndex.Add(HeaderStorageOffset, storageKey)
	} else {
		treeIndex.Add(MainStorageOffset, storageKey)
	}
	treeIndex.Div(treeIndex, VerkleNodeWidth)

	// calculate the sub_index, i.e. the index in the stem tree.
	// Because the modulus is 256, it's the last byte of treeIndex
	subIndexMod := treeIndex.Bytes()
	var subIndex byte
	if len(subIndexMod) != 0 {
		// Get the last byte, as uint256.Int is big-endian
		subIndex = subIndexMod[len(subIndexMod)-1]
	}
	return GetTreeKey(address, treeIndex, subIndex)
}
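A short usage sketch of the helpers above; the concrete offsets follow directly from the constants in this file (low storage slots are remapped next to the account header via HeaderStorageOffset, and code chunk N lives at CodeOffset + N):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie/utils"
	"github.com/holiman/uint256"
)

func main() {
	addr := make([]byte, 20) // a 20-byte account address

	// Account header fields live at tree index 0, sub-indices 0..4.
	version := utils.GetTreeKeyVersion(addr)
	balance := utils.GetTreeKeyBalance(addr)

	// Storage slot 1 is a "header" slot: its tree offset is HeaderStorageOffset + 1 = 65.
	slot := utils.GetTreeKeyStorageSlot(addr, uint256.NewInt(1))
	// Code chunk 3 sits at tree offset CodeOffset + 3 = 131.
	chunk := utils.GetTreeKeyCodeChunk(addr, uint256.NewInt(3))

	fmt.Printf("%x\n%x\n%x\n%x\n", version, balance, slot, chunk)
}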
266
trie/verkle.go
Normal file
266
trie/verkle.go
Normal file
@ -0,0 +1,266 @@
|
|||||||
|
// Copyright 2021 go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie/utils"
	"github.com/gballet/go-verkle"
)

// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie
// interface so that Verkle trees can be reused verbatim.
type VerkleTrie struct {
	root verkle.VerkleNode
	db   *Database
}

//func (vt *VerkleTrie) ToDot() string {
//	return verkle.ToDot(vt.root)
//}

func NewVerkleTrie(root verkle.VerkleNode, db *Database) *VerkleTrie {
	return &VerkleTrie{
		root: root,
		db:   db,
	}
}

var errInvalidProof = errors.New("invalid proof")

// GetKey returns the sha3 preimage of a hashed key that was previously used
// to store a value.
func (trie *VerkleTrie) GetKey(key []byte) []byte {
	return key
}

// TryGet returns the value for key stored in the trie. The value bytes must
// not be modified by the caller. If a node was not found in the database, a
// trie.MissingNodeError is returned.
func (trie *VerkleTrie) TryGet(key []byte) ([]byte, error) {
	return trie.root.Get(key, trie.db.DiskDB().Get)
}

func (t *VerkleTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error {
	var err error
	if err = t.TryUpdate(utils.GetTreeKeyVersion(key), []byte{0}); err != nil {
		return fmt.Errorf("updateStateObject (%x) error: %v", key, err)
	}
	var nonce [32]byte
	binary.BigEndian.PutUint64(nonce[:], acc.Nonce)
	if err = t.TryUpdate(utils.GetTreeKeyNonce(key), nonce[:]); err != nil {
		return fmt.Errorf("updateStateObject (%x) error: %v", key, err)
	}
	if err = t.TryUpdate(utils.GetTreeKeyBalance(key), acc.Balance.Bytes()); err != nil {
		return fmt.Errorf("updateStateObject (%x) error: %v", key, err)
	}
	if err = t.TryUpdate(utils.GetTreeKeyCodeKeccak(key), acc.CodeHash); err != nil {
		return fmt.Errorf("updateStateObject (%x) error: %v", key, err)
	}

	return nil
}

// TryUpdate associates key with value in the trie. If value has length zero, any
// existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
func (trie *VerkleTrie) TryUpdate(key, value []byte) error {
	return trie.root.Insert(key, value, func(h []byte) ([]byte, error) {
		return trie.db.DiskDB().Get(h)
	})
}

// TryDelete removes any existing value for key from the trie. If a node was not
// found in the database, a trie.MissingNodeError is returned.
func (trie *VerkleTrie) TryDelete(key []byte) error {
	return trie.root.Delete(key)
}

// Hash returns the root hash of the trie. It does not write to the database and
// can be used even if the trie doesn't have one.
func (trie *VerkleTrie) Hash() common.Hash {
	// TODO cache this value
	rootC := trie.root.ComputeCommitment()
	return rootC.Bytes()
}

func nodeToDBKey(n verkle.VerkleNode) []byte {
	ret := n.ComputeCommitment().Bytes()
	return ret[:]
}

// Commit writes all nodes to the trie's memory database, tracking the internal
// and external (for account tries) references.
func (trie *VerkleTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
	flush := make(chan verkle.VerkleNode)
	go func() {
		trie.root.(*verkle.InternalNode).Flush(func(n verkle.VerkleNode) {
			if onleaf != nil {
				if leaf, isLeaf := n.(*verkle.LeafNode); isLeaf {
					for i := 0; i < verkle.NodeWidth; i++ {
						if leaf.Value(i) != nil {
							comm := n.ComputeCommitment().Bytes()
							onleaf(nil, nil, leaf.Value(i), common.BytesToHash(comm[:]))
						}
					}
				}
			}
			flush <- n
		})
		close(flush)
	}()
	var commitCount int
	for n := range flush {
		commitCount += 1
		value, err := n.Serialize()
		if err != nil {
			panic(err)
		}

		if err := trie.db.DiskDB().Put(nodeToDBKey(n), value); err != nil {
			return common.Hash{}, commitCount, err
		}
	}

	return trie.Hash(), commitCount, nil
}

// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
func (trie *VerkleTrie) NodeIterator(startKey []byte) NodeIterator {
	return newVerkleNodeIterator(trie, nil)
}

// Prove constructs a Merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
// node and can be retrieved by verifying the proof.
//
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
func (trie *VerkleTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
	panic("not implemented")
}

func (trie *VerkleTrie) Copy(db *Database) *VerkleTrie {
	return &VerkleTrie{
		root: trie.root.Copy(),
		db:   db,
	}
}

func (trie *VerkleTrie) IsVerkle() bool {
	return true
}

type KeyValuePair struct {
	Key   []byte
	Value []byte
}

type verkleproof struct {
	Proof *verkle.Proof

	Cis     []*verkle.Point
	Indices []byte
	Yis     []*verkle.Fr

	Leaves []KeyValuePair
}

func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte, kv map[common.Hash][]byte) ([]byte, error) {
	proof, cis, indices, yis := verkle.MakeVerkleMultiProof(trie.root, keys)
	vp := verkleproof{
		Proof:   proof,
		Cis:     cis,
		Indices: indices,
		Yis:     yis,
	}
	for key, val := range kv {
		var k [32]byte
		copy(k[:], key[:])
		vp.Leaves = append(vp.Leaves, KeyValuePair{
			Key:   k[:],
			Value: val,
		})
	}
	return rlp.EncodeToBytes(vp)
}

func DeserializeAndVerifyVerkleProof(serialized []byte) (map[common.Hash]common.Hash, error) {
	proof, cis, indices, yis, leaves, err := deserializeVerkleProof(serialized)
	if err != nil {
		return nil, fmt.Errorf("could not deserialize proof: %w", err)
	}
	if !verkle.VerifyVerkleProof(proof, cis, indices, yis, verkle.GetConfig()) {
		return nil, errInvalidProof
	}

	return leaves, nil
}

func deserializeVerkleProof(proof []byte) (*verkle.Proof, []*verkle.Point, []byte, []*verkle.Fr, map[common.Hash]common.Hash, error) {
	var vp verkleproof
	err := rlp.DecodeBytes(proof, &vp)
	if err != nil {
		return nil, nil, nil, nil, nil, fmt.Errorf("verkle proof deserialization error: %w", err)
	}
	leaves := make(map[common.Hash]common.Hash, len(vp.Leaves))
	for _, kvp := range vp.Leaves {
		leaves[common.BytesToHash(kvp.Key)] = common.BytesToHash(kvp.Value)
	}
	return vp.Proof, vp.Cis, vp.Indices, vp.Yis, leaves, nil
}

// Copy the values here so as to avoid an import cycle
const (
	PUSH1  = 0x60
	PUSH32 = 0x7f
)

func ChunkifyCode(addr common.Address, code []byte) ([][32]byte, error) {
	lastOffset := byte(0)
	chunkCount := len(code) / 31
	if len(code)%31 != 0 {
		chunkCount++
	}
	chunks := make([][32]byte, chunkCount)
	for i := range chunks {
		end := 31 * (i + 1)
		if len(code) < end {
			end = len(code)
		}
		// The first byte of each chunk is reserved for metadata; the code
		// itself goes into the remaining 31 bytes.
		copy(chunks[i][1:], code[31*i:end])
		// Scan the chunk for PUSHn opcodes and skip over their immediate
		// data; indices are relative to the start of the current chunk.
		chunk := code[31*i : end]
		for j := lastOffset; int(j) < len(chunk); j++ {
			if chunk[j] >= byte(PUSH1) && chunk[j] <= byte(PUSH32) {
				j += chunk[j] - byte(PUSH1) + 1
				lastOffset = (j + 1) % 31
			}
		}
		chunks[i][0] = lastOffset
	}

	return chunks, nil
}
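Taken together, trie/verkle.go lets the state code write an account's header leaves with TryUpdateAccount and persist the resulting nodes with Commit, which flushes every node to the backing key-value store and recomputes the root commitment. The following is a rough end-to-end sketch, not part of the diff; it assumes that verkle.New() returns an empty root node, and it uses rawdb.NewMemoryDatabase and crypto.Keccak256 purely for illustration.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/gballet/go-verkle"
)

func main() {
	// An in-memory node database backing the verkle trie.
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	vt := trie.NewVerkleTrie(verkle.New(), db)

	// Write the account header leaves for a test address.
	addr := common.HexToAddress("0x000000000000000000000000000000000000dead")
	acc := &types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(42),
		CodeHash: crypto.Keccak256(nil), // hash of empty code
	}
	if err := vt.TryUpdateAccount(addr.Bytes(), acc); err != nil {
		panic(err)
	}

	// Flush every node to the disk database and print the new root.
	root, nodes, err := vt.Commit(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("root %x after flushing %d nodes\n", root, nodes)
}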
249
trie/verkle_iterator.go
Normal file
@ -0,0 +1,249 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"

	"github.com/gballet/go-verkle"
)

type verkleNodeIteratorState struct {
	Node  verkle.VerkleNode
	Index int
}

type verkleNodeIterator struct {
	trie    *VerkleTrie
	current verkle.VerkleNode
	lastErr error

	stack []verkleNodeIteratorState
}

func newVerkleNodeIterator(trie *VerkleTrie, start []byte) NodeIterator {
	if trie.Hash() == emptyState {
		return new(nodeIterator)
	}
	it := &verkleNodeIterator{trie: trie, current: trie.root}
	//it.err = it.seek(start)
	return it
}

// Next moves the iterator to the next node. If the parameter is false, any child
// nodes will be skipped.
func (it *verkleNodeIterator) Next(descend bool) bool {
	if it.lastErr == errIteratorEnd {
		return false
	}

	if len(it.stack) == 0 {
		it.stack = append(it.stack, verkleNodeIteratorState{Node: it.trie.root, Index: 0})
		it.current = it.trie.root

		return true
	}

	switch node := it.current.(type) {
	case *verkle.InternalNode:
		context := &it.stack[len(it.stack)-1]

		// Look for the next non-empty child
		children := node.Children()
		for ; context.Index < len(children); context.Index++ {
			if _, ok := children[context.Index].(verkle.Empty); !ok {
				it.stack = append(it.stack, verkleNodeIteratorState{Node: children[context.Index], Index: 0})
				it.current = children[context.Index]
				return it.Next(descend)
			}
		}

		// Reached the end of this node, go back to the parent, if
		// this isn't root.
		if len(it.stack) == 1 {
			it.lastErr = errIteratorEnd
			return false
		}
		it.stack = it.stack[:len(it.stack)-1]
		it.current = it.stack[len(it.stack)-1].Node
		it.stack[len(it.stack)-1].Index++
		return it.Next(descend)
	case *verkle.LeafNode:
		// Look for the next non-empty value
		for i := it.stack[len(it.stack)-1].Index + 1; i < 256; i++ {
			if node.Value(i) != nil {
				it.stack[len(it.stack)-1].Index = i
				return true
			}
		}
		// go back to parent to get the next leaf
		it.stack = it.stack[:len(it.stack)-1]
		it.current = it.stack[len(it.stack)-1].Node
		it.stack[len(it.stack)-1].Index++
		return it.Next(descend)
	case *verkle.HashedNode:
		// resolve the node
		data, err := it.trie.db.diskdb.Get(nodeToDBKey(node))
		if err != nil {
			panic(err)
		}
		it.current, err = verkle.ParseNode(data, len(it.stack)-1)
		if err != nil {
			panic(err)
		}

		// update the stack and parent with the resolved node
		it.stack[len(it.stack)-1].Node = it.current
		parent := &it.stack[len(it.stack)-2]
		parent.Node.(*verkle.InternalNode).SetChild(parent.Index, it.current)
		return true
	default:
		fmt.Println(node)
		panic("invalid node type")
	}
}

// Error returns the error status of the iterator.
func (it *verkleNodeIterator) Error() error {
	if it.lastErr == errIteratorEnd {
		return nil
	}
	return it.lastErr
}

// Hash returns the hash of the current node.
func (it *verkleNodeIterator) Hash() common.Hash {
	return it.current.ComputeCommitment().Bytes()
}

// Parent returns the hash of the parent of the current node. The hash may be the one
// grandparent if the immediate parent is an internal node with no hash.
func (it *verkleNodeIterator) Parent() common.Hash {
	return it.stack[len(it.stack)-1].Node.ComputeCommitment().Bytes()
}

// Path returns the hex-encoded path to the current node.
// Callers must not retain references to the return value after calling Next.
// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
func (it *verkleNodeIterator) Path() []byte {
	panic("not completely implemented")
}

// Leaf returns true iff the current node is a leaf node.
func (it *verkleNodeIterator) Leaf() bool {
	_, ok := it.current.(*verkle.LeafNode)
	return ok
}

// LeafKey returns the key of the leaf. The method panics if the iterator is not
// positioned at a leaf. Callers must not retain references to the value after
// calling Next.
func (it *verkleNodeIterator) LeafKey() []byte {
	leaf, ok := it.current.(*verkle.LeafNode)
	if !ok {
		panic("LeafKey() called on a verkle node iterator not at a leaf location")
	}

	return leaf.Key(it.stack[len(it.stack)-1].Index)
}

// LeafBlob returns the content of the leaf. The method panics if the iterator
// is not positioned at a leaf. Callers must not retain references to the value
// after calling Next.
func (it *verkleNodeIterator) LeafBlob() []byte {
	leaf, ok := it.current.(*verkle.LeafNode)
	if !ok {
		panic("LeafBlob() called on a verkle node iterator not at a leaf location")
	}

	return leaf.Value(it.stack[len(it.stack)-1].Index)
}

// LeafProof returns the Merkle proof of the leaf. The method panics if the
// iterator is not positioned at a leaf. Callers must not retain references
// to the value after calling Next.
func (it *verkleNodeIterator) LeafProof() [][]byte {
	_, ok := it.current.(*verkle.LeafNode)
	if !ok {
		panic("LeafProof() called on a verkle node iterator not at a leaf location")
	}

	//return it.trie.Prove(leaf.Key())
	panic("not completely implemented")
}

// AddResolver sets an intermediate database to use for looking up trie nodes
// before reaching into the real persistent layer.
//
// This is not required for normal operation, rather is an optimization for
// cases where trie nodes can be recovered from some external mechanism without
// reading from disk. In those cases, this resolver allows short circuiting
// accesses and returning them from memory.
//
// Before adding a similar mechanism to any other place in Geth, consider
// making trie.Database an interface and wrapping at that level. It's a huge
// refactor, but it could be worth it if another occurrence arises.
func (it *verkleNodeIterator) AddResolver(ethdb.KeyValueStore) {
	panic("not completely implemented")
}

type dummy struct{}

func (it dummy) Next(descend bool) bool {
	return false
}

func (it dummy) Error() error {
	return nil
}

func (it dummy) Hash() common.Hash {
	panic("should not be called")
}

func (it dummy) Leaf() bool {
	return false
}

func (it dummy) LeafKey() []byte {
	return nil
}

func (it dummy) LeafProof() [][]byte {
	return nil
}

func (it dummy) LeafBlob() []byte {
	return nil
}

func (it dummy) Parent() common.Hash {
	return common.Hash{}
}

func (it dummy) Path() []byte {
	return nil
}

func (it dummy) AddResolver(ethdb.KeyValueStore) {
	panic("not completely implemented")
}
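The iterator above performs a depth-first walk over the in-memory tree, resolving HashedNodes from the trie's disk database on the way down and stopping on each non-empty leaf value. A hypothetical walk, again not part of the diff, could look like the sketch below; it reuses the same assumptions as the previous sketch (in-memory backing database, verkle.New() returning an empty root) and inserts 32-byte keys and values via TryUpdate.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/gballet/go-verkle"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	vt := trie.NewVerkleTrie(verkle.New(), db)

	// Insert two 32-byte keys that share the same 31-byte stem, with 32-byte values.
	for _, sub := range []byte{1, 2} {
		key := make([]byte, 32)
		key[31] = sub
		val := make([]byte, 32)
		val[31] = sub
		if err := vt.TryUpdate(key, val); err != nil {
			panic(err)
		}
	}

	// Walk the trie: Next(true) descends into children, Leaf() reports
	// whether the iterator currently sits on a non-empty leaf value.
	it := vt.NodeIterator(nil)
	for it.Next(true) {
		if it.Leaf() {
			fmt.Printf("leaf %x = %x\n", it.LeafKey(), it.LeafBlob())
		}
	}
	if err := it.Error(); err != nil {
		panic(err)
	}
}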