Compare commits

...

77 Commits

Author SHA1 Message Date
Guillaume Ballet
9c67d521ac reproduce the bug 2022-01-25 16:19:58 +01:00
Guillaume Ballet
15b353d7b4 verkle proof deserialization (#61)
* use proof serialization

* remove cruft

* save current state

* fix most issues up to this point

* fix remaining build issues

* update the go.mod to use the right branch

* remove custom-defined set type

* update go-verkle to get merged PRs

* extract key, value data from proof

* only activate precomp calculations if this is a verkle chain

Co-authored-by: Jared Wasinger <j-wasinger@hotmail.com>
2022-01-21 12:38:40 +01:00
Guillaume Ballet
5beac51808 Charge witness gas when calling/creating a contract (#60)
* Charge witness gas when calling/creating a contract

Co-authored-by: Jared Wasinger <j-wasinger@hotmail.com>

* gofmt

* replace evm.Accesses != nil checks with IsCancun

* remove double-charging of witness access costs for contract creation initialization

Co-authored-by: Jared Wasinger <j-wasinger@hotmail.com>
2022-01-19 08:36:57 +01:00
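
For context, a minimal sketch of the charging pattern, assuming simplified names (AccessWitness, TouchAddress) and placeholder cost constants rather than this branch's exact API: each account-header leaf a CALL needs is touched once, and only the first touch is charged, which is also why the double-charging fix above matters.

package main

import "fmt"

// Placeholder witness costs, loosely following the verkle gas sketches.
const (
	witnessBranchReadCost = 1900
	witnessChunkReadCost  = 200
)

// AccessWitness records which (address, leaf) pairs execution has touched.
type AccessWitness struct {
	touched map[string]struct{}
}

// TouchAddress marks one leaf as accessed and returns the gas to charge;
// repeated touches of the same leaf are free.
func (aw *AccessWitness) TouchAddress(key string) uint64 {
	if _, ok := aw.touched[key]; ok {
		return 0
	}
	aw.touched[key] = struct{}{}
	return witnessBranchReadCost + witnessChunkReadCost
}

// callWitnessGas sums the witness gas for the account fields a CALL or
// CREATE needs to read (version, balance, nonce, code hash, code size).
func callWitnessGas(aw *AccessWitness, addr string) uint64 {
	var gas uint64
	for _, leaf := range []string{"version", "balance", "nonce", "codehash", "codesize"} {
		gas += aw.TouchAddress(addr + "/" + leaf)
	}
	return gas
}

func main() {
	aw := &AccessWitness{touched: make(map[string]struct{})}
	fmt.Println(callWitnessGas(aw, "0xdead")) // first call: charged
	fmt.Println(callWitnessGas(aw, "0xdead")) // second call: free
}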
jwasinger
99604b0699 Use IsCancun where applicable (#56)
* replace Accesses != nil with IsCancun(...)

* fix
2022-01-14 10:53:27 +01:00
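
A minimal sketch of the gating change, with ChainConfig as a cut-down stand-in for go-ethereum's params.ChainConfig: verkle-specific logic is keyed off the fork schedule rather than off the presence of a witness object.

package main

import (
	"fmt"
	"math/big"
)

// ChainConfig is a simplified stand-in with only a Cancun activation block.
type ChainConfig struct{ CancunBlock *big.Int }

// IsCancun mirrors the usual shape of go-ethereum fork predicates.
func (c *ChainConfig) IsCancun(num *big.Int) bool {
	return c.CancunBlock != nil && c.CancunBlock.Cmp(num) <= 0
}

func main() {
	cfg := &ChainConfig{CancunBlock: big.NewInt(100)}
	// Instead of `if evm.Accesses != nil { ... }`, gate on the fork:
	fmt.Println(cfg.IsCancun(big.NewInt(99)))  // false
	fmt.Println(cfg.IsCancun(big.NewInt(100))) // true
}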
jwasinger
952be80177 Verkle EXTCODECOPY implementation (#55)
* core/vm: verkle extcodecopy naive way (do jumpdest analysis on target contract every EXTCODECOPY)

* no double-charge

* address edge-case in touchEachChunksAndChargeGas

* simplify line

Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
2022-01-13 13:06:21 +01:00
jwasinger
d761880fd2 core/vm: don't include contract deployer bytecode in AccessWitness or charge witness access costs for it (#54) 2022-01-13 10:08:22 +01:00
Guillaume Ballet
4428439fdf fix: don't use rlp in the tree for slot values (#51)
* fix: don't use rlp in the tree for slot values

* fix timeout after rebase
2022-01-11 11:38:49 +01:00
Guillaume Ballet
99f3c92361 fix: don't create settings each time a key is calculated (#53) 2022-01-10 15:39:46 +01:00
Guillaume Ballet
c87a6d904f reactivate working lines in test 2022-01-07 17:35:19 +01:00
Guillaume Ballet
e16e9cc84b replace sha256 with pedersen_hash in get_key (#46)
* replace sha256 with pedersen_hash

* fix: prevent an OOB

* workaround timeout in unit test

* update go-ipa and reduce the timeout

* fix for unit tests: do not call NewAccessWitness in NewEVMTxContext (#49)

* potential fix: do not call NewAccessWitness in NewEVMTxContext

* more fixes: check for the existence of Accesses

* fix absence of witness in copy

* fix another witness issue

* workaround: ensure the prefetcher is off in verkle mode

* fix the remaining issues in tests

* review feedback

* fix witness allocation in stateless test
2022-01-07 11:53:48 +01:00
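
The key-derivation shape this commit moves to, sketched with sha256 standing in for the Pedersen hash (the branch computes the real thing via go-ipa): a 31-byte prefix derived from (address, tree index), plus a one-byte sub-index.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// getTreeKey sketches verkle key derivation: hash the address together
// with a tree index, keep 31 bytes, and append the sub-index byte.
// sha256 is a placeholder so the example runs; the real code uses a
// Pedersen commitment.
func getTreeKey(addr [20]byte, treeIndex uint64, subIndex byte) [32]byte {
	var buf [28]byte // 20-byte address + 8-byte little-endian index
	copy(buf[:20], addr[:])
	binary.LittleEndian.PutUint64(buf[20:], treeIndex)
	h := sha256.Sum256(buf[:]) // placeholder for pedersen_hash
	var key [32]byte
	copy(key[:31], h[:31])
	key[31] = subIndex
	return key
}

func main() {
	var addr [20]byte
	fmt.Printf("%x\n", getTreeKey(addr, 0, 1)) // e.g. an account-header leaf
}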
Guillaume Ballet
f215cc0791 call onleaf in verkle commit (#45) 2022-01-03 16:27:18 +01:00
jwasinger
99ebf767b9 Refactor witness-accumulation in EVM (#42)
* make PUSH dynamically charged: charge witness gas costs for PUSH, and refactor EVM witness gas charging to move the logic for touching a range of bytecode into a helper method 'touchEachChunksAndChargeGas'

* add witness gas calculation for CodeCopy, ExtCodeCopy, SLoad back to gas_table.go

* witness gas charging for CALL

* remove explicit reference to evm.TxContext

* core/vm: make touchEachChunksAndChargeGas handle a nil code value

* core/vm: call implementation, separate out witnesses into touch/set

* some fixes

* remove witness touching from opCall: this will go in evm.go

* remove witness touching for call from gas_table.go

* (hopefully) fix tests

* add SSTORE witness charging that was removed mistakenly

* charge witness gas for call

* clean up and comment touchEachChunksAndChargeGas

* make suggested changes

* address remaining points

* fix build issues

* remove double-charging for contract creation witness gas charging
2021-12-16 11:21:59 +01:00
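
The core arithmetic of the 'touchEachChunksAndChargeGas' helper, sketched under assumptions: verkle code is stored in 31-byte chunks, the witness is reduced to a bare set, and the per-chunk cost is a placeholder.

package main

import "fmt"

const chunkSize = 31 // code is chunked into 31-byte groups in the verkle tree

// touchCodeRange maps a byte range [start, start+size) of a contract's
// code onto chunk indices, touches each chunk once, and accumulates gas.
func touchCodeRange(touched map[uint64]bool, start, size, codeLen uint64) uint64 {
	if size == 0 || start >= codeLen {
		return 0
	}
	end := start + size
	if end > codeLen {
		end = codeLen // never touch chunks past the code size
	}
	var gas uint64
	for chunk := start / chunkSize; chunk <= (end-1)/chunkSize; chunk++ {
		if !touched[chunk] {
			touched[chunk] = true
			gas += 200 // placeholder chunk access cost
		}
	}
	return gas
}

func main() {
	touched := make(map[uint64]bool)
	fmt.Println(touchCodeRange(touched, 0, 64, 100)) // chunks 0..2 charged
	fmt.Println(touchCodeRange(touched, 30, 2, 100)) // already touched: 0
}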
jwasinger
6af78cba9e miner: embed verkle proof in sealing block (#39)
* miner: embed verkle proof in sealing block

* add test to ensure that verkle proof is present in mined blocks
2021-12-07 17:06:27 +01:00
Guillaume Ballet
fe75603d0b remove outdated comment 2021-12-06 11:09:25 +01:00
jwasinger
5bac5b3262 consensus/ethash: move accumulation of coinbase witness before coinbase account is credited (#41) 2021-12-02 09:41:11 +01:00
jwasinger
fa753db9e8 consensus/ethash: ensure uncle accounts are included in block witness (#40) 2021-12-02 09:39:51 +01:00
Guillaume Ballet
86bdc3fb39 Remove access witness from the signature of Process (#38) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
909049c5fe use the witness in statedb, revert applyTx signature (#36)
* use the witness in statedb, revert applyTx signature

* fix miner tests

* fix catalyst build
2021-11-26 16:38:20 +01:00
Guillaume Ballet
7360d168c8 fix calculation in get_tree_key_for_storage_slot (#35) 2021-11-26 16:38:20 +01:00
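The calculation being fixed follows the shape of the draft verkle EIP's get_tree_key_for_storage_slot: small slots are folded into the account header group, everything else goes to a main-storage offset. The constants below are illustrative, not authoritative.

package main

import (
	"fmt"
	"math/big"
)

var (
	headerStorageOffset = big.NewInt(64)
	codeOffset          = big.NewInt(128)
	mainStorageOffset   = new(big.Int).Exp(big.NewInt(256), big.NewInt(31), nil)
	verkleNodeWidth     = big.NewInt(256)
)

// storageSlotTreeIndex returns the (tree index, sub-index) pair for a
// storage slot; the pair then feeds the pedersen-based key derivation.
func storageSlotTreeIndex(slot *big.Int) (*big.Int, byte) {
	pos := new(big.Int)
	if slot.Cmp(new(big.Int).Sub(codeOffset, headerStorageOffset)) < 0 {
		pos.Add(headerStorageOffset, slot) // header storage group
	} else {
		pos.Add(mainStorageOffset, slot) // main storage
	}
	index, sub := new(big.Int).DivMod(pos, verkleNodeWidth, new(big.Int))
	return index, byte(sub.Uint64())
}

func main() {
	idx, sub := storageSlotTreeIndex(big.NewInt(1))
	fmt.Println(idx, sub) // slot 1 lands in the header group: 0 65
}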
Guillaume Ballet
361a328cb7 upgrade go version (#34) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
41c2f754cc remove unnecessary cancun block declaration in tests (#33) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
7cb1add36a add circleci support (#32)
* add circleci support

* disable linter, which is broken again

* actually run tests
2021-11-26 16:38:20 +01:00
Guillaume Ballet
03dbc0a210 fix boundary condition check in PUSH32 2021-11-26 16:38:20 +01:00
Guillaume Ballet
6d40e11fe3 fix bound check in code chunking 2021-11-26 16:38:20 +01:00
Guillaume Ballet
5ca990184f fix boundary check in PUSH 2021-11-26 16:38:20 +01:00
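These three boundary fixes all guard reads that may run past the end of the code. A minimal sketch of the clamped read for a PUSHn operand (bytes past the end are implicitly zero, matching EVM semantics):

package main

import "fmt"

// pushData extracts the n-byte immediate operand of a PUSHn opcode at pc,
// clamping the copy so it never reads past the code size.
func pushData(code []byte, pc, n uint64) []byte {
	data := make([]byte, n) // zero-padded by default
	if start := pc + 1; start < uint64(len(code)) {
		end := start + n
		if end > uint64(len(code)) {
			end = uint64(len(code))
		}
		copy(data, code[start:end])
	}
	return data
}

func main() {
	code := []byte{0x7f, 0x01, 0x02} // PUSH32 with a truncated operand
	fmt.Printf("%x\n", pushData(code, 0, 32))
}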
Guillaume Ballet
15d98607f3 initialize the new access witness if not already present 2021-11-26 16:38:20 +01:00
Guillaume Ballet
ef08e51e40 merge undefined instead of panicking (#30) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
e1144745a7 fix linter issue 2021-11-26 16:38:20 +01:00
Guillaume Ballet
bc06d2c740 fix rebase issues 2021-11-26 16:38:20 +01:00
jwasinger
97a79f50e8 enable verkle on cancun block: take 2 (#28)
* enable verkle on cancun block: take 2

* fix typo; make the panic message on the unreachable line clearer
2021-11-26 16:38:17 +01:00
Guillaume Ballet
9f9c03a94c fixes for the IPA testnet
upgrade to latest go-verkle

update go-verkle to get more fixes

simplify code by removing all stateless references (#25)

fix verkle proof test by enforcing values alignment to 32 bytes

remove unneeded KZG tag

fix the stateless test

Move AccessWitness into StateDB (#27)

* move AccessWitness into StateDB

* set Accesses in TxContext constructor

* Ensures that a statedb is initialized with a witness

* copy AccessWitness in StateDB.Copy; use the copied state in miner worker.commit

* remove redundant line

Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>

Fix contract creation issue
2021-11-26 16:30:06 +01:00
Guillaume Ballet
719bf47354 Upgrade go-verkle to its IPA version (#24) 2021-11-26 16:30:06 +01:00
Guillaume Ballet
162780515a all: implement EIP-compliant verkle trees
verkle: Implement Trie, NodeIterator and Database ifs

Fix crash in TestDump

Fix TestDump

Fix TrieCopy

remove unnecessary traces

fix: Error() returned errIteratorEnd in verkle node iterator

rewrite the iterator and change the signature of OpenStorageTrie

add the adapter to reuse the account trie for storage

don't try to deserialize a storage leaf into an account

Fix statedb unit tests (#14)

* debug code

* Fix more unit tests

* remove traces

* Go back to the full range

One tree to rule them all

remove updateRoot, there is no root to update

store code inside the account leaf

fix build

save current state for Sina

Update go-verkle to latest

Charge WITNESS_*_COST gas on storage loads

Add witness costs for SSTORE as well

Charge witness gas in the case of code execution

corresponding code deletion

add a --verkle flag to separate verkle experiments from regular geth operations

use the snapshot to get data

stateless execution from block witness

AccessWitness functions

Add block generation test + genesis snapshot generation

test stateless block execution (#18)

* test stateless block execution

* Force tree resolution before generating the proof

increased coverage in stateless test execution (#19)

* test stateless block execution

* Force tree resolution before generating the proof

* increase coverage in stateless test execution

ensure geth compiles

fix issues in tests with verkle trees deactivated

Ensure stateless data is available when executing statelessly (#20)

* Ensure stateless data is available when executing statelessly

* Actual execution of a stateless block

* bugfixes in stateless block execution

* code cleanup

 - Reduce PR footprint by reverting NewEVM to its original signature
 - Move the access witness to the block context
 - prepare for a change in AW semantics
   Need to store the initial values.
 - Use the touch helper function, DRY

* revert the signature of MustCommit to its original form (#21)

fix leaf proofs in stateless execution (#22)

* Fixes in witness pre-state

* Add the recipient's nonce to the witness

* reduce PR footprint and investigate issue in root state calculation

* quick build fix

cleanup: Remove extra parameter in ToBlock

revert ToBlock to its older signature

fix import cycle in vm tests

fix linter issue

fix appveyor build

fix nil pointers in tests

Add indices, yis and Cis to the block's Verkle proof

upgrade geth dependency to drop geth's common dep

fix cmd/devp2p tests

fix rebase issues

quell an appveyor warning

fix address touching in SLOAD and SSTORE

fix access witness for code size

touch target account data before calling

make sure the proper locations get touched in (ext)codecopy

touch all code pages in execution

add pushdata to witness

remove useless code in genesis snapshot generation

testnet: fix some of the rebase/drift issues

Fix verkle proof generation in block

fix an issue occurring when chunking past the code size

fix: ensure the code copy doesn't extend past the code size
2021-11-26 16:30:03 +01:00
Péter Szilágyi
c10a0a62c3 eth: request id dispatcher and direct req/reply APIs (#23576)
* eth: request ID based message dispatcher

* eth: fix dispatcher cancellation, rework fetchers idleness tracker

* eth/downloader: drop peers who refuse to serve advertised chains
2021-11-26 13:26:03 +02:00
Marius van der Wijden
3038e480f5 all: core rework for the merge transition (#23761)
* all: work for the eth1/2 transition

* consensus/beacon, eth: change beacon difficulty to 0

* eth: updates

* all: add terminalBlockDifficulty config, fix rebasing issues

* eth: implemented merge interop spec

* internal/ethapi: update to v1.0.0.alpha.2

This commit updates the code to the new spec, moving payloadId into
its own object. It also fixes an issue with finalizing an empty blockhash,
and properly sets the basefee.

* all: sync polishes, other fixes + refactors

* core, eth: correct semantics for LeavePoW, EnterPoS

* core: fixed rebasing artifacts

* core: light: performance improvements

* core: use keyed field (f)

* core: eth: fix compilation issues + tests

* eth/catalyst: better error codes

* all: move Merger to consensus/, remove reliance on it in bc

* all: renamed EnterPoS and LeavePoW to ReachTTD and FinalizePoS

* core: make mergelogs a function

* core: use InsertChain instead of InsertBlock

* les: drop merger from lightchain object

* consensus: add merger

* core: recoverAncestors in catalyst mode

* core: fix nitpick

* all: removed merger from beacon, use TTD, nitpicks

* consensus: eth: add docstring, removed unnecessary code duplication

* consensus/beacon: better comment

* all: easy to fix nitpicks by karalabe

* consensus/beacon: verify known headers to be sure

* core: comments

* core: eth: don't drop peers who advertise blocks, nitpicks

* core: never add beacon blocks to the future queue

* core: fixed nitpicks

* consensus/beacon: simplify IsTTDReached check

* consensus/beacon: correct IsTTDReached check

Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 13:23:02 +02:00
Andrei Maiboroda
519cf98b69 core/vm: simplify op lookup in contract (#23974) 2021-11-25 20:10:01 +01:00
Péter Szilágyi
4ebeca19d7 Merge pull request #23967 from ipsilon/evm_jumptable_type
core/vm: use proper JumpTable type
2021-11-25 14:18:34 +02:00
Sina Mahmoodi
1876cb443b all: move loggers to eth/tracers (#23892)
* all: mv loggers to eth/tracers

* core/vm: minor

* eth/tracers: tmp comment out testStoreCapture

* eth/tracers: uncomment and fix logger test

* eth/tracers: simplify test

* core/vm: re-add license

* core/vm: minor

* rename LogConfig to Config
2021-11-25 14:17:09 +02:00
Martin Holst Swende
9055cc14ec core/vm, core/state/snapshot: remove unused code (#23956)
* core/state/snapshot: remove wiper functionality

* core/vm: remove unused 'unofficial' opcodes
2021-11-25 10:37:47 +02:00
Martin Holst Swende
ad7c90c198 cmd/devp2p/internal/v4test: fix false-positive hive test (#23966)
This PR fixes two problems in devp2p tests (and through them, hive).

- Make the output more detailed about what is returned (always print packet kind).
- Allow Ping response to unsolicited findnode.

Without this PR, nethermind fails a hive protocol test, and I misinterpreted the result (NethermindEth/nethermind#3617). Ergo, the output was not fool-proof.
2021-11-24 21:22:45 +01:00
Paweł Bylica
10b1cd9b1b core/vm: use proper JumpTable type 2021-11-24 16:02:12 +01:00
Sina Mahmoodi
66ee9422f5 consensus/clique: fix block number unmarshal (#23961)
* clique: fix block number unmarshal

* clique: rename
2021-11-24 14:12:26 +01:00
Péter Szilágyi
8151dd67e1 params: begin v1.10.14 release cycle 2021-11-24 14:09:57 +02:00
Péter Szilágyi
7a0c19f813 params: release Geth v1.10.13 2021-11-24 13:44:10 +02:00
lightclient
0a7672fc9a cmd/evm: rename t8n args to improve clarity when tracing (#23934)
* cmd/evm: rename t8n args to improve clarity when tracing

* cmd/evm: add back removed tracing flags and note that they are deprecated

* cmd/evm: add warning when using deprecated flag
2021-11-24 10:15:23 +01:00
Péter Szilágyi
7322b2590c Merge pull request #23960 from karalabe/verify-range-deletion
trie: reject deletions when verifying range proofs
2021-11-23 22:21:10 +02:00
Péter Szilágyi
743769f48e trie: reject deletions when verifying range proofs 2021-11-23 19:28:17 +02:00
Serhat Şevki Dinçer
d15e423562 p2p/enode: store local port number as uint16 (#23926) 2021-11-23 15:14:08 +01:00
Martin Holst Swende
347c37b362 core/rawdb: use AncientRange when initializing leveldb from freezer (#23612)
* core/rawdb: utilize AncientRange when initiating from freezer

* core/rawdb: remove debug sanity check
2021-11-23 12:37:26 +01:00
Péter Szilágyi
50e07a1e16 Merge pull request #23928 from holiman/no_iota
core/vm: don't use iota for opcode definitions
2021-11-23 11:50:17 +02:00
lightclient
23f69c6db0 cmd/evm: add support for signing transactions in the unprotected format (#23937)
* cmd/evm: add support for signing transactions in the unprotected format

* cmd/evm: simplify signing of unprotected txs
2021-11-23 10:33:15 +01:00
Péter Szilágyi
17f1c2dc0f Merge pull request #23949 from karalabe/fix-repair-heuristic
core, eth/downloader: fix resetting below freezer threshold
2021-11-22 12:16:27 +02:00
Péter Szilágyi
d9c13d407f core, eth/downloader: fix resetting below freezer threshold 2021-11-22 11:12:51 +02:00
lightclient
441c7f2b0f cmd/evm: add b11r tool (#23843)
evm block-builder (a.k.a. b11r) is a utility that helps assemble blocks, for use during the test-creation process.
2021-11-22 09:25:35 +01:00
Paweł Bylica
5d4bcbc14f trie: more tests for stacktrie (#23936) 2021-11-22 08:49:18 +01:00
lightclient
6f2c3f2114 cmd/geth: add ancient flag to db inspect (#23946) 2021-11-22 09:07:17 +02:00
Anatole
e0761432a4 eth: fix typo in comment (#23941) 2021-11-22 02:53:16 +01:00
Martin Holst Swende
e761255ba7 cmd/evm: make t9n intrinsicGas output hex, fixes #23883 (#23889) 2021-11-19 10:53:20 +01:00
courtier
c52def7f11 eth/gasprice: sanitize max header and block history (#23886)
Fixes #23452
2021-11-18 19:20:36 +01:00
Martin Holst Swende
ab31fbbde1 core/vm: don't use iota for opcode definitions 2021-11-18 09:50:52 +01:00
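The reasoning behind dropping iota here: EVM opcode encodings are fixed by the spec, so writing the byte values out keeps each constant self-documenting and makes it impossible for an inserted definition to silently renumber the rest. A small illustrative excerpt (real opcode values, simplified type):

package main

import "fmt"

type OpCode byte

// Explicit values instead of iota: each encoding is pinned to the spec.
const (
	STOP OpCode = 0x00
	ADD  OpCode = 0x01
	MUL  OpCode = 0x02
	SUB  OpCode = 0x03
)

func main() {
	fmt.Printf("ADD = %#x\n", byte(ADD))
}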
Lee Bousfield
16341e0563 ethclient: fix tx sender cache miss detection (#23877)
This fixes a bug in TransactionSender where it would return the
zero address for transactions where the sender address wasn't
cached already.

Co-authored-by: Felix Lange <fjl@twurst.com>
2021-11-17 14:44:41 +01:00
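
A sketch of the cache-miss hazard being fixed, with the caching reduced to a toy struct: the sender cache must only be trusted when populated, and a miss has to fall back to recovery (or a node query) rather than returning the zero address.

package main

import "fmt"

type cachedSender struct {
	addr  string
	valid bool
}

type Tx struct{ from cachedSender }

// sender returns the cached sender when present; on a miss it resolves
// the address and fills the cache. The bug was taking the cached zero
// value as the answer instead of taking this fallback path.
func sender(tx *Tx, resolve func() string) string {
	if tx.from.valid {
		return tx.from.addr
	}
	addr := resolve() // e.g. ECDSA recovery or an RPC lookup
	tx.from = cachedSender{addr: addr, valid: true}
	return addr
}

func main() {
	tx := &Tx{}
	fmt.Println(sender(tx, func() string { return "0xabc" }))  // resolved
	fmt.Println(sender(tx, func() string { return "unused" })) // cached
}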
lightclient
fa96718512 cmd/evm: rename t8n result to match types.Header (ReceiptRoot->ReceiptsRoot) (#23924) 2021-11-17 13:50:08 +01:00
Jesse Tane
33f2813809 cmd/geth: add flag --dev.gaslimit for dev mode (#23686)
* cmd, core: add flag --dev.gaslimit to allow configuring initial block gas limit in dev mode

* core: use provided gaslimit

Co-authored-by: Martin Holst Swende <martin@swende.se>
2021-11-16 13:45:02 +01:00
Martin Holst Swende
b7a6409cc1 core/rawdb: better error message in freezer (#23901)
* core/rawdb: better error message in freezer

* Apply suggestions from code review
2021-11-16 11:33:56 +02:00
Sina Mahmoodi
05acc272b5 eth/tracers: make native 4byte default, remove js version (#23916) 2021-11-16 08:44:57 +01:00
lightclient
b0b708bf23 cmd/evm: add gasUsed to t8n result (#23919)
* cmd/evm: add gas used accumulator to t8n result

* cmd/evm: update t8n tests to include gas used field
2021-11-16 08:43:58 +01:00
Joshua Colvin
abc74a5ffe accounts/abi/bind/backends: fix race condition in simulated backend (#23898)
Now that `SimulatedBackend.SuggestGasPrice` inspects member values, a lock needs to be added to prevent a race condition.
2021-11-12 15:50:08 +01:00
Ward Bradt
e9294a7fe9 eth/tracers: add golang 4byte tracer (#23882)
* native 4byte tracer

* Update eth/tracers/native/4byte.go

Co-authored-by: Martin Holst Swende <martin@swende.se>

* Update eth/tracers/native/4byte.go

Co-authored-by: Martin Holst Swende <martin@swende.se>

* goimports

* eth/tracers: make 4byte tracer not care about create

Co-authored-by: Martin Holst Swende <martin@swende.se>
2021-11-11 20:20:46 +01:00
meowsbits
5358e491f3 cmd/devp2p: update TTL max for Cloudflare (#23885)
This was apparently recently changed by Cloudflare, and
began returning an error: 'TTL must be between 60 and 86400
seconds, or 1 for Automatic'

Date: 2021-11-10 15:25:20-08:00
Signed-off-by: meows <b5c6@protonmail.com>
2021-11-11 17:07:11 +01:00
Sina Mahmoodi
c57df9ca28 core/rawdb: add slow path for getting legacy logs (#23879)
* eth/tracers: add slow path for getting legacy logs

* core/rawdb: fix test
2021-11-11 15:04:06 +01:00
Andrei Maiboroda
f32feeb260 core/vm: implement EIP-2681: Limit account nonce to 2^64-1 (#23853)
This retroactively implements the requirements of EIP-2681 for the account nonce upper limit.
2021-11-11 15:00:58 +01:00
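
The check itself is a one-line overflow guard; the same `nonce+1 < nonce` idiom appears in the t8n validation hunk further down. A minimal sketch:

package main

import (
	"errors"
	"fmt"
	"math"
)

// checkNonce enforces EIP-2681: an account nonce may never exceed 2^64-1,
// so any operation that would increment past the ceiling must fail.
func checkNonce(nonce uint64) error {
	if nonce+1 < nonce { // uint64 overflow check
		return errors.New("nonce exceeds 2^64-1 (EIP-2681)")
	}
	return nil
}

func main() {
	fmt.Println(checkNonce(1))              // <nil>
	fmt.Println(checkNonce(math.MaxUint64)) // error
}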
Péter Szilágyi
e185a8c818 Merge pull request #23880 from eltociear/patch-3
p2p: fix typo in v4wire.go
2021-11-10 16:15:51 +02:00
Ikko Ashimine
fb7da82dde p2p: fix typo in v4wire.go
Neigbors -> Neighbors
2021-11-10 22:18:12 +09:00
Martin Holst Swende
0efed7f58b cmd/devp2p/internal/ethtest: clarify protocol version in tests (#23872)
While debugging recent geth failures in hive, it took a while to realize that they
occur because geth no longer supports eth/65. This PR makes such failures a bit
easier to figure out.
2021-11-09 14:45:34 +01:00
Martin Holst Swende
6b9c77f060 eth/tracers: package restructuring (#23857)
* eth/tracers: restructure tracer package

* core/vm/runtime: load js tracers

* eth/tracers: mv bigint js code to own file

* eth/tracers: add method docs for native tracers

* eth/tracers: minor doc fix

* core,eth: cancel evm on nativecalltracer stop

* core/vm: fix failing test

Co-authored-by: Sina Mahmoodi <itz.s1na@gmail.com>
2021-11-09 12:09:35 +01:00
Felix Lange
9489853321 core: check effective tip in txpool pricelimit validation (#23855)
The price limit is supposed to exclude transactions with too low fee
amount. Before EIP-1559, it was sufficient to check the limit against
the gas price of the transaction. After 1559, it is more complicated
because the concept of 'transaction gas price' does not really exist.

When mining, the price limit is used to exclude transactions below a
certain effective fee amount. This change makes it apply the same check
earlier, in tx validation. Transactions below the specified fee amount
cannot enter the pool.

Fixes #23837
2021-11-08 16:25:35 +02:00
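
The check the pool now applies at admission time, sketched: the effective miner tip under EIP-1559 is min(gasTipCap, gasFeeCap - baseFee), and that value, not the legacy gas price, is compared against the configured price limit.

package main

import (
	"fmt"
	"math/big"
)

// effectiveTip computes min(gasTipCap, gasFeeCap-baseFee): the amount per
// gas that actually reaches the miner once the base fee is burned.
func effectiveTip(gasTipCap, gasFeeCap, baseFee *big.Int) *big.Int {
	tip := new(big.Int).Sub(gasFeeCap, baseFee)
	if tip.Cmp(gasTipCap) > 0 {
		tip.Set(gasTipCap)
	}
	return tip
}

func main() {
	// feeCap 100 with baseFee 95 leaves at most 5 for the miner,
	// even though the tip cap would allow 10.
	fmt.Println(effectiveTip(big.NewInt(10), big.NewInt(100), big.NewInt(95))) // 5
}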
Péter Szilágyi
ad11691daf params: begin v1.10.13 release cycle 2021-11-08 15:44:11 +02:00
258 changed files with 12270 additions and 6603 deletions

.circleci/config.yml (new file, 45 lines)
View File

@@ -0,0 +1,45 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
build:
working_directory: ~/repo
# Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
docker:
- image: circleci/golang:1.16.10
# Add steps to the job
# See: https://circleci.com/docs/2.0/configuration-reference/#steps
steps:
- checkout
- restore_cache:
keys:
- go-mod-v4-{{ checksum "go.sum" }}
- run:
name: Install Dependencies
command: go mod download
- save_cache:
key: go-mod-v4-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
#- run:
# name: Run linter
# command: |
# go run build/ci.go lint
- run:
name: Run tests
command: |
go run build/ci.go test -coverage
- store_test_results:
path: /tmp/test-reports
# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
sample: # This is the name of the workflow, feel free to change it to better match your workflow.
# Inside the workflow, you define the jobs you want to run.
jobs:
- build

View File

@@ -462,6 +462,9 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.pendingBlock.Header().BaseFee != nil {
return b.pendingBlock.Header().BaseFee, nil
}

View File

@@ -133,7 +133,8 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
log.Info(fmt.Sprintf("Creating %s = %q", path, val))
ttl := rootTTL
if path != name {
ttl = treeNodeTTL // Max TTL permitted by Cloudflare
ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
}
record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)

View File

@@ -115,8 +115,9 @@ var (
)
const (
rootTTL = 30 * 60 // 30 min
treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks
rootTTL = 30 * 60 // 30 min
treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks
treeNodeTTLCloudflare = 24 * 60 * 60 // 1 day
)
// dnsSync performs dnsSyncCommand.

View File

@@ -131,7 +131,7 @@ func (c *Conn) handshake() error {
}
c.negotiateEthProtocol(msg.Caps)
if c.negotiatedProtoVersion == 0 {
return fmt.Errorf("unexpected eth protocol version")
return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
}
return nil
default:

View File

@@ -52,35 +52,35 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
func (s *Suite) AllEthTests() []utesting.Test {
return []utesting.Test{
// status
{Name: "TestStatus", Fn: s.TestStatus},
{Name: "TestStatus65", Fn: s.TestStatus65},
{Name: "TestStatus66", Fn: s.TestStatus66},
// get block headers
{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66},
{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66},
{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66},
{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66},
// get block bodies
{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
// broadcast
{Name: "TestBroadcast", Fn: s.TestBroadcast},
{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
// malicious handshakes + status
{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
// test transactions
{Name: "TestTransaction", Fn: s.TestTransaction},
{Name: "TestTransaction65", Fn: s.TestTransaction65},
{Name: "TestTransaction66", Fn: s.TestTransaction66},
{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
@@ -89,17 +89,17 @@ func (s *Suite) AllEthTests() []utesting.Test {
func (s *Suite) EthTests() []utesting.Test {
return []utesting.Test{
{Name: "TestStatus", Fn: s.TestStatus},
{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
{Name: "TestBroadcast", Fn: s.TestBroadcast},
{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
{Name: "TestTransaction", Fn: s.TestTransaction},
{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
{Name: "TestStatus65", Fn: s.TestStatus65},
{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
{Name: "TestTransaction65", Fn: s.TestTransaction65},
{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
}
}
@@ -130,9 +130,9 @@ var (
eth65 = false // indicates whether suite should negotiate eth65 connection or below.
)
// TestStatus attempts to connect to the given node and exchange
// TestStatus65 attempts to connect to the given node and exchange
// a status message with it.
func (s *Suite) TestStatus(t *utesting.T) {
func (s *Suite) TestStatus65(t *utesting.T) {
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -156,9 +156,9 @@ func (s *Suite) TestStatus66(t *utesting.T) {
}
}
// TestGetBlockHeaders tests whether the given node can respond to
// TestGetBlockHeaders65 tests whether the given node can respond to
// a `GetBlockHeaders` request accurately.
func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
func (s *Suite) TestGetBlockHeaders65(t *utesting.T) {
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -392,9 +392,9 @@ func (s *Suite) TestZeroRequestID66(t *utesting.T) {
}
}
// TestGetBlockBodies tests whether the given node can respond to
// TestGetBlockBodies65 tests whether the given node can respond to
// a `GetBlockBodies` request and that the response is accurate.
func (s *Suite) TestGetBlockBodies(t *utesting.T) {
func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -460,9 +460,9 @@ func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
}
}
// TestBroadcast tests whether a block announcement is correctly
// TestBroadcast65 tests whether a block announcement is correctly
// propagated to the given node's peer(s).
func (s *Suite) TestBroadcast(t *utesting.T) {
func (s *Suite) TestBroadcast65(t *utesting.T) {
if err := s.sendNextBlock(eth65); err != nil {
t.Fatalf("block broadcast failed: %v", err)
}
@@ -476,8 +476,8 @@ func (s *Suite) TestBroadcast66(t *utesting.T) {
}
}
// TestLargeAnnounce tests the announcement mechanism with a large block.
func (s *Suite) TestLargeAnnounce(t *utesting.T) {
// TestLargeAnnounce65 tests the announcement mechanism with a large block.
func (s *Suite) TestLargeAnnounce65(t *utesting.T) {
nextBlock := len(s.chain.blocks)
blocks := []*NewBlock{
{
@@ -569,8 +569,8 @@ func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
}
}
// TestOldAnnounce tests the announcement mechanism with an old block.
func (s *Suite) TestOldAnnounce(t *utesting.T) {
// TestOldAnnounce65 tests the announcement mechanism with an old block.
func (s *Suite) TestOldAnnounce65(t *utesting.T) {
if err := s.oldAnnounce(eth65); err != nil {
t.Fatal(err)
}
@@ -584,9 +584,9 @@ func (s *Suite) TestOldAnnounce66(t *utesting.T) {
}
}
// TestBlockHashAnnounce sends a new block hash announcement and expects
// TestBlockHashAnnounce65 sends a new block hash announcement and expects
// the node to perform a `GetBlockHeaders` request.
func (s *Suite) TestBlockHashAnnounce(t *utesting.T) {
func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) {
if err := s.hashAnnounce(eth65); err != nil {
t.Fatalf("block hash announcement failed: %v", err)
}
@@ -600,8 +600,8 @@ func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) {
}
}
// TestMaliciousHandshake tries to send malicious data during the handshake.
func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
// TestMaliciousHandshake65 tries to send malicious data during the handshake.
func (s *Suite) TestMaliciousHandshake65(t *utesting.T) {
if err := s.maliciousHandshakes(t, eth65); err != nil {
t.Fatal(err)
}
@@ -614,8 +614,8 @@ func (s *Suite) TestMaliciousHandshake66(t *utesting.T) {
}
}
// TestMaliciousStatus sends a status package with a large total difficulty.
func (s *Suite) TestMaliciousStatus(t *utesting.T) {
// TestMaliciousStatus65 sends a status package with a large total difficulty.
func (s *Suite) TestMaliciousStatus65(t *utesting.T) {
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -641,9 +641,9 @@ func (s *Suite) TestMaliciousStatus66(t *utesting.T) {
}
}
// TestTransaction sends a valid transaction to the node and
// TestTransaction65 sends a valid transaction to the node and
// checks if the transaction gets propagated.
func (s *Suite) TestTransaction(t *utesting.T) {
func (s *Suite) TestTransaction65(t *utesting.T) {
if err := s.sendSuccessfulTxs(t, eth65); err != nil {
t.Fatal(err)
}
@@ -657,9 +657,9 @@ func (s *Suite) TestTransaction66(t *utesting.T) {
}
}
// TestMaliciousTx sends several invalid transactions and tests whether
// TestMaliciousTx65 sends several invalid transactions and tests whether
// the node will propagate them.
func (s *Suite) TestMaliciousTx(t *utesting.T) {
func (s *Suite) TestMaliciousTx65(t *utesting.T) {
if err := s.sendMaliciousTxs(t, eth65); err != nil {
t.Fatal(err)
}

View File

@@ -229,7 +229,7 @@ func PingPastExpiration(t *utesting.T) {
reply, _, _ := te.read(te.l1)
if reply != nil {
t.Fatal("Expected no reply, got", reply)
t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply)
}
}
@@ -247,7 +247,7 @@ func WrongPacketType(t *utesting.T) {
reply, _, _ := te.read(te.l1)
if reply != nil {
t.Fatal("Expected no reply, got", reply)
t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply)
}
}
@@ -282,9 +282,16 @@ func FindnodeWithoutEndpointProof(t *utesting.T) {
rand.Read(req.Target[:])
te.send(te.l1, &req)
reply, _, _ := te.read(te.l1)
if reply != nil {
t.Fatal("Expected no response, got", reply)
for {
reply, _, _ := te.read(te.l1)
if reply == nil {
// No response, all good
break
}
if reply.Kind() == v4wire.PingPacket {
continue // A ping is ok, just ignore it
}
t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply)
}
}
@@ -304,7 +311,7 @@ func BasicFindnode(t *utesting.T) {
t.Fatal("read find nodes", err)
}
if reply.Kind() != v4wire.NeighborsPacket {
t.Fatal("Expected neighbors, got", reply.Name())
t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply)
}
}
@@ -341,7 +348,7 @@ func UnsolicitedNeighbors(t *utesting.T) {
t.Fatal("read find nodes", err)
}
if reply.Kind() != v4wire.NeighborsPacket {
t.Fatal("Expected neighbors, got", reply.Name())
t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply)
}
nodes := reply.(*v4wire.Neighbors).Nodes
if contains(nodes, encFakeKey) {

View File

@@ -0,0 +1,380 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package t8ntool
import (
"crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1"
)
//go:generate gencodec -type header -field-override headerMarshaling -out gen_header.go
type header struct {
ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"`
Difficulty *big.Int `json:"difficulty"`
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed"`
Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData"`
MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
}
type headerMarshaling struct {
Difficulty *math.HexOrDecimal256
Number *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Time math.HexOrDecimal64
Extra hexutil.Bytes
BaseFee *math.HexOrDecimal256
}
type bbInput struct {
Header *header `json:"header,omitempty"`
OmmersRlp []string `json:"ommers,omitempty"`
TxRlp string `json:"txs,omitempty"`
Clique *cliqueInput `json:"clique,omitempty"`
Ethash bool `json:"-"`
EthashDir string `json:"-"`
PowMode ethash.Mode `json:"-"`
Txs []*types.Transaction `json:"-"`
Ommers []*types.Header `json:"-"`
}
type cliqueInput struct {
Key *ecdsa.PrivateKey
Voted *common.Address
Authorize *bool
Vanity common.Hash
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (c *cliqueInput) UnmarshalJSON(input []byte) error {
var x struct {
Key *common.Hash `json:"secretKey"`
Voted *common.Address `json:"voted"`
Authorize *bool `json:"authorize"`
Vanity common.Hash `json:"vanity"`
}
if err := json.Unmarshal(input, &x); err != nil {
return err
}
if x.Key == nil {
return errors.New("missing required field 'secretKey' for cliqueInput")
}
if ecdsaKey, err := crypto.ToECDSA(x.Key[:]); err != nil {
return err
} else {
c.Key = ecdsaKey
}
c.Voted = x.Voted
c.Authorize = x.Authorize
c.Vanity = x.Vanity
return nil
}
// ToBlock converts i into a *types.Block
func (i *bbInput) ToBlock() *types.Block {
header := &types.Header{
ParentHash: i.Header.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: common.Address{},
Root: i.Header.Root,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
Bloom: i.Header.Bloom,
Difficulty: common.Big0,
Number: i.Header.Number,
GasLimit: i.Header.GasLimit,
GasUsed: i.Header.GasUsed,
Time: i.Header.Time,
Extra: i.Header.Extra,
MixDigest: i.Header.MixDigest,
BaseFee: i.Header.BaseFee,
}
// Fill optional values.
if i.Header.OmmerHash != nil {
header.UncleHash = *i.Header.OmmerHash
} else if len(i.Ommers) != 0 {
// Calculate the ommer hash if none is provided and there are ommers to hash
header.UncleHash = types.CalcUncleHash(i.Ommers)
}
if i.Header.Coinbase != nil {
header.Coinbase = *i.Header.Coinbase
}
if i.Header.TxHash != nil {
header.TxHash = *i.Header.TxHash
}
if i.Header.ReceiptHash != nil {
header.ReceiptHash = *i.Header.ReceiptHash
}
if i.Header.Nonce != nil {
header.Nonce = *i.Header.Nonce
}
if i.Header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty
}
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers)
}
// SealBlock seals the given block using the configured engine.
func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) {
switch {
case i.Ethash:
return i.sealEthash(block)
case i.Clique != nil:
return i.sealClique(block)
default:
return block, nil
}
}
// sealEthash seals the given block using ethash.
func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) {
if i.Header.Nonce != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with ethash will overwrite provided nonce"))
}
ethashConfig := ethash.Config{
PowMode: i.PowMode,
DatasetDir: i.EthashDir,
CacheDir: i.EthashDir,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
CachesInMem: 2,
CachesOnDisk: 3,
}
engine := ethash.New(ethashConfig, nil, true)
defer engine.Close()
// Use a buffered chan for results.
// If the testmode is used, the sealer will return quickly, and complain
// "Sealing result is not read by miner" if it cannot write the result.
results := make(chan *types.Block, 1)
if err := engine.Seal(nil, block, results, nil); err != nil {
panic(fmt.Sprintf("failed to seal block: %v", err))
}
found := <-results
return block.WithSeal(found.Header()), nil
}
// sealClique seals the given block using clique.
func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {
// If any clique value overwrites an explicit header value, fail
// to avoid silently building a block with unexpected values.
if i.Header.Extra != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique will overwrite provided extra data"))
}
header := block.Header()
if i.Clique.Voted != nil {
if i.Header.Coinbase != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided coinbase"))
}
header.Coinbase = *i.Clique.Voted
}
if i.Clique.Authorize != nil {
if i.Header.Nonce != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided nonce"))
}
if *i.Clique.Authorize {
header.Nonce = [8]byte{}
} else {
header.Nonce = [8]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
}
}
// Extra is fixed 32 byte vanity and 65 byte signature
header.Extra = make([]byte, 32+65)
copy(header.Extra[0:32], i.Clique.Vanity.Bytes()[:])
// Sign the seal hash and fill in the rest of the extra data
h := clique.SealHash(header)
sighash, err := crypto.Sign(h[:], i.Clique.Key)
if err != nil {
return nil, err
}
copy(header.Extra[32:], sighash)
block = block.WithSeal(header)
return block, nil
}
// BuildBlock constructs a block from the given inputs.
func BuildBlock(ctx *cli.Context) error {
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
baseDir, err := createBasedir(ctx)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
}
inputData, err := readInput(ctx)
if err != nil {
return err
}
block := inputData.ToBlock()
block, err = inputData.SealBlock(block)
if err != nil {
return err
}
return dispatchBlock(ctx, baseDir, block)
}
func readInput(ctx *cli.Context) (*bbInput, error) {
var (
headerStr = ctx.String(InputHeaderFlag.Name)
ommersStr = ctx.String(InputOmmersFlag.Name)
txsStr = ctx.String(InputTxsRlpFlag.Name)
cliqueStr = ctx.String(SealCliqueFlag.Name)
ethashOn = ctx.Bool(SealEthashFlag.Name)
ethashDir = ctx.String(SealEthashDirFlag.Name)
ethashMode = ctx.String(SealEthashModeFlag.Name)
inputData = &bbInput{}
)
if ethashOn && cliqueStr != "" {
return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen"))
}
if ethashOn {
inputData.Ethash = ethashOn
inputData.EthashDir = ethashDir
switch ethashMode {
case "normal":
inputData.PowMode = ethash.ModeNormal
case "test":
inputData.PowMode = ethash.ModeTest
case "fake":
inputData.PowMode = ethash.ModeFake
default:
return nil, NewError(ErrorConfig, fmt.Errorf("unknown pow mode: %s, supported modes: test, fake, normal", ethashMode))
}
}
if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
}
}
if cliqueStr != stdinSelector && cliqueStr != "" {
var clique cliqueInput
if err := readFile(cliqueStr, "clique", &clique); err != nil {
return nil, err
}
inputData.Clique = &clique
}
if headerStr != stdinSelector {
var env header
if err := readFile(headerStr, "header", &env); err != nil {
return nil, err
}
inputData.Header = &env
}
if ommersStr != stdinSelector && ommersStr != "" {
var ommers []string
if err := readFile(ommersStr, "ommers", &ommers); err != nil {
return nil, err
}
inputData.OmmersRlp = ommers
}
if txsStr != stdinSelector {
var txs string
if err := readFile(txsStr, "txs", &txs); err != nil {
return nil, err
}
inputData.TxRlp = txs
}
// Deserialize rlp txs and ommers
var (
ommers = []*types.Header{}
txs = []*types.Transaction{}
)
if inputData.TxRlp != "" {
if err := rlp.DecodeBytes(common.FromHex(inputData.TxRlp), &txs); err != nil {
return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode transaction from rlp data: %v", err))
}
inputData.Txs = txs
}
for _, str := range inputData.OmmersRlp {
type extblock struct {
Header *types.Header
Txs []*types.Transaction
Ommers []*types.Header
}
var ommer *extblock
if err := rlp.DecodeBytes(common.FromHex(str), &ommer); err != nil {
return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode ommer from rlp data: %v", err))
}
ommers = append(ommers, ommer.Header)
}
inputData.Ommers = ommers
return inputData, nil
}
// dispatchBlock writes the output data to either stderr or stdout, or to the
// specified file
func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error {
raw, _ := rlp.EncodeToBytes(block)
type blockInfo struct {
Rlp hexutil.Bytes `json:"rlp"`
Hash common.Hash `json:"hash"`
}
var enc blockInfo
enc.Rlp = raw
enc.Hash = block.Hash()
b, err := json.MarshalIndent(enc, "", " ")
if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
}
switch dest := ctx.String(OutputBlockFlag.Name); dest {
case "stdout":
os.Stdout.Write(b)
os.Stdout.WriteString("\n")
case "stderr":
os.Stderr.Write(b)
os.Stderr.WriteString("\n")
default:
if err := saveFile(baseDir, dest, enc); err != nil {
return err
}
}
return nil
}

View File

@@ -49,12 +49,13 @@ type Prestate struct {
type ExecutionResult struct {
StateRoot common.Hash `json:"stateRoot"`
TxRoot common.Hash `json:"txRoot"`
ReceiptRoot common.Hash `json:"receiptRoot"`
ReceiptRoot common.Hash `json:"receiptsRoot"`
LogsHash common.Hash `json:"logsHash"`
Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
Receipts types.Receipts `json:"receipts"`
Rejected []*rejectedTx `json:"rejected,omitempty"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
}
type ommer struct {
@@ -255,6 +256,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
Receipts: receipts,
Rejected: rejectedTxs,
Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
GasUsed: (math.HexOrDecimal64)(gasUsed),
}
return statedb, execRs, nil
}

View File

@@ -32,7 +32,11 @@ var (
}
TraceDisableMemoryFlag = cli.BoolTFlag{
Name: "trace.nomemory",
Usage: "Disable full memory dump in traces",
Usage: "Disable full memory dump in traces (deprecated)",
}
TraceEnableMemoryFlag = cli.BoolFlag{
Name: "trace.memory",
Usage: "Enable full memory dump in traces",
}
TraceDisableStackFlag = cli.BoolFlag{
Name: "trace.nostack",
@@ -40,7 +44,11 @@ var (
}
TraceDisableReturnDataFlag = cli.BoolTFlag{
Name: "trace.noreturndata",
Usage: "Disable return data output in traces",
Usage: "Disable return data output in traces (deprecated)",
}
TraceEnableReturnDataFlag = cli.BoolFlag{
Name: "trace.returndata",
Usage: "Enable return data output in traces",
}
OutputBasedir = cli.StringFlag{
Name: "output.basedir",
@@ -68,6 +76,14 @@ var (
"\t<file> - into the file <file> ",
Value: "result.json",
}
OutputBlockFlag = cli.StringFlag{
Name: "output.block",
Usage: "Determines where to put the `block` after building.\n" +
"\t`stdout` - into the stdout output\n" +
"\t`stderr` - into the stderr output\n" +
"\t<file> - into the file <file> ",
Value: "block.json",
}
InputAllocFlag = cli.StringFlag{
Name: "input.alloc",
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
@@ -81,10 +97,41 @@ var (
InputTxsFlag = cli.StringFlag{
Name: "input.txs",
Usage: "`stdin` or file name of where to find the transactions to apply. " +
"If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
"If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
"The '.rlp' format is identical to the output.body format.",
Value: "txs.json",
}
InputHeaderFlag = cli.StringFlag{
Name: "input.header",
Usage: "`stdin` or file name of where to find the block header to use.",
Value: "header.json",
}
InputOmmersFlag = cli.StringFlag{
Name: "input.ommers",
Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.",
}
InputTxsRlpFlag = cli.StringFlag{
Name: "input.txs",
Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
Value: "txs.rlp",
}
SealCliqueFlag = cli.StringFlag{
Name: "seal.clique",
Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
}
SealEthashFlag = cli.BoolFlag{
Name: "seal.ethash",
Usage: "Seal block with ethash.",
}
SealEthashDirFlag = cli.StringFlag{
Name: "seal.ethash.dir",
Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.",
}
SealEthashModeFlag = cli.StringFlag{
Name: "seal.ethash.mode",
Usage: "Defines the type and amount of PoW verification an ethash engine makes.",
Value: "normal",
}
RewardFlag = cli.Int64Flag{
Name: "state.reward",
Usage: "Mining reward. Set to -1 to disable",

View File

@@ -0,0 +1,135 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package t8ntool
import (
"encoding/json"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
)
var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h header) MarshalJSON() ([]byte, error) {
type header struct {
ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData"`
MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
}
var enc header
enc.ParentHash = h.ParentHash
enc.OmmerHash = h.OmmerHash
enc.Coinbase = h.Coinbase
enc.Root = h.Root
enc.TxHash = h.TxHash
enc.ReceiptHash = h.ReceiptHash
enc.Bloom = h.Bloom
enc.Difficulty = (*math.HexOrDecimal256)(h.Difficulty)
enc.Number = (*math.HexOrDecimal256)(h.Number)
enc.GasLimit = math.HexOrDecimal64(h.GasLimit)
enc.GasUsed = math.HexOrDecimal64(h.GasUsed)
enc.Time = math.HexOrDecimal64(h.Time)
enc.Extra = h.Extra
enc.MixDigest = h.MixDigest
enc.Nonce = h.Nonce
enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (h *header) UnmarshalJSON(input []byte) error {
type header struct {
ParentHash *common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom *types.Bloom `json:"logsBloom"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
}
var dec header
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.ParentHash != nil {
h.ParentHash = *dec.ParentHash
}
if dec.OmmerHash != nil {
h.OmmerHash = dec.OmmerHash
}
if dec.Coinbase != nil {
h.Coinbase = dec.Coinbase
}
if dec.Root == nil {
return errors.New("missing required field 'stateRoot' for header")
}
h.Root = *dec.Root
if dec.TxHash != nil {
h.TxHash = dec.TxHash
}
if dec.ReceiptHash != nil {
h.ReceiptHash = dec.ReceiptHash
}
if dec.Bloom != nil {
h.Bloom = *dec.Bloom
}
if dec.Difficulty != nil {
h.Difficulty = (*big.Int)(dec.Difficulty)
}
if dec.Number == nil {
return errors.New("missing required field 'number' for header")
}
h.Number = (*big.Int)(dec.Number)
if dec.GasLimit == nil {
return errors.New("missing required field 'gasLimit' for header")
}
h.GasLimit = uint64(*dec.GasLimit)
if dec.GasUsed != nil {
h.GasUsed = uint64(*dec.GasUsed)
}
if dec.Time == nil {
return errors.New("missing required field 'timestamp' for header")
}
h.Time = uint64(*dec.Time)
if dec.Extra != nil {
h.Extra = *dec.Extra
}
if dec.MixDigest != nil {
h.MixDigest = *dec.MixDigest
}
if dec.Nonce != nil {
h.Nonce = dec.Nonce
}
if dec.BaseFee != nil {
h.BaseFee = (*big.Int)(dec.BaseFee)
}
return nil
}

View File

@@ -48,7 +48,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
Error string `json:"error,omitempty"`
Address *common.Address `json:"address,omitempty"`
Hash *common.Hash `json:"hash,omitempty"`
IntrinsicGas uint64 `json:"intrinsicGas,omitempty"`
IntrinsicGas hexutil.Uint64 `json:"intrinsicGas,omitempty"`
}
var out xx
if r.Error != nil {
@@ -60,7 +60,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
if r.Hash != (common.Hash{}) {
out.Hash = &r.Hash
}
out.IntrinsicGas = r.IntrinsicGas
out.IntrinsicGas = hexutil.Uint64(r.IntrinsicGas)
return json.Marshal(out)
}
@@ -82,7 +82,7 @@ func Transaction(ctx *cli.Context) error {
)
// Construct the chainconfig
if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
} else {
chainConfig = cConf
}
@@ -154,6 +154,8 @@ func Transaction(ctx *cli.Context) error {
}
// Validate <256bit fields
switch {
case tx.Nonce()+1 < tx.Nonce():
r.Error = errors.New("nonce exceeds 2^64-1")
case tx.Value().BitLen() > 256:
r.Error = errors.New("value exceeds 256 bits")
case tx.GasPrice().BitLen() > 256:

View File

@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -43,11 +44,12 @@ import (
const (
ErrorEVM = 2
ErrorVMConfig = 3
ErrorConfig = 3
ErrorMissingBlockhash = 4
ErrorJson = 10
ErrorIO = 11
ErrorRlp = 12
stdinSelector = "stdin"
)
@@ -88,28 +90,33 @@ func Transition(ctx *cli.Context) error {
log.Root().SetHandler(glogger)
var (
err error
tracer vm.EVMLogger
baseDir = ""
err error
tracer vm.EVMLogger
)
var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)
// If user specified a basedir, make sure it exists
if ctx.IsSet(OutputBasedir.Name) {
if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
err := os.MkdirAll(base, 0755) // //rw-r--r--
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
}
baseDir = base
}
baseDir, err := createBasedir(ctx)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
}
if ctx.Bool(TraceFlag.Name) {
if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) {
return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
}
if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) {
return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
}
if ctx.IsSet(TraceDisableMemoryFlag.Name) {
log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
}
if ctx.IsSet(TraceDisableReturnDataFlag.Name) {
log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
}
// Configure the EVM logger
logConfig := &vm.LogConfig{
logConfig := &logger.Config{
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name),
EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name),
EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name),
Debug: true,
}
var prevFile *os.File
@@ -128,7 +135,7 @@ func Transition(ctx *cli.Context) error {
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
}
prevFile = traceFile
return vm.NewJSONLogger(logConfig, traceFile), nil
return logger.NewJSONLogger(logConfig, traceFile), nil
}
} else {
getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
@@ -155,29 +162,17 @@ func Transition(ctx *cli.Context) error {
}
}
if allocStr != stdinSelector {
inFile, err := os.Open(allocStr)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err))
}
defer inFile.Close()
decoder := json.NewDecoder(inFile)
if err := decoder.Decode(&inputData.Alloc); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err))
if err := readFile(allocStr, "alloc", &inputData.Alloc); err != nil {
return err
}
}
prestate.Pre = inputData.Alloc
// Set the block environment
if envStr != stdinSelector {
inFile, err := os.Open(envStr)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err))
}
defer inFile.Close()
decoder := json.NewDecoder(inFile)
var env stEnv
if err := decoder.Decode(&env); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling env-file: %v", err))
if err := readFile(envStr, "env", &env); err != nil {
return err
}
inputData.Env = &env
}
@@ -190,7 +185,7 @@ func Transition(ctx *cli.Context) error {
// Construct the chainconfig
var chainConfig *params.ChainConfig
if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
} else {
chainConfig = cConf
vmConfig.ExtraEips = extraEips
@@ -254,18 +249,18 @@ func Transition(ctx *cli.Context) error {
// Sanity check, to not `panic` in state_transition
if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
if prestate.Env.BaseFee == nil {
return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
}
if env := prestate.Env; env.Difficulty == nil {
// If difficulty was not provided by caller, we need to calculate it.
switch {
case env.ParentDifficulty == nil:
return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
case env.Number == 0:
return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
case env.Timestamp <= env.ParentTimestamp:
return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
env.Timestamp, env.ParentTimestamp))
}
prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
@@ -286,27 +281,34 @@ func Transition(ctx *cli.Context) error {
// txWithKey is a helper-struct, to allow us to use the types.Transaction along with
// a `secretKey`-field, for input
type txWithKey struct {
key *ecdsa.PrivateKey
tx *types.Transaction
key *ecdsa.PrivateKey
tx *types.Transaction
protected bool
}
func (t *txWithKey) UnmarshalJSON(input []byte) error {
// Read the secretKey, if present
type sKey struct {
Key *common.Hash `json:"secretKey"`
// Read the metadata, if present
type txMetadata struct {
Key *common.Hash `json:"secretKey"`
Protected *bool `json:"protected"`
}
var key sKey
if err := json.Unmarshal(input, &key); err != nil {
var data txMetadata
if err := json.Unmarshal(input, &data); err != nil {
return err
}
if key.Key != nil {
k := key.Key.Hex()[2:]
if data.Key != nil {
k := data.Key.Hex()[2:]
if ecdsaKey, err := crypto.HexToECDSA(k); err != nil {
return err
} else {
t.key = ecdsaKey
}
}
if data.Protected != nil {
t.protected = *data.Protected
} else {
t.protected = true
}
// Now, read the transaction itself
var tx types.Transaction
if err := json.Unmarshal(input, &tx); err != nil {
@@ -335,7 +337,15 @@ func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Tran
v, r, s := tx.RawSignatureValues()
if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 {
// This transaction needs to be signed
signed, err := types.SignTx(tx, signer, key)
var (
signed *types.Transaction
err error
)
if txWithKey.protected {
signed, err = types.SignTx(tx, signer, key)
} else {
signed, err = types.SignTx(tx, types.FrontierSigner{}, key)
}
if err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
}
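For illustration, a minimal, self-contained sketch (not part of the diff) of the two signing paths the new `protected` switch selects between. The key and recipient are borrowed from the `testdata/23` fixtures added below; the chain id of 1 is a hypothetical choice:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	to := common.HexToAddress("0x095e7baea6a6c7c4c2dfeb977efac326af552d87")
	tx := types.NewTransaction(0, to, big.NewInt(0x186a0), 21000, big.NewInt(1), nil)

	// "protected": true (the default) signs with an EIP-155 signer,
	// which folds the chain id into the v value for replay protection.
	eip155, _ := types.SignTx(tx, types.NewEIP155Signer(big.NewInt(1)), key)

	// "protected": false falls back to the pre-EIP-155 frontier scheme,
	// where v is just 27 or 28 and the signature carries no replay protection.
	frontier, _ := types.SignTx(tx, types.FrontierSigner{}, key)

	fmt.Println(eip155.Protected(), frontier.Protected()) // true false
}
```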


@@ -0,0 +1,54 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package t8ntool
import (
"encoding/json"
"fmt"
"os"
"gopkg.in/urfave/cli.v1"
)
// readFile reads the json-data in the provided path and unmarshals it into dest.
func readFile(path, desc string, dest interface{}) error {
inFile, err := os.Open(path)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed reading %s file: %v", desc, err))
}
defer inFile.Close()
decoder := json.NewDecoder(inFile)
if err := decoder.Decode(dest); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling %s file: %v", desc, err))
}
return nil
}
// createBasedir makes sure the basedir exists, if the user specified one.
func createBasedir(ctx *cli.Context) (string, error) {
baseDir := ""
if ctx.IsSet(OutputBasedir.Name) {
if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
err := os.MkdirAll(base, 0755) // rwxr-xr-x
if err != nil {
return "", err
}
baseDir = base
}
}
return baseDir, nil
}


@@ -139,8 +139,10 @@ var stateTransitionCommand = cli.Command{
Flags: []cli.Flag{
t8ntool.TraceFlag,
t8ntool.TraceDisableMemoryFlag,
t8ntool.TraceEnableMemoryFlag,
t8ntool.TraceDisableStackFlag,
t8ntool.TraceDisableReturnDataFlag,
t8ntool.TraceEnableReturnDataFlag,
t8ntool.OutputBasedir,
t8ntool.OutputAllocFlag,
t8ntool.OutputResultFlag,
@@ -167,6 +169,25 @@ var transactionCommand = cli.Command{
},
}
var blockBuilderCommand = cli.Command{
Name: "block-builder",
Aliases: []string{"b11r"},
Usage: "builds a block",
Action: t8ntool.BuildBlock,
Flags: []cli.Flag{
t8ntool.OutputBasedir,
t8ntool.OutputBlockFlag,
t8ntool.InputHeaderFlag,
t8ntool.InputOmmersFlag,
t8ntool.InputTxsRlpFlag,
t8ntool.SealCliqueFlag,
t8ntool.SealEthashFlag,
t8ntool.SealEthashDirFlag,
t8ntool.SealEthashModeFlag,
t8ntool.VerbosityFlag,
},
}
func init() {
app.Flags = []cli.Flag{
BenchFlag,
@@ -200,6 +221,7 @@ func init() {
stateTestCommand,
stateTransitionCommand,
transactionCommand,
blockBuilderCommand,
}
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
}


@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1"
@@ -107,7 +108,7 @@ func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &vm.LogConfig{
logconfig := &logger.Config{
EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
@@ -117,7 +118,7 @@ func runCmd(ctx *cli.Context) error {
var (
tracer vm.EVMLogger
debugLogger *vm.StructLogger
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
@@ -125,12 +126,12 @@ func runCmd(ctx *cli.Context) error {
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = vm.NewJSONLogger(logconfig, os.Stdout)
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) {
debugLogger = vm.NewStructLogger(logconfig)
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger
} else {
debugLogger = vm.NewStructLogger(logconfig)
debugLogger = logger.NewStructLogger(logconfig)
}
if ctx.GlobalString(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
@@ -288,10 +289,10 @@ func runCmd(ctx *cli.Context) error {
if ctx.GlobalBool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
vm.WriteLogs(os.Stderr, statedb.Logs())
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.GlobalBool(StatDumpFlag.Name) {


@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests"
@@ -58,7 +59,7 @@ func stateTestCmd(ctx *cli.Context) error {
log.Root().SetHandler(glogger)
// Configure the EVM logger
config := &vm.LogConfig{
config := &logger.Config{
EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
@@ -66,18 +67,18 @@ func stateTestCmd(ctx *cli.Context) error {
}
var (
tracer vm.EVMLogger
debugger *vm.StructLogger
debugger *logger.StructLogger
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
tracer = vm.NewJSONLogger(config, os.Stderr)
tracer = logger.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)
debugger = logger.NewStructLogger(config)
tracer = debugger
default:
debugger = vm.NewStructLogger(config)
debugger = logger.NewStructLogger(config)
}
// Load the test content from the input file
src, err := ioutil.ReadFile(ctx.Args().First())
@@ -118,7 +119,7 @@ func stateTestCmd(ctx *cli.Context) error {
if ctx.GlobalBool(DebugFlag.Name) {
if debugger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugger.StructLogs())
logger.WriteTrace(os.Stderr, debugger.StructLogs())
}
}
}
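For orientation, a minimal sketch of the tracer-construction pattern these hunks migrate to, assuming only the identifiers visible in the diffs above (`logger.Config`, `logger.NewStructLogger`, `logger.NewJSONLogger` and the unchanged `vm.EVMLogger` interface):

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

// newTracer mirrors the switch used by runCmd and stateTestCmd: a JSON
// logger for machine consumption, a struct logger otherwise. The logger
// types moved out of core/vm into eth/tracers/logger, while callers keep
// holding them through the vm.EVMLogger interface.
func newTracer(machine bool) vm.EVMLogger {
	cfg := &logger.Config{
		EnableMemory:     true,
		EnableReturnData: true,
	}
	if machine {
		return logger.NewJSONLogger(cfg, os.Stderr)
	}
	return logger.NewStructLogger(cfg)
}

func main() {
	_ = newTracer(false)
}
```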


@@ -131,7 +131,7 @@ func TestT8n(t *testing.T) {
output: t8nOutput{alloc: true, result: true},
expExitCode: 4,
},
{ // Ommer test
{ // Uncle test
base: "./testdata/5",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Byzantium", "0x80",
@@ -171,7 +171,7 @@ func TestT8n(t *testing.T) {
output: t8nOutput{result: true},
expOut: "exp2.json",
},
{ // Difficulty calculation - with uncles + Berlin
{ // Difficulty calculation - with ommers + Berlin
base: "./testdata/14",
input: t8nInput{
"alloc.json", "txs.json", "env.uncles.json", "Berlin", "",
@@ -195,6 +195,14 @@ func TestT8n(t *testing.T) {
output: t8nOutput{result: true},
expOut: "exp_arrowglacier.json",
},
{ // Sign unprotected (pre-EIP155) transaction
base: "./testdata/23",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Berlin", "",
},
output: t8nOutput{result: true},
expOut: "exp.json",
},
} {
args := []string{"t8n"}
@@ -336,6 +344,126 @@ func TestT9n(t *testing.T) {
}
}
type b11rInput struct {
inEnv string
inOmmersRlp string
inTxsRlp string
inClique string
ethash bool
ethashMode string
ethashDir string
}
func (args *b11rInput) get(base string) []string {
var out []string
if opt := args.inEnv; opt != "" {
out = append(out, "--input.header")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inOmmersRlp; opt != "" {
out = append(out, "--input.ommers")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inTxsRlp; opt != "" {
out = append(out, "--input.txs")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inClique; opt != "" {
out = append(out, "--seal.clique")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if args.ethash {
out = append(out, "--seal.ethash")
}
if opt := args.ethashMode; opt != "" {
out = append(out, "--seal.ethash.mode")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.ethashDir; opt != "" {
out = append(out, "--seal.ethash.dir")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
out = append(out, "--output.block")
out = append(out, "stdout")
return out
}
func TestB11r(t *testing.T) {
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
base string
input b11rInput
expExitCode int
expOut string
}{
{ // unsealed block
base: "./testdata/20",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
{ // ethash test seal
base: "./testdata/21",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
{ // clique test seal
base: "./testdata/21",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
inClique: "clique.json",
},
expOut: "exp-clique.json",
},
{ // block with ommers
base: "./testdata/22",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
} {
args := []string{"b11r"}
args = append(args, tc.input.get(tc.base)...)
tt.Run("evm-test", args...)
tt.Logf("args:\n go run . %v\n", strings.Join(args, " "))
// Compare the expected output, if provided
if tc.expOut != "" {
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
if err != nil {
t.Fatalf("test %d: could not read expected output: %v", i, err)
}
have := tt.Output()
ok, err := cmpJson(have, want)
switch {
case err != nil:
t.Logf(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
}
}
tt.WaitExit()
if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
}
}
}
// cmpJson compares the JSON in two byte slices.
func cmpJson(a, b []byte) (bool, error) {
var j, j2 interface{}


@@ -15,7 +15,7 @@
"result": {
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
"receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
@@ -38,6 +38,7 @@
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
}
],
"currentDifficulty": "0x20000"
"currentDifficulty": "0x20000",
"gasUsed": "0x5208"
}
}
}


@@ -2,7 +2,7 @@
"result": {
"stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61",
"txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
"receiptRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
"receiptsRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
@@ -33,6 +33,7 @@
"transactionIndex": "0x1"
}
],
"currentDifficulty": "0x20000"
"currentDifficulty": "0x20000",
"gasUsed": "0x109a0"
}
}


@@ -2,10 +2,11 @@
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000020000000",
"receipts": []
"receipts": [],
"gasUsed": "0x0"
}
}
}


@@ -2,10 +2,11 @@
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x1ff8020000000"
"currentDifficulty": "0x1ff8020000000",
"gasUsed": "0x0"
}
}
}


@@ -2,10 +2,11 @@
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x1ff9000000000"
"currentDifficulty": "0x1ff9000000000",
"gasUsed": "0x0"
}
}
}


@@ -2,11 +2,11 @@
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
"intrinsicGas": 21000
"intrinsicGas": "0x5208"
},
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
"intrinsicGas": 21000
"intrinsicGas": "0x5208"
}
]


@@ -2,12 +2,12 @@
{
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6",
"intrinsicGas": 21000
"intrinsicGas": "0x5208"
},
{
"error": "intrinsic gas too low: have 82, want 21000",
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b",
"intrinsicGas": 21000
"intrinsicGas": "0x5208"
}
]


@@ -3,13 +3,13 @@
"error": "value exceeds 256 bits",
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82",
"intrinsicGas": 21000
"intrinsicGas": "0x5208"
},
{
"error": "gasPrice exceeds 256 bits",
"address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964",
"hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5",
"intrinsicGas": 21000
"intrinsicGas": "0x5208"
},
{
"error": "invalid transaction v, r, s values",


@@ -2,10 +2,11 @@
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000000200000",
"receipts": []
"receipts": [],
"gasUsed": "0x0"
}
}
}


@@ -2,10 +2,11 @@
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000080000000",
"receipts": []
"receipts": [],
"gasUsed": "0x0"
}
}
}

cmd/evm/testdata/20/exp.json vendored Normal file

@@ -0,0 +1,4 @@
{
"rlp": "0xf902d9f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8f8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600c0",
"hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899"
}

cmd/evm/testdata/20/header.json vendored Normal file

@@ -0,0 +1,14 @@
{
"parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
"miner": "0xe997a23b159e2e2a5ce72333262972374b15425c",
"stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1000",
"number": "0xc3be",
"gasLimit": "0x50785",
"gasUsed": "0x0",
"timestamp": "0x55c5277e",
"extraData": "0x476574682f76312e302e312f6c696e75782f676f312e342e32",
"mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf",
"nonce": "0x97435673d874f7c8"
}

cmd/evm/testdata/20/ommers.json vendored Normal file

@@ -0,0 +1 @@
[]

cmd/evm/testdata/20/readme.md vendored Normal file

@@ -0,0 +1,11 @@
# Block building
This test shows how `b11r` can be used to assemble an unsealed block.
```console
$ go run . b11r --input.header=testdata/20/header.json --input.txs=testdata/20/txs.rlp --input.ommers=testdata/20/ommers.json --output.block=stdout
{
"rlp": "0xf90216f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8c0c0",
"hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899"
}
```

cmd/evm/testdata/20/txs.rlp vendored Normal file

@@ -0,0 +1 @@
"0xf8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600"

cmd/evm/testdata/21/clique.json vendored Normal file

@@ -0,0 +1,6 @@
{
"secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"voted": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"authorize": false,
"vanity": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}

cmd/evm/testdata/21/exp-clique.json vendored Normal file

@@ -0,0 +1,4 @@
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
"hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}

cmd/evm/testdata/21/exp.json vendored Normal file

@@ -0,0 +1,4 @@
{
"rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0",
"hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb"
}

cmd/evm/testdata/21/header.json vendored Normal file

@@ -0,0 +1,11 @@
{
"parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
"stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1000",
"number": "0xc3be",
"gasLimit": "0x50785",
"gasUsed": "0x0",
"timestamp": "0x55c5277e",
"mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"
}

cmd/evm/testdata/21/ommers.json vendored Normal file

@@ -0,0 +1 @@
[]

cmd/evm/testdata/21/readme.md vendored Normal file

@@ -0,0 +1,23 @@
# Sealed block building
This test shows how `b11r` can be used to assemble a sealed block.
## Ethash
```console
$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.ethash --seal.ethash.mode=test --output.block=stdout
{
"rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0",
"hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb"
}
```
## Clique
```console
$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.clique=testdata/21/clique.json --output.block=stdout
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
"hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}
```

cmd/evm/testdata/21/txs.rlp vendored Normal file

@@ -0,0 +1 @@
"c0"

cmd/evm/testdata/22/exp-clique.json vendored Normal file

@@ -0,0 +1,4 @@
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
"hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}

cmd/evm/testdata/22/exp.json vendored Normal file

@@ -0,0 +1,4 @@
{
"rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000",
"hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755"
}

cmd/evm/testdata/22/header.json vendored Normal file

@@ -0,0 +1,11 @@
{
"parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
"stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1000",
"number": "0xc3be",
"gasLimit": "0x50785",
"gasUsed": "0x0",
"timestamp": "0x55c5277e",
"mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"
}

cmd/evm/testdata/22/ommers.json vendored Normal file

@@ -0,0 +1 @@
["0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0","0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0"]

cmd/evm/testdata/22/readme.md vendored Normal file

@@ -0,0 +1,11 @@
# Building blocks with ommers
This test shows how `b11r` invocations can be chained together, feeding previously assembled blocks back in as ommers of a canonical block.
```console
$ echo "{ \"ommers\": [`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`,`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`]}" | go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --input.ommers=stdin --output.block=stdout
{
"rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000",
"hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755"
}
```

cmd/evm/testdata/22/txs.rlp vendored Normal file

@@ -0,0 +1 @@
"c0"

cmd/evm/testdata/23/alloc.json vendored Normal file

@@ -0,0 +1,16 @@
{
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "0x6001",
"nonce" : "0x00",
"storage" : {
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
}
}
}

cmd/evm/testdata/23/env.json vendored Normal file

@@ -0,0 +1,7 @@
{
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty" : "0x020000",
"currentGasLimit" : "0x3b9aca00",
"currentNumber" : "0x05",
"currentTimestamp" : "0x03e8"
}

cmd/evm/testdata/23/exp.json vendored Normal file

@@ -0,0 +1,25 @@
{
"result": {
"stateRoot": "0x65334305e4accfa18352deb24f007b837b5036425b0712cf0e65a43bfa95154d",
"txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
"receiptsRoot": "0xf951f9396af203499cc7d379715a9110323de73967c5700e2f424725446a3c76",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x520b",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x520b",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x520b"
}
}

cmd/evm/testdata/23/readme.md vendored Normal file

@@ -0,0 +1 @@
These files exemplify how to sign a transaction using the pre-EIP155 scheme.
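A transaction whose `v`, `r` and `s` fields are all zero counts as unsigned, so `t8n` signs it with the supplied `secretKey`; with `"protected": false` it uses the frontier scheme instead of EIP-155 (see `signUnsignedTransactions` above). A possible invocation, not part of the committed readme and with the flag spelling assumed from `t8ntool`'s flag definitions:

```console
$ go run . t8n --input.alloc=testdata/23/alloc.json --input.txs=testdata/23/txs.json --input.env=testdata/23/env.json --state.fork=Berlin --output.result=stdout
```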

cmd/evm/testdata/23/txs.json vendored Normal file

@@ -0,0 +1,15 @@
[
{
"input" : "0x",
"gas" : "0x5f5e100",
"gasPrice" : "0x1",
"nonce" : "0x0",
"to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
"value" : "0x186a0",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"protected": false
}
]


@@ -15,7 +15,7 @@
"result": {
"stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1",
"txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
"receiptRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
"receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
@@ -32,6 +32,7 @@
"transactionIndex": "0x0"
}
],
"currentDifficulty": "0x20000"
"currentDifficulty": "0x20000",
"gasUsed": "0x521f"
}
}


@@ -13,10 +13,11 @@
"result": {
"stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x20000"
"currentDifficulty": "0x20000",
"gasUsed": "0x0"
}
}


@@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
@@ -159,17 +158,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) {
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Configure catalyst.
if ctx.GlobalBool(utils.CatalystFlag.Name) {
if eth == nil {
utils.Fatalf("Catalyst does not work in light client mode.")
}
if err := catalyst.Register(stack, eth); err != nil {
utils.Fatalf("%v", err)
}
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
cfg.Eth.Genesis.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
}
backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
// Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {


@@ -77,6 +77,7 @@ Remove blockchain and state databases`,
ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,


@@ -40,7 +40,8 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
// Force-load the native, to trigger registration
// Force-load the tracer engines to trigger registration
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
"gopkg.in/urfave/cli.v1"
@@ -71,6 +72,7 @@ var (
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideArrowGlacierFlag,
utils.OverrideTerminalTotalDifficulty,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
@@ -139,6 +141,7 @@ var (
utils.MainnetFlag,
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.DeveloperGasLimitFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,


@@ -220,7 +220,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false, false)
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
@@ -472,7 +472,7 @@ func dumpState(ctx *cli.Context) error {
if err != nil {
return err
}
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false, false)
if err != nil {
return err
}


@@ -75,6 +75,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Flags: []cli.Flag{
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.DeveloperGasLimitFlag,
},
},
{


@@ -45,6 +45,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -167,6 +168,11 @@ var (
Name: "dev.period",
Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
}
DeveloperGasLimitFlag = cli.Uint64Flag{
Name: "dev.gaslimit",
Usage: "Initial block gas limit",
Value: 11500000,
}
IdentityFlag = cli.StringFlag{
Name: "identity",
Usage: "Custom node name",
@@ -209,7 +215,7 @@ var (
defaultSyncMode = ethconfig.Defaults.SyncMode
SyncModeFlag = TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`,
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
Value: &defaultSyncMode,
}
GCModeFlag = cli.StringFlag{
@@ -243,6 +249,10 @@ var (
Name: "override.arrowglacier",
Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting",
}
OverrideTerminalTotalDifficulty = cli.Uint64Flag{
Name: "override.terminaltotaldifficulty",
Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting",
}
// Light server and client settings
LightServeFlag = cli.IntFlag{
Name: "light.serve",
@@ -1191,7 +1201,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
cfg.NetRestrict = list
}
if ctx.GlobalBool(DeveloperFlag.Name) || ctx.GlobalBool(CatalystFlag.Name) {
if ctx.GlobalBool(DeveloperFlag.Name) {
// --dev mode can't use p2p networking.
cfg.MaxPeers = 0
cfg.ListenAddr = ""
@@ -1661,7 +1671,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Info("Using developer account", "address", developer.Address)
// Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
if ctx.GlobalIsSet(DataDirFlag.Name) {
// Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec.
@@ -1700,13 +1710,18 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
if isCatalyst {
if err := catalyst.RegisterLight(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
return backend.ApiBackend, nil
}
backend, err := eth.New(stack, cfg)
@@ -1719,6 +1734,11 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
Fatalf("Failed to create the LES server: %v", err)
}
}
if isCatalyst {
if err := catalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
return backend.APIBackend, backend
}


@@ -0,0 +1,376 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package beacon
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
)
// Proof-of-stake protocol constants.
var (
beaconDifficulty = common.Big0 // The default block difficulty in the beacon consensus
beaconNonce = types.EncodeNonce(0) // The default block nonce in the beacon consensus
)
// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
errTooManyUncles = errors.New("too many uncles")
errInvalidMixDigest = errors.New("invalid mix digest")
errInvalidNonce = errors.New("invalid nonce")
errInvalidUncleHash = errors.New("invalid uncle hash")
)
// Beacon is a consensus engine that combines the eth1 consensus and proof-of-stake
// algorithm. There is a special flag inside to decide whether to use legacy consensus
// rules or new rules. The transition rule is described in the eth1/2 merge spec.
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md
//
// The beacon here is a half-functional consensus engine that implements only the
// parts needed for the necessary consensus checks. The legacy consensus engine can
// be any engine that implements the consensus interface (except the beacon itself).
type Beacon struct {
ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique
}
// New creates a consensus engine with the given embedded eth1 engine.
func New(ethone consensus.Engine) *Beacon {
if _, ok := ethone.(*Beacon); ok {
panic("nested consensus engine")
}
return &Beacon{ethone: ethone}
}
// Author implements consensus.Engine, returning the verified author of the block.
func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
if !beacon.IsPoSHeader(header) {
return beacon.ethone.Author(header)
}
return header.Coinbase, nil
}
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine.
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
reached, _ := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
if !reached {
return beacon.ethone.VerifyHeader(chain, header, seal)
}
// Short circuit if the parent is not known
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
// Sanity checks passed, do a proper verification
return beacon.verifyHeader(chain, header, parent)
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
// VerifyHeaders expects the headers to be ordered and continuous.
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
if !beacon.IsPoSHeader(headers[len(headers)-1]) {
return beacon.ethone.VerifyHeaders(chain, headers, seals)
}
var (
preHeaders []*types.Header
postHeaders []*types.Header
preSeals []bool
)
for index, header := range headers {
if beacon.IsPoSHeader(header) {
preHeaders = headers[:index]
postHeaders = headers[index:]
preSeals = seals[:index]
break
}
}
// All the headers have passed the transition point, use new rules.
if len(preHeaders) == 0 {
return beacon.verifyHeaders(chain, headers, nil)
}
// The transition point exists in the middle, separate the headers
// into two batches and apply different verification rules for them.
var (
abort = make(chan struct{})
results = make(chan error, len(headers))
)
go func() {
var (
old, new, out = 0, len(preHeaders), 0
errors = make([]error, len(headers))
done = make([]bool, len(headers))
oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders, preSeals)
newDone, newResult = beacon.verifyHeaders(chain, postHeaders, preHeaders[len(preHeaders)-1])
)
for {
for ; done[out]; out++ {
results <- errors[out]
if out == len(headers)-1 {
return
}
}
select {
case err := <-oldResult:
errors[old], done[old] = err, true
old++
case err := <-newResult:
errors[new], done[new] = err, true
new++
case <-abort:
close(oldDone)
close(newDone)
return
}
}
}()
return abort, results
}
// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of the Ethereum consensus engine.
func (beacon *Beacon) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
if !beacon.IsPoSHeader(block.Header()) {
return beacon.ethone.VerifyUncles(chain, block)
}
// Verify that there is no uncle block. It's explicitly disabled in the beacon
if len(block.Uncles()) > 0 {
return errTooManyUncles
}
return nil
}
// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine. The differences between the beacon and classic are:
// (a) the following fields are expected to be constants:
//     - difficulty is expected to be 0
//     - nonce is expected to be 0
//     - unclehash is expected to be Hash(emptyHeader)
// (b) the timestamp is not verified anymore
// (c) the extradata is limited to 32 bytes
func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header) error {
// Ensure that the header's extra-data section is of a reasonable size
if len(header.Extra) > 32 {
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
}
// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
if header.MixDigest != (common.Hash{}) {
return errInvalidMixDigest
}
if header.Nonce != beaconNonce {
return errInvalidNonce
}
if header.UncleHash != types.EmptyUncleHash {
return errInvalidUncleHash
}
// Verify the block's difficulty to ensure it's the default constant
if beaconDifficulty.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, beaconDifficulty)
}
// Verify that the gas limit is <= 2^63-1
cap := uint64(0x7fffffffffffffff)
if header.GasLimit > cap {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
}
// Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
}
// Verify that the block number is parent's +1
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(common.Big1) != 0 {
return consensus.ErrInvalidNumber
}
// Verify the header's EIP-1559 attributes.
return misc.VerifyEip1559Header(chain.Config(), parent, header)
}
// verifyHeaders is similar to verifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications. An additional parent
// header will be passed if the relevant header is not in the database yet.
func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, ancestor *types.Header) (chan<- struct{}, <-chan error) {
var (
abort = make(chan struct{})
results = make(chan error, len(headers))
)
go func() {
for i, header := range headers {
var parent *types.Header
if i == 0 {
if ancestor != nil {
parent = ancestor
} else {
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
}
} else if headers[i-1].Hash() == headers[i].ParentHash {
parent = headers[i-1]
}
if parent == nil {
select {
case <-abort:
return
case results <- consensus.ErrUnknownAncestor:
}
continue
}
err := beacon.verifyHeader(chain, header, parent)
select {
case <-abort:
return
case results <- err:
}
}
}()
return abort, results
}
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline.
func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// Transition isn't triggered yet, use the legacy rules for preparation.
reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
if err != nil {
return err
}
if !reached {
return beacon.ethone.Prepare(chain, header)
}
header.Difficulty = beaconDifficulty
return nil
}
// Finalize implements consensus.Engine, setting the final state on the header
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
// Finalize is different from Prepare in that it can be used in both block generation
// and verification, so the consensus rules are determined by the header type.
if !beacon.IsPoSHeader(header) {
beacon.ethone.Finalize(chain, header, state, txs, uncles)
return
}
// The block reward is no longer handled here. It's done by the
// external consensus engine.
header.Root = state.IntermediateRoot(true)
}
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// FinalizeAndAssemble is different from Prepare in that it can be used in both block
// generation and verification, so the consensus rules are determined by the header type.
if !beacon.IsPoSHeader(header) {
return beacon.ethone.FinalizeAndAssemble(chain, header, state, txs, uncles, receipts)
}
// Finalize and assemble the block
beacon.Finalize(chain, header, state, txs, uncles)
return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil
}
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorithm.
func (beacon *Beacon) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
if !beacon.IsPoSHeader(block.Header()) {
return beacon.ethone.Seal(chain, block, results, stop)
}
// The seal verification is done by the external consensus engine, so return
// directly without pushing any block back. In other words, the beacon won't
// return any result via the `results` channel, which might otherwise block
// the receiver logic forever.
return nil
}
// SealHash returns the hash of a block prior to it being sealed.
func (beacon *Beacon) SealHash(header *types.Header) common.Hash {
return beacon.ethone.SealHash(header)
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
// Transition isn't triggered yet, use the legacy rules for calculation
if reached, _ := IsTTDReached(chain, parent.Hash(), parent.Number.Uint64()); !reached {
return beacon.ethone.CalcDifficulty(chain, time, parent)
}
return beaconDifficulty
}
// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (beacon *Beacon) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return beacon.ethone.APIs(chain)
}
// Close shuts down the consensus engine.
func (beacon *Beacon) Close() error {
return beacon.ethone.Close()
}
// IsPoSHeader reports whether the header belongs to the PoS stage, based on some
// special fields. This function is not suitable for use in parts of the API like
// Prepare or CalcDifficulty because the header difficulty is not set yet.
func (beacon *Beacon) IsPoSHeader(header *types.Header) bool {
if header.Difficulty == nil {
panic("IsPoSHeader called with invalid difficulty")
}
return header.Difficulty.Cmp(beaconDifficulty) == 0
}
// InnerEngine returns the embedded eth1 consensus engine.
func (beacon *Beacon) InnerEngine() consensus.Engine {
return beacon.ethone
}
// SetThreads updates the mining threads. Delegate the call
// to the eth1 engine if it's threaded.
func (beacon *Beacon) SetThreads(threads int) {
type threaded interface {
SetThreads(threads int)
}
if th, ok := beacon.ethone.(threaded); ok {
th.SetThreads(threads)
}
}
// IsTTDReached checks if the TerminalTotalDifficulty has been surpassed on the
// `parentHash` block. It depends on the parentHash already being stored in the
// database. If the parentHash is not stored in the database, an UnknownAncestor
// error is returned.
func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, number uint64) (bool, error) {
if chain.Config().TerminalTotalDifficulty == nil {
return false, nil
}
td := chain.GetTd(parentHash, number)
if td == nil {
return false, consensus.ErrUnknownAncestor
}
return td.Cmp(chain.Config().TerminalTotalDifficulty) >= 0, nil
}
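With the chain-reader plumbing stripped away, IsTTDReached reduces to a single total-difficulty comparison. A self-contained sketch of the same rule; the helper name is ours, not part of the changeset:

package main

import (
	"fmt"
	"math/big"
)

// ttdReached mirrors the comparison at the heart of IsTTDReached: the
// transition triggers once the parent's total difficulty meets or
// exceeds the configured terminal total difficulty.
func ttdReached(parentTd, terminalTd *big.Int) bool {
	if terminalTd == nil {
		return false // no TTD configured: chain is still pure PoW
	}
	return parentTd.Cmp(terminalTd) >= 0
}

func main() {
	ttd := big.NewInt(1000)
	fmt.Println(ttdReached(big.NewInt(999), ttd))  // false: keep the legacy rules
	fmt.Println(ttdReached(big.NewInt(1000), ttd)) // true: switch to beacon rules
}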


@@ -196,7 +196,11 @@ func (sb *blockNumberOrHashOrRLP) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &input); err != nil {
return err
}
sb.RLP = hexutil.MustDecode(input)
blob, err := hexutil.Decode(input)
if err != nil {
return err
}
sb.RLP = blob
return nil
}
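The hunk above replaces hexutil.MustDecode, which panics on malformed input, with hexutil.Decode, which lets UnmarshalJSON surface a regular error to the JSON caller. A small standalone illustration of the difference:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// hexutil.Decode returns an error for malformed input...
	if _, err := hexutil.Decode("0xzz"); err != nil {
		fmt.Println("rejected gracefully:", err)
	}
	// ...whereas hexutil.MustDecode("0xzz") would panic, taking the
	// whole RPC handler down with it, which is what the fix avoids.
	blob := hexutil.MustDecode("0x01ff")
	fmt.Printf("valid input decodes to %d bytes\n", len(blob))
}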


@@ -44,6 +44,9 @@ type ChainHeaderReader interface {
// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header
// GetTd retrieves the total difficulty from the database by hash and number.
GetTd(hash common.Hash, number uint64) *big.Int
}
// ChainReader defines a small collection of methods needed to access the local


@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/utils"
"golang.org/x/crypto/sha3"
)
@@ -660,10 +661,19 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header
r.Sub(r, header.Number)
r.Mul(r, blockReward)
r.Div(r, big8)
if state.Witness() != nil {
uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes())
state.Witness().TouchAddress(uncleCoinbase, state.GetBalance(uncle.Coinbase).Bytes())
}
state.AddBalance(uncle.Coinbase, r)
r.Div(blockReward, big32)
reward.Add(reward, r)
}
if config.IsCancun(header.Number) {
coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes())
state.Witness().TouchAddress(coinbase, state.GetBalance(header.Coinbase).Bytes())
}
state.AddBalance(header.Coinbase, reward)
}
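The reward hunk above follows a touch-then-mutate pattern: before a balance changes, its verkle tree key is derived and the pre-state value is recorded in the access witness. A condensed sketch of that pattern, using the same calls as above (the helper function is ours; it assumes the imports already present in this file, core/state and trie/utils):

// touchBalanceThenReward records the account's balance leaf in the
// access witness (when running on a verkle chain) before mutating it,
// mirroring the accumulateRewards change above.
func touchBalanceThenReward(statedb *state.StateDB, addr common.Address, reward *big.Int) {
	if statedb.Witness() != nil {
		key := utils.GetTreeKeyBalance(addr.Bytes())
		statedb.Witness().TouchAddress(key, statedb.GetBalance(addr).Bytes())
	}
	statedb.AddBalance(addr, reward)
}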

consensus/merger.go (new file, 110 lines)

@@ -0,0 +1,110 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package consensus
import (
"fmt"
"sync"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// transitionStatus describes the status of the eth1/2 transition. The switch
// between modes is a one-way action, triggered by the corresponding
// consensus-layer message.
type transitionStatus struct {
LeftPoW bool // The flag is set when the first NewHead message is received
EnteredPoS bool // The flag is set when the first FinalisedBlock message is received
}
// Merger is an internal helper structure used to track the eth1/2 transition status.
// It's a common structure that can be used in both the full node and the light client.
type Merger struct {
db ethdb.KeyValueStore
status transitionStatus
mu sync.RWMutex
}
// NewMerger creates a new Merger which stores its transition status in the provided db.
func NewMerger(db ethdb.KeyValueStore) *Merger {
var status transitionStatus
blob := rawdb.ReadTransitionStatus(db)
if len(blob) != 0 {
if err := rlp.DecodeBytes(blob, &status); err != nil {
log.Crit("Failed to decode the transition status", "err", err)
}
}
return &Merger{
db: db,
status: status,
}
}
// ReachTTD is called when the first NewHead message is received
// from the consensus layer.
func (m *Merger) ReachTTD() {
m.mu.Lock()
defer m.mu.Unlock()
if m.status.LeftPoW {
return
}
m.status = transitionStatus{LeftPoW: true}
blob, err := rlp.EncodeToBytes(m.status)
if err != nil {
panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
}
rawdb.WriteTransitionStatus(m.db, blob)
log.Info("Left PoW stage")
}
// FinalizePoS is called when the first FinalisedBlock message is received
// from the consensus layer.
func (m *Merger) FinalizePoS() {
m.mu.Lock()
defer m.mu.Unlock()
if m.status.EnteredPoS {
return
}
m.status = transitionStatus{LeftPoW: true, EnteredPoS: true}
blob, err := rlp.EncodeToBytes(m.status)
if err != nil {
panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
}
rawdb.WriteTransitionStatus(m.db, blob)
log.Info("Entered PoS stage")
}
// TDDReached reports whether the chain has left the PoW stage.
func (m *Merger) TDDReached() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.status.LeftPoW
}
// PoSFinalized reports whether the chain has entered the PoS stage.
func (m *Merger) PoSFinalized() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.status.EnteredPoS
}
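A minimal sketch of driving the Merger added above, using an in-memory database. In the real changeset the two transition calls are made by the catalyst service when the corresponding consensus-layer messages arrive:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	m := consensus.NewMerger(rawdb.NewMemoryDatabase())

	m.ReachTTD()                             // first NewHead message from the consensus layer
	fmt.Println("left PoW:", m.TDDReached()) // true

	m.FinalizePoS()                               // first FinalisedBlock message
	fmt.Println("PoS finalized:", m.PoSFinalized()) // true

	// Both transitions are persisted via rawdb.WriteTransitionStatus, so a
	// restart against the same database reports the same status.
}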


@@ -99,7 +99,7 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
t.Fatalf("failed to create node: %v", err)
}
ethConf := &ethconfig.Config{
Genesis: core.DeveloperGenesisBlock(15, common.Address{}),
Genesis: core.DeveloperGenesisBlock(15, 11_500_000, common.Address{}),
Miner: miner.Config{
Etherbase: common.HexToAddress(testAddress),
},


@@ -17,14 +17,21 @@
package core
import (
"encoding/json"
"math/big"
"runtime"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
@@ -76,6 +83,172 @@ func TestHeaderVerification(t *testing.T) {
}
}
func TestHeaderVerificationForMergingClique(t *testing.T) { testHeaderVerificationForMerging(t, true) }
func TestHeaderVerificationForMergingEthash(t *testing.T) { testHeaderVerificationForMerging(t, false) }
// Tests the verification for eth1/2 merging, including pre-merge and post-merge
func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
var (
testdb = rawdb.NewMemoryDatabase()
preBlocks []*types.Block
postBlocks []*types.Block
runEngine consensus.Engine
chainConfig *params.ChainConfig
merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
)
if isClique {
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key.PublicKey)
engine = clique.New(params.AllCliqueProtocolChanges.Clique, testdb)
)
genspec := &Genesis{
ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength),
Alloc: map[common.Address]GenesisAccount{
addr: {Balance: big.NewInt(1)},
},
BaseFee: big.NewInt(params.InitialBaseFee),
}
copy(genspec.ExtraData[32:], addr[:])
genesis := genspec.MustCommit(testdb)
genEngine := beacon.New(engine)
preBlocks, _ = GenerateChain(params.AllCliqueProtocolChanges, genesis, genEngine, testdb, 8, nil)
td := 0
for i, block := range preBlocks {
header := block.Header()
if i > 0 {
header.ParentHash = preBlocks[i-1].Hash()
}
header.Extra = make([]byte, 32+crypto.SignatureLength)
header.Difficulty = big.NewInt(2)
sig, _ := crypto.Sign(genEngine.SealHash(header).Bytes(), key)
copy(header.Extra[len(header.Extra)-crypto.SignatureLength:], sig)
preBlocks[i] = block.WithSeal(header)
// calculate td
td += int(block.Difficulty().Uint64())
}
config := *params.AllCliqueProtocolChanges
config.TerminalTotalDifficulty = big.NewInt(int64(td))
postBlocks, _ = GenerateChain(&config, preBlocks[len(preBlocks)-1], genEngine, testdb, 8, nil)
chainConfig = &config
runEngine = beacon.New(engine)
} else {
gspec := &Genesis{Config: params.TestChainConfig}
genesis := gspec.MustCommit(testdb)
genEngine := beacon.New(ethash.NewFaker())
preBlocks, _ = GenerateChain(params.TestChainConfig, genesis, genEngine, testdb, 8, nil)
td := 0
for _, block := range preBlocks {
// calculate td
td += int(block.Difficulty().Uint64())
}
config := *params.TestChainConfig
config.TerminalTotalDifficulty = big.NewInt(int64(td))
postBlocks, _ = GenerateChain(params.TestChainConfig, preBlocks[len(preBlocks)-1], genEngine, testdb, 8, nil)
chainConfig = &config
runEngine = beacon.New(ethash.NewFaker())
}
preHeaders := make([]*types.Header, len(preBlocks))
for i, block := range preBlocks {
preHeaders[i] = block.Header()
blob, _ := json.Marshal(block.Header())
t.Logf("Log header before the merging %d: %v", block.NumberU64(), string(blob))
}
postHeaders := make([]*types.Header, len(postBlocks))
for i, block := range postBlocks {
postHeaders[i] = block.Header()
blob, _ := json.Marshal(block.Header())
t.Logf("Log header after the merging %d: %v", block.NumberU64(), string(blob))
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
chain, _ := NewBlockChain(testdb, nil, chainConfig, runEngine, vm.Config{}, nil, nil)
defer chain.Stop()
// Verify the blocks before the merging
for i := 0; i < len(preBlocks); i++ {
_, results := runEngine.VerifyHeaders(chain, []*types.Header{preHeaders[i]}, []bool{true})
// Wait for the verification result
select {
case result := <-results:
if result != nil {
t.Errorf("test %d: verification failed %v", i, result)
}
case <-time.After(time.Second):
t.Fatalf("test %d: verification timeout", i)
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d: unexpected result returned: %v", i, result)
case <-time.After(25 * time.Millisecond):
}
chain.InsertChain(preBlocks[i : i+1])
}
// Make the transition
merger.ReachTTD()
merger.FinalizePoS()
// Verify the blocks after the merging
for i := 0; i < len(postBlocks); i++ {
_, results := runEngine.VerifyHeaders(chain, []*types.Header{postHeaders[i]}, []bool{true})
// Wait for the verification result
select {
case result := <-results:
if result != nil {
t.Errorf("test %d: verification failed %v", i, result)
}
case <-time.After(time.Second):
t.Fatalf("test %d: verification timeout", i)
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d: unexpected result returned: %v", i, result)
case <-time.After(25 * time.Millisecond):
}
chain.InsertBlockWithoutSetHead(postBlocks[i])
}
// Verify the blocks with pre-merge blocks and post-merge blocks
var (
headers []*types.Header
seals []bool
)
for _, block := range preBlocks {
headers = append(headers, block.Header())
seals = append(seals, true)
}
for _, block := range postBlocks {
headers = append(headers, block.Header())
seals = append(seals, true)
}
_, results := runEngine.VerifyHeaders(chain, headers, seals)
for i := 0; i < len(headers); i++ {
select {
case result := <-results:
if result != nil {
t.Errorf("test %d: verification failed %v", i, result)
}
case <-time.After(time.Second):
t.Fatalf("test %d: verification timeout", i)
}
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("unexpected result returned: %v", result)
case <-time.After(25 * time.Millisecond):
}
}
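The test above repeats one pattern: take exactly one result from the verification channel with a timeout, then check that the channel stays silent. A compact helper capturing that pattern (a sketch of our own; the test itself inlines the two selects):

// expectOneResult drains a single verification result within a second
// and then ensures no further result arrives, mirroring the paired
// select blocks repeated throughout the test above.
func expectOneResult(t *testing.T, i int, results <-chan error) {
	t.Helper()
	select {
	case result := <-results:
		if result != nil {
			t.Errorf("test %d: verification failed %v", i, result)
		}
	case <-time.After(time.Second):
		t.Fatalf("test %d: verification timeout", i)
	}
	select {
	case result := <-results:
		t.Fatalf("test %d: unexpected result returned: %v", i, result)
	case <-time.After(25 * time.Millisecond):
	}
}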
// Tests that concurrent header verification works, for both good and bad blocks.
func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) }
func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) }


@@ -22,7 +22,6 @@ import (
"fmt"
"io"
"math/big"
mrand "math/rand"
"sort"
"sync"
"sync/atomic"
@@ -208,15 +207,14 @@ type BlockChain struct {
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
forker *ForkChoice
vmConfig vm.Config
shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
}
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
// available in the database. It initialises the default Ethereum Validator
// and Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = defaultCacheConfig
}
@@ -228,27 +226,22 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
futureBlocks, _ := lru.New(maxFutureBlocks)
bc := &BlockChain{
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
}),
quit: make(chan struct{}),
chainmu: syncx.NewClosableMutex(),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
txLookupCache: txLookupCache,
futureBlocks: futureBlocks,
engine: engine,
vmConfig: vmConfig,
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
quit: make(chan struct{}),
chainmu: syncx.NewClosableMutex(),
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
txLookupCache: txLookupCache,
futureBlocks: futureBlocks,
engine: engine,
vmConfig: vmConfig,
}
bc.forker = NewForkChoice(bc, shouldPreserve)
bc.validator = NewBlockValidator(chainConfig, bc, engine)
bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
bc.processor = NewStateProcessor(chainConfig, bc, engine)
@@ -285,6 +278,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
bc.stateCache = state.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
UseVerkle: chainConfig.IsCancun(head.Header().Number),
})
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
@@ -296,7 +296,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
if err != nil {
return nil, err
}
@@ -306,7 +306,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
} else {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
if err := bc.SetHead(head.NumberU64()); err != nil {
if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
return nil, err
}
}
@@ -377,12 +377,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
recover = true
}
bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover, chainConfig.IsCancun(head.Header().Number))
}
// Start future block processor.
bc.wg.Add(1)
go bc.futureBlocksLoop()
go bc.updateFutureBlocks()
// Start tx indexer/unindexer.
if txLookupLimit != nil {
@@ -482,11 +482,11 @@ func (bc *BlockChain) loadLastState() error {
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
_, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
return err
}
// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
// persistent disk layer. Depending on whether the node was fast synced or full, and
@@ -494,7 +494,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
// retaining chain consistency.
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
if !bc.chainmu.TryLock() {
return 0, errChainStopped
}
@@ -509,7 +509,7 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
frozen, _ := bc.db.Ancients()
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
// Rewind the block chain, ensuring we don't end up with a stateless head
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
// chain reparation mechanism without deleting any data!
if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
@@ -610,8 +610,8 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
}
// If SetHead was only called as a chain reparation method, try to skip
// touching the header chain altogether, unless the freezer is broken
if block := bc.CurrentBlock(); block.NumberU64() == head {
if target, force := updateFn(bc.db, block.Header()); force {
if repair {
if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
bc.hc.SetHead(target, updateFn, delFn)
}
} else {
@@ -631,9 +631,9 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
return rootNumber, bc.loadLastState()
}
// FastSyncCommitHead sets the current head block to the one defined by the hash
// SnapSyncCommitHead sets the current head block to the one defined by the hash
// irrespective of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
// Make sure that both the block as well as its state trie exist
block := bc.GetBlockByHash(hash)
if block == nil {
@@ -738,30 +738,24 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// If the block is on a side chain or an unknown one, force other heads onto it too
updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
// Add the block to the canonical chain number scheme and mark as the head
batch := bc.db.NewBatch()
rawdb.WriteHeadHeaderHash(batch, block.Hash())
rawdb.WriteHeadFastBlockHash(batch, block.Hash())
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
rawdb.WriteTxLookupEntriesByBlock(batch, block)
rawdb.WriteHeadBlockHash(batch, block.Hash())
// If the block is better than our head or is on a different chain, force update heads
if updateHeads {
rawdb.WriteHeadHeaderHash(batch, block.Hash())
rawdb.WriteHeadFastBlockHash(batch, block.Hash())
}
// Flush the whole batch into the disk, exit the node if failed
if err := batch.Write(); err != nil {
log.Crit("Failed to update chain indexes and markers", "err", err)
}
// Update all in-memory chain markers in the last step
if updateHeads {
bc.hc.SetCurrentHeader(block.Header())
bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
}
bc.hc.SetCurrentHeader(block.Header())
bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
bc.currentBlock.Store(block)
headBlockGauge.Update(int64(block.NumberU64()))
}
@@ -877,12 +871,6 @@ const (
SideStatTy
)
// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
number uint64
hash common.Hash
}
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
@@ -928,13 +916,17 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Rewind may have occurred, skip in that case.
if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentFastBlock.Store(head)
headFastBlockGauge.Update(int64(head.NumberU64()))
return true
reorg, err := bc.forker.ReorgNeeded(bc.CurrentFastBlock().Header(), head.Header())
if err != nil {
log.Warn("Reorg failed", "err", err)
return false
} else if !reorg {
return false
}
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentFastBlock.Store(head)
headFastBlockGauge.Update(int64(head.NumberU64()))
return true
}
return false
}
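The hunk above swaps a raw total-difficulty comparison for bc.forker.ReorgNeeded. ForkChoice itself is introduced elsewhere in this changeset; conceptually, pre-merge it keeps the heaviest-chain rule (plus a same-TD tiebreak not shown here), and post-merge it defers to the external consensus engine. A rough sketch of that decision, with all names assumed rather than taken from the real type:

// forkChoice is a hypothetical stand-in for the ForkChoice type the
// diff delegates to, sketching the decision behind ReorgNeeded.
type forkChoice struct {
	getTd        func(hash common.Hash, number uint64) *big.Int
	posFinalized func() bool // e.g. backed by Merger.PoSFinalized
}

func (f *forkChoice) reorgNeeded(current, extern *types.Header) (bool, error) {
	if f.posFinalized() {
		// Post-merge: blindly trust the external consensus engine.
		return true, nil
	}
	localTd := f.getTd(current.Hash(), current.Number.Uint64())
	externTd := f.getTd(extern.Hash(), extern.Number.Uint64())
	if localTd == nil || externTd == nil {
		return false, errors.New("missing total difficulty")
	}
	// Pre-merge: heaviest chain wins.
	return externTd.Cmp(localTd) > 0, nil
}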
@@ -1181,30 +1173,15 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
return nil
}
// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if !bc.chainmu.TryLock() {
return NonStatTy, errInsertionInterrupted
}
defer bc.chainmu.Unlock()
return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
}
// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if bc.insertStopped() {
return NonStatTy, errInsertionInterrupted
}
// writeBlockWithState writes block, metadata and corresponding state data to the
// database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
// Calculate the total difficulty of the block
ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil {
return NonStatTy, consensus.ErrUnknownAncestor
return consensus.ErrUnknownAncestor
}
// Make sure no inconsistent state is leaked during insertion
currentBlock := bc.CurrentBlock()
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Irrelevant of the canonical status, write the block itself to the database.
@@ -1222,15 +1199,13 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// Commit all cached state changes into underlying memory database.
root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return NonStatTy, err
return err
}
triedb := bc.stateCache.TrieDB()
// If we're running an archive node, always flush
if bc.cacheConfig.TrieDirtyDisabled {
if err := triedb.Commit(root, false, nil); err != nil {
return NonStatTy, err
}
return triedb.Commit(root, false, nil)
} else {
// Full but not archive node, do proper garbage collection
triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
@@ -1278,23 +1253,30 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
}
}
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := externTd.Cmp(localTd) > 0
currentBlock = bc.CurrentBlock()
if !reorg && externTd.Cmp(localTd) == 0 {
// Split same-difficulty blocks by number, then preferentially select
// the block generated by the local miner as the canonical block.
if block.NumberU64() < currentBlock.NumberU64() {
reorg = true
} else if block.NumberU64() == currentBlock.NumberU64() {
var currentPreserve, blockPreserve bool
if bc.shouldPreserve != nil {
currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
}
reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
}
return nil
}
// WriteBlockAndSetHead writes the block and all associated state to the database,
// and applies the block as the new chain head.
func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if !bc.chainmu.TryLock() {
return NonStatTy, errChainStopped
}
defer bc.chainmu.Unlock()
return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
}
// writeBlockAndSetHead writes the block and all associated state to the database,
// and also it applies the given block as the new chain head. This function expects
// the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if err := bc.writeBlockWithState(block, receipts, logs, state); err != nil {
return NonStatTy, err
}
currentBlock := bc.CurrentBlock()
reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header())
if err != nil {
return NonStatTy, err
}
if reorg {
// Reorganise the chain if the parent is not the head block
@@ -1320,7 +1302,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
// In theory we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of
// canonical blocks. Avoid firing too much ChainHeadEvents,
// canonical blocks. Avoid firing too many ChainHeadEvents;
// we will fire an accumulated ChainHeadEvent and disable
// firing the event here.
if emitHeadEvent {
@@ -1335,11 +1317,18 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
//
// TODO: after the transition, future blocks shouldn't be kept, because
// they are no longer checked on the Geth side.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time() > max {
return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
}
if block.Difficulty().Cmp(common.Big0) == 0 {
// Never add PoS blocks into the future queue
return nil
}
bc.futureBlocks.Add(block.Hash(), block)
return nil
}
@@ -1347,15 +1336,12 @@ func (bc *BlockChain) addFutureBlock(block *types.Block) error {
// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
// wrong. After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Sanity check that we have something meaningful to import
if len(chain) == 0 {
return 0, nil
}
bc.blockProcFeed.Send(true)
defer bc.blockProcFeed.Send(false)
@@ -1374,26 +1360,12 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
}
}
// Pre-check passed, start the full block imports.
// Pre-checks passed, start the full block imports
if !bc.chainmu.TryLock() {
return 0, errChainStopped
}
defer bc.chainmu.Unlock()
return bc.insertChain(chain, true)
}
// InsertChainWithoutSealVerification works exactly the same as InsertChain,
// except that seal verification is omitted.
func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) {
bc.blockProcFeed.Send(true)
defer bc.blockProcFeed.Send(false)
if !bc.chainmu.TryLock() {
return 0, errChainStopped
}
defer bc.chainmu.Unlock()
return bc.insertChain(types.Blocks([]*types.Block{block}), false)
return bc.insertChain(chain, true, true)
}
// insertChain is the internal implementation of InsertChain, which assumes that
@@ -1404,7 +1376,7 @@ func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (in
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) (int, error) {
// If the chain is terminating, don't even bother starting up.
if bc.insertStopped() {
return 0, nil
@@ -1446,14 +1418,23 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// from the canonical chain, which has not been verified.
// Skip all known blocks that are behind us.
var (
current = bc.CurrentBlock()
localTd = bc.GetTd(current.Hash(), current.NumberU64())
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
reorg bool
current = bc.CurrentBlock()
)
for block != nil && bc.skipBlock(err, it) {
externTd = new(big.Int).Add(externTd, block.Difficulty())
if localTd.Cmp(externTd) < 0 {
break
reorg, err = bc.forker.ReorgNeeded(current.Header(), block.Header())
if err != nil {
return it.index, err
}
if reorg {
// Switch to import mode if the forker says the reorg is necessary
// and the block is not on the canonical chain.
// In eth2 the forker always returns true for the reorg decision (blindly
// trusting the external consensus engine), but in order to prevent
// unnecessary reorgs when importing known blocks, that special case is
// handled here.
if block.NumberU64() > current.NumberU64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
break
}
}
log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
stats.ignored++
@@ -1480,11 +1461,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Falls through to the block import
}
switch {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
// First block is pruned
case errors.Is(err, consensus.ErrPrunedAncestor):
log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
return bc.insertSideChain(block, it)
if setHead {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
return bc.insertSideChain(block, it)
} else {
// We're post-merge and the parent is pruned, try to recover the parent state
log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
return it.index, bc.recoverAncestors(block)
}
// First block is future, shove it (and all children) to the future queue (unknown ancestor)
case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
@@ -1607,7 +1594,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Process block using the parent state as reference point
substart := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
var (
usedGas uint64
receipts types.Receipts
logs []*types.Log
)
receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
@@ -1639,12 +1631,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Update the metrics touched during block validation
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
// Write the block to the chain and get the status.
substart = time.Now()
status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
var status WriteStatus
if !setHead {
// Don't set the head, only insert the block
err = bc.writeBlockWithState(block, receipts, logs, statedb)
} else {
status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
}
atomic.StoreUint32(&followupInterrupt, 1)
if err != nil {
return it.index, err
@@ -1657,6 +1654,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
blockInsertTimer.UpdateSince(start)
if !setHead {
// We did not setHead, so we don't have any stats to update
log.Info("Inserted block", "number", block.Number(), "hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", common.PrettyDuration(time.Since(start)))
return it.index, nil
}
switch status {
case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
@@ -1715,10 +1718,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
// insertSideChain is only used pre-merge.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
externTd *big.Int
lastBlock = block
current = bc.CurrentBlock()
)
// The first sidechain block error is already verified to be ErrPrunedAncestor.
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
@@ -1769,6 +1774,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
lastBlock = block
}
// At this point, we've written all sidechain blocks to database. Loop ended
// either on some other error or all were processed. If there was some other
@@ -1776,8 +1782,12 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
//
// If the externTd was larger than our local TD, we now need to reimport the previous
// blocks to regenerate the required state
localTd := bc.GetTd(current.Hash(), current.NumberU64())
if localTd.Cmp(externTd) > 0 {
reorg, err := bc.forker.ReorgNeeded(current.Header(), lastBlock.Header())
if err != nil {
return it.index, err
}
if !reorg {
localTd := bc.GetTd(current.Hash(), current.NumberU64())
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
return it.index, err
}
@@ -1813,7 +1823,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, err := bc.insertChain(blocks, false); err != nil {
if _, err := bc.insertChain(blocks, false, true); err != nil {
return 0, err
}
blocks, memory = blocks[:0], 0
@@ -1827,14 +1837,98 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
}
if len(blocks) > 0 {
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
return bc.insertChain(blocks, false, true)
}
return 0, nil
}
// recoverAncestors finds the closest ancestor with available state and re-executes
// all the ancestor blocks since then.
// recoverAncestors is only used post-merge.
func (bc *BlockChain) recoverAncestors(block *types.Block) error {
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
hashes []common.Hash
numbers []uint64
parent = block
)
for parent != nil && !bc.HasState(parent.Root()) {
hashes = append(hashes, parent.Hash())
numbers = append(numbers, parent.NumberU64())
parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
// If the chain is terminating, stop iteration
if bc.insertStopped() {
log.Debug("Abort during blocks iteration")
return errInsertionInterrupted
}
}
if parent == nil {
return errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
for i := len(hashes) - 1; i >= 0; i-- {
// If the chain is terminating, stop processing blocks
if bc.insertStopped() {
log.Debug("Abort during blocks processing")
return errInsertionInterrupted
}
var b *types.Block
if i == 0 {
b = block
} else {
b = bc.GetBlock(hashes[i], numbers[i])
}
if _, err := bc.insertChain(types.Blocks{b}, false, false); err != nil {
return err
}
}
return nil
}
// collectLogs collects the logs that were generated or removed during
// the processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
var logs []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
l := *log
if removed {
l.Removed = true
}
logs = append(logs, &l)
}
}
return logs
}
// mergeLogs returns a merged log slice with specified sort order.
func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log {
var ret []*types.Log
if reverse {
for i := len(logs) - 1; i >= 0; i-- {
ret = append(ret, logs[i]...)
}
} else {
for i := 0; i < len(logs); i++ {
ret = append(ret, logs[i]...)
}
}
return ret
}
// reorg takes two blocks, an old chain and a new chain, reconstructs the blocks,
// inserts them to be part of the new canonical chain, and accumulates potential
// missing transactions, posting an event about them.
// Note the new head block won't be processed here; callers need to handle it
// externally.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
newChain types.Blocks
@@ -1846,49 +1940,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedLogs [][]*types.Log
rebirthLogs [][]*types.Log
// collectLogs collects the logs that were generated or removed during
// the processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn
collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
var logs []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
l := *log
if removed {
l.Removed = true
}
logs = append(logs, &l)
}
}
if len(logs) > 0 {
if removed {
deletedLogs = append(deletedLogs, logs)
} else {
rebirthLogs = append(rebirthLogs, logs)
}
}
}
// mergeLogs returns a merged log slice with specified sort order.
mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
var ret []*types.Log
if reverse {
for i := len(logs) - 1; i >= 0; i-- {
ret = append(ret, logs[i]...)
}
} else {
for i := 0; i < len(logs); i++ {
ret = append(ret, logs[i]...)
}
}
return ret
}
)
// Reduce the longer chain to the same number as the shorter one
if oldBlock.NumberU64() > newBlock.NumberU64() {
@@ -1896,7 +1947,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash(), true)
// Collect deleted logs for notification
logs := bc.collectLogs(oldBlock.Hash(), true)
if len(logs) > 0 {
deletedLogs = append(deletedLogs, logs)
}
}
} else {
// New chain is longer, stash all blocks away for subsequent insertion
@@ -1921,8 +1977,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// Remove an old block as well as stash away a new block
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash(), true)
// Collect deleted logs for notification
logs := bc.collectLogs(oldBlock.Hash(), true)
if len(logs) > 0 {
deletedLogs = append(deletedLogs, logs)
}
newChain = append(newChain, newBlock)
// Step back with both chains
@@ -1948,8 +2008,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
blockReorgAddMeter.Mark(int64(len(newChain)))
blockReorgDropMeter.Mark(int64(len(oldChain)))
blockReorgMeter.Mark(1)
} else if len(newChain) > 0 {
// A special case happens in the post-merge stage: the current head is an
// ancestor of the new head, but the two blocks are not consecutive
log.Info("Extend chain", "add", len(newChain), "number", newChain[0].NumberU64(), "hash", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
} else {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
// len(newChain) == 0 && len(oldChain) > 0
// rewind the canonical chain to a lower point.
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
}
// Insert the new chain(except the head block(reverse order)),
// taking care of the proper incremental order.
@@ -1958,8 +2025,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
bc.writeHeadBlock(newChain[i])
// Collect reborn logs due to chain reorg
collectLogs(newChain[i].Hash(), false)
logs := bc.collectLogs(newChain[i].Hash(), false)
if len(logs) > 0 {
rebirthLogs = append(rebirthLogs, logs)
}
// Collect the new added transactions.
addedTxs = append(addedTxs, newChain[i].Transactions()...)
}
@@ -1999,12 +2068,54 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return nil
}
// futureBlocksLoop processes the 'future block' queue.
func (bc *BlockChain) futureBlocksLoop() {
defer bc.wg.Done()
// InsertBlockWithoutSetHead executes the block, runs the necessary verification
// on it and then persists the block and the associated state into the database.
// The key difference from InsertChain is that it won't update the canonical
// chain. It relies on the additional SetChainHead call to finalize the entire
// procedure.
func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
if !bc.chainmu.TryLock() {
return errChainStopped
}
defer bc.chainmu.Unlock()
_, err := bc.insertChain(types.Blocks{block}, true, false)
return err
}
// SetChainHead rewinds the chain to make the specified block the new head
// block. It's possible that the relevant head state is missing after the
// reorg; this can be fixed by inserting a new block, which triggers the
// re-execution.
func (bc *BlockChain) SetChainHead(newBlock *types.Block) error {
if !bc.chainmu.TryLock() {
return errChainStopped
}
defer bc.chainmu.Unlock()
// Run the reorg if necessary and set the given block as new head.
if newBlock.ParentHash() != bc.CurrentBlock().Hash() {
if err := bc.reorg(bc.CurrentBlock(), newBlock); err != nil {
return err
}
}
bc.writeHeadBlock(newBlock)
// Emit events
logs := bc.collectLogs(newBlock.Hash(), false)
bc.chainFeed.Send(ChainEvent{Block: newBlock, Hash: newBlock.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
bc.chainHeadFeed.Send(ChainHeadEvent{Block: newBlock})
log.Info("Set the chain head", "number", newBlock.Number(), "hash", newBlock.Hash())
return nil
}
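Taken together, these two entry points form the split import flow the comments above describe: execute and persist first, set the head second. A minimal sketch of how a post-merge caller might chain them; the importAndSetHead wrapper (and its package) are hypothetical and not part of this diff:
package enginedemo
import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
)
// importAndSetHead is a hypothetical wrapper combining the two-step flow.
func importAndSetHead(bc *core.BlockChain, block *types.Block) error {
// Step 1: verify, execute and persist the block and its state; the
// canonical chain markers are deliberately left untouched.
if err := bc.InsertBlockWithoutSetHead(block); err != nil {
return err
}
// Step 2: reorg onto the block's ancestry if needed, promote it to
// chain head and emit the chain/log events.
return bc.SetChainHead(block)
}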
func (bc *BlockChain) updateFutureBlocks() {
futureTimer := time.NewTicker(5 * time.Second)
defer futureTimer.Stop()
defer bc.wg.Done()
for {
select {
case <-futureTimer.C:
@@ -2185,6 +2296,6 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i
return 0, errChainStopped
}
defer bc.chainmu.Unlock()
_, err := bc.hc.InsertHeaderChain(chain, start)
_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
return 0, err
}


@@ -79,10 +79,10 @@ func testShortRepair(t *testing.T, snapshots bool) {
// already committed, after which the process crashed. In this case we expect the full
// chain to be rolled back to the committed block, but the chain data itself is left in
// the database for replaying.
func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) }
func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) }
func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) }
func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) }
func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
func testShortSnapSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -119,10 +119,10 @@ func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
// not yet committed, but the process crashed. In this case we expect the chain to
// detect that it was fast syncing and not delete anything, since we can just pick
// up directly where we left off.
func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) }
func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) }
func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) }
func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) }
func testShortFastSyncingRepair(t *testing.T, snapshots bool) {
func testShortSnapSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -203,14 +203,14 @@ func testShortOldForkedRepair(t *testing.T, snapshots bool) {
// crashed. In this test scenario the side chain is below the committed block. In
// this case we expect the canonical chain to be rolled back to the committed block,
// but the chain data itself is left in the database for replaying.
func TestShortOldForkedFastSyncedRepair(t *testing.T) {
testShortOldForkedFastSyncedRepair(t, false)
func TestShortOldForkedSnapSyncedRepair(t *testing.T) {
testShortOldForkedSnapSyncedRepair(t, false)
}
func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) {
testShortOldForkedFastSyncedRepair(t, true)
func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
testShortOldForkedSnapSyncedRepair(t, true)
}
func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -250,14 +250,14 @@ func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
// test scenario the side chain is below the committed block. In this case we expect
// the chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
func TestShortOldForkedFastSyncingRepair(t *testing.T) {
testShortOldForkedFastSyncingRepair(t, false)
func TestShortOldForkedSnapSyncingRepair(t *testing.T) {
testShortOldForkedSnapSyncingRepair(t, false)
}
func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) {
testShortOldForkedFastSyncingRepair(t, true)
func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
testShortOldForkedSnapSyncingRepair(t, true)
}
func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) {
func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -340,14 +340,14 @@ func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
// crashed. In this test scenario the side chain reaches above the committed block.
// In this case we expect the canonical chain to be rolled back to the committed
// block, but the chain data itself is left in the database for replaying.
func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
testShortNewlyForkedFastSyncedRepair(t, false)
func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) {
testShortNewlyForkedSnapSyncedRepair(t, false)
}
func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) {
testShortNewlyForkedFastSyncedRepair(t, true)
func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
testShortNewlyForkedSnapSyncedRepair(t, true)
}
func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6
@@ -387,14 +387,14 @@ func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
// this test scenario the side chain reaches above the committed block. In this
// case we expect the chain to detect that it was fast syncing and not delete
// anything, since we can just pick up directly where we left off.
func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
testShortNewlyForkedFastSyncingRepair(t, false)
func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) {
testShortNewlyForkedSnapSyncingRepair(t, false)
}
func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) {
testShortNewlyForkedFastSyncingRepair(t, true)
func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
testShortNewlyForkedSnapSyncingRepair(t, true)
}
func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) {
func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6
@@ -475,14 +475,14 @@ func testShortReorgedRepair(t *testing.T, snapshots bool) {
// the fast sync pivot point was already committed to disk and then the process
// crashed. In this case we expect the canonical chain to be rolled back to the
// committed block, but the chain data itself is left in the database for replaying.
func TestShortReorgedFastSyncedRepair(t *testing.T) {
testShortReorgedFastSyncedRepair(t, false)
func TestShortReorgedSnapSyncedRepair(t *testing.T) {
testShortReorgedSnapSyncedRepair(t, false)
}
func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) {
testShortReorgedFastSyncedRepair(t, true)
func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) {
testShortReorgedSnapSyncedRepair(t, true)
}
func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -521,14 +521,14 @@ func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
// the fast sync pivot point was not yet committed, but the process crashed. In
// this case we expect the chain to detect that it was fast syncing and not delete
// anything, since we can just pick up directly where we left off.
func TestShortReorgedFastSyncingRepair(t *testing.T) {
testShortReorgedFastSyncingRepair(t, false)
func TestShortReorgedSnapSyncingRepair(t *testing.T) {
testShortReorgedSnapSyncingRepair(t, false)
}
func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) {
testShortReorgedFastSyncingRepair(t, true)
func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) {
testShortReorgedSnapSyncingRepair(t, true)
}
func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) {
func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -656,14 +656,14 @@ func testLongDeepRepair(t *testing.T, snapshots bool) {
// sync pivot point - newer than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwards kept as fast sync data.
func TestLongFastSyncedShallowRepair(t *testing.T) {
testLongFastSyncedShallowRepair(t, false)
func TestLongSnapSyncedShallowRepair(t *testing.T) {
testLongSnapSyncedShallowRepair(t, false)
}
func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongFastSyncedShallowRepair(t, true)
func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongSnapSyncedShallowRepair(t, true)
}
func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -705,10 +705,10 @@ func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// sync pivot point - older than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwards deleted.
func TestLongFastSyncedDeepRepair(t *testing.T) { testLongFastSyncedDeepRepair(t, false) }
func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) }
func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) }
func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -750,14 +750,14 @@ func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// process crashed. In this case we expect the chain to detect that it was fast
// syncing and not delete anything, since we can just pick up directly where we
// left off.
func TestLongFastSyncingShallowRepair(t *testing.T) {
testLongFastSyncingShallowRepair(t, false)
func TestLongSnapSyncingShallowRepair(t *testing.T) {
testLongSnapSyncingShallowRepair(t, false)
}
func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongFastSyncingShallowRepair(t, true)
func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongSnapSyncingShallowRepair(t, true)
}
func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -800,10 +800,10 @@ func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// process crashed. In this case we expect the chain to detect that it was fast
// syncing and not delete anything, since we can just pick up directly where we
// left off.
func TestLongFastSyncingDeepRepair(t *testing.T) { testLongFastSyncingDeepRepair(t, false) }
func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) }
func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) }
func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -946,14 +946,14 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
// the side chain is below the committed block. In this case we expect the chain
// to be rolled back to the committed block, with everything afterwards kept as
// fast sync data; the side chain is completely nuked by the freezer.
func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
testLongOldForkedFastSyncedShallowRepair(t, false)
func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) {
testLongOldForkedSnapSyncedShallowRepair(t, false)
}
func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncedShallowRepair(t, true)
func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncedShallowRepair(t, true)
}
func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -998,14 +998,14 @@ func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// the side chain is below the committed block. In this case we expect the canonical
// chain to be rolled back to the committed block, with everything afterwards deleted;
// the side chain is completely nuked by the freezer.
func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
testLongOldForkedFastSyncedDeepRepair(t, false)
func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) {
testLongOldForkedSnapSyncedDeepRepair(t, false)
}
func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncedDeepRepair(t, true)
func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncedDeepRepair(t, true)
}
func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -1049,14 +1049,14 @@ func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// chain is below the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
testLongOldForkedFastSyncingShallowRepair(t, false)
func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) {
testLongOldForkedSnapSyncingShallowRepair(t, false)
}
func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncingShallowRepair(t, true)
func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncingShallowRepair(t, true)
}
func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -1101,14 +1101,14 @@ func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// chain is below the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
testLongOldForkedFastSyncingDeepRepair(t, false)
func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) {
testLongOldForkedSnapSyncingDeepRepair(t, false)
}
func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncingDeepRepair(t, true)
func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncingDeepRepair(t, true)
}
func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -1252,14 +1252,14 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
// the side chain is above the committed block. In this case we expect the chain
// to be rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain is completely nuked by the freezer.
func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
testLongNewerForkedFastSyncedShallowRepair(t, false)
func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) {
testLongNewerForkedSnapSyncedShallowRepair(t, false)
}
func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncedShallowRepair(t, true)
func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncedShallowRepair(t, true)
}
func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1304,14 +1304,14 @@ func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// the side chain is above the committed block. In this case we expect the canonical
// chain to be rolled back to the committed block, with everything afterwards deleted;
// the side chain is completely nuked by the freezer.
func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
testLongNewerForkedFastSyncedDeepRepair(t, false)
func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) {
testLongNewerForkedSnapSyncedDeepRepair(t, false)
}
func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncedDeepRepair(t, true)
func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncedDeepRepair(t, true)
}
func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1355,14 +1355,14 @@ func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// chain is above the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
testLongNewerForkedFastSyncingShallowRepair(t, false)
func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) {
testLongNewerForkedSnapSyncingShallowRepair(t, false)
}
func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncingShallowRepair(t, true)
func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncingShallowRepair(t, true)
}
func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1407,14 +1407,14 @@ func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// chain is above the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
testLongNewerForkedFastSyncingDeepRepair(t, false)
func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) {
testLongNewerForkedSnapSyncingDeepRepair(t, false)
}
func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncingDeepRepair(t, true)
func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncingDeepRepair(t, true)
}
func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1552,14 +1552,14 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
// expect the chain to be rolled back to the committed block, with everything
// afterwards kept as fast sync data. The side chain is completely nuked by the
// freezer.
func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
testLongReorgedFastSyncedShallowRepair(t, false)
func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) {
testLongReorgedSnapSyncedShallowRepair(t, false)
}
func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongReorgedFastSyncedShallowRepair(t, true)
func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncedShallowRepair(t, true)
}
func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1603,14 +1603,14 @@ func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// was already committed to disk and then the process crashed. In this case we
// expect the canonical chain to be rolled back to the committed block, with
// everything afterwards deleted. The side chain is completely nuked by the freezer.
func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
testLongReorgedFastSyncedDeepRepair(t, false)
func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) {
testLongReorgedSnapSyncedDeepRepair(t, false)
}
func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
testLongReorgedFastSyncedDeepRepair(t, true)
func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncedDeepRepair(t, true)
}
func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1653,14 +1653,14 @@ func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// was not yet committed, but the process crashed. In this case we expect the
// chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
testLongReorgedFastSyncingShallowRepair(t, false)
func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) {
testLongReorgedSnapSyncingShallowRepair(t, false)
}
func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongReorgedFastSyncingShallowRepair(t, true)
func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncingShallowRepair(t, true)
}
func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1704,14 +1704,14 @@ func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// was not yet committed, but the process crashed. In this case we expect the
// chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
testLongReorgedFastSyncingDeepRepair(t, false)
func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) {
testLongReorgedSnapSyncingDeepRepair(t, false)
}
func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
testLongReorgedFastSyncingDeepRepair(t, true)
func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncingDeepRepair(t, true)
}
func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1829,7 +1829,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
// Pull the plug on the database, simulating a hard crash
db.Close()
// Start a new blockchain back up and see where the repait leads us
// Start a new blockchain back up and see where the repair leads us
db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)


@@ -194,10 +194,10 @@ func testShortSetHead(t *testing.T, snapshots bool) {
// Everything above the sethead point should be deleted. In between the committed
// block and the requested head the data can remain as "fast sync" data to avoid
// redownloading it.
func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) }
func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }
func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) }
func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) }
func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -236,10 +236,10 @@ func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
// detect that it was fast syncing and delete everything from the new head, since
// we can just pick up fast syncing from there. The head full block should be set
// to the genesis.
func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) }
func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }
func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) }
func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) }
func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -326,14 +326,14 @@ func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
// block. Everything above the sethead point should be deleted. In between the
// committed block and the requested head the data can remain as "fast sync" data
// to avoid redownloading it. The side chain should be left alone as it was shorter.
func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
testShortOldForkedFastSyncedSetHead(t, false)
func TestShortOldForkedSnapSyncedSetHead(t *testing.T) {
testShortOldForkedSnapSyncedSetHead(t, false)
}
func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
testShortOldForkedFastSyncedSetHead(t, true)
func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
testShortOldForkedSnapSyncedSetHead(t, true)
}
func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -375,14 +375,14 @@ func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
// the chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The head full block
// should be set to the genesis.
func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
testShortOldForkedFastSyncingSetHead(t, false)
func TestShortOldForkedSnapSyncingSetHead(t *testing.T) {
testShortOldForkedSnapSyncingSetHead(t, false)
}
func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
testShortOldForkedFastSyncingSetHead(t, true)
func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
testShortOldForkedSnapSyncingSetHead(t, true)
}
func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -478,14 +478,14 @@ func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
// The side chain could be left alone if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
testShortNewlyForkedFastSyncedSetHead(t, false)
func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) {
testShortNewlyForkedSnapSyncedSetHead(t, false)
}
func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
testShortNewlyForkedFastSyncedSetHead(t, true)
func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
testShortNewlyForkedSnapSyncedSetHead(t, true)
}
func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -531,14 +531,14 @@ func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
// The side chain could be left alone if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
testShortNewlyForkedFastSyncingSetHead(t, false)
func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) {
testShortNewlyForkedSnapSyncingSetHead(t, false)
}
func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
testShortNewlyForkedFastSyncingSetHead(t, true)
func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
testShortNewlyForkedSnapSyncingSetHead(t, true)
}
func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -634,14 +634,14 @@ func testShortReorgedSetHead(t *testing.T, snapshots bool) {
// The side chain could be left alone if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortReorgedFastSyncedSetHead(t *testing.T) {
testShortReorgedFastSyncedSetHead(t, false)
func TestShortReorgedSnapSyncedSetHead(t *testing.T) {
testShortReorgedSnapSyncedSetHead(t, false)
}
func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) {
testShortReorgedFastSyncedSetHead(t, true)
func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
testShortReorgedSnapSyncedSetHead(t, true)
}
func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -686,14 +686,14 @@ func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
// The side chain could be left alone if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortReorgedFastSyncingSetHead(t *testing.T) {
testShortReorgedFastSyncingSetHead(t, false)
func TestShortReorgedSnapSyncingSetHead(t *testing.T) {
testShortReorgedSnapSyncingSetHead(t, false)
}
func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) {
testShortReorgedFastSyncingSetHead(t, true)
func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
testShortReorgedSnapSyncingSetHead(t, true)
}
func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) {
func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -829,14 +829,14 @@ func testLongDeepSetHead(t *testing.T, snapshots bool) {
// back to the committed block. Everything above the sethead point should be
// deleted. In between the committed block and the requested head the data can
// remain as "fast sync" data to avoid redownloading it.
func TestLongFastSyncedShallowSetHead(t *testing.T) {
testLongFastSyncedShallowSetHead(t, false)
func TestLongSnapSyncedShallowSetHead(t *testing.T) {
testLongSnapSyncedShallowSetHead(t, false)
}
func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongFastSyncedShallowSetHead(t, true)
func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongSnapSyncedShallowSetHead(t, true)
}
func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -880,10 +880,10 @@ func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// which sethead was called. In this case we expect the full chain to be rolled
// back to the committed block. Since the ancient limit was underflowed,
// everything onwards needs to be deleted to avoid creating a gap.
func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) }
func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }
func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) }
func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) }
func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -926,14 +926,14 @@ func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// sethead was called. In this case we expect the chain to detect that it was fast
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
func TestLongFastSyncingShallowSetHead(t *testing.T) {
testLongFastSyncingShallowSetHead(t, false)
func TestLongSnapSyncingShallowSetHead(t *testing.T) {
testLongSnapSyncingShallowSetHead(t, false)
}
func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongFastSyncingShallowSetHead(t, true)
func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongSnapSyncingShallowSetHead(t, true)
}
func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -977,14 +977,14 @@ func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// sethead was called. In this case we expect the chain to detect that it was fast
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
func TestLongFastSyncingDeepSetHead(t *testing.T) {
testLongFastSyncingDeepSetHead(t, false)
func TestLongSnapSyncingDeepSetHead(t *testing.T) {
testLongSnapSyncingDeepSetHead(t, false)
}
func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongFastSyncingDeepSetHead(t, true)
func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongSnapSyncingDeepSetHead(t, true)
}
func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -1132,14 +1132,14 @@ func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
// sethead point should be deleted. In between the committed block and the
// requested head the data can remain as "fast sync" data to avoid redownloading
// it. The side chain is nuked by the freezer.
func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
testLongOldForkedFastSyncedShallowSetHead(t, false)
func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) {
testLongOldForkedSnapSyncedShallowSetHead(t, false)
}
func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncedShallowSetHead(t, true)
func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncedShallowSetHead(t, true)
}
func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -1186,14 +1186,14 @@ func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// full chain to be rolled back to the committed block. Since the ancient limit was
// underflowed, everything onwards needs to be deleted to avoid creating a gap. The
// side chain is nuked by the freezer.
func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
testLongOldForkedFastSyncedDeepSetHead(t, false)
func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) {
testLongOldForkedSnapSyncedDeepSetHead(t, false)
}
func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncedDeepSetHead(t, true)
func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncedDeepSetHead(t, true)
}
func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -1239,14 +1239,14 @@ func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// that it was fast syncing and delete everything from the new head, since we can
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
testLongOldForkedFastSyncingShallowSetHead(t, false)
func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) {
testLongOldForkedSnapSyncingShallowSetHead(t, false)
}
func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncingShallowSetHead(t, true)
func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncingShallowSetHead(t, true)
}
func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -1293,14 +1293,14 @@ func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// that it was fast syncing and delete everything from the new head, since we can
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
testLongOldForkedFastSyncingDeepSetHead(t, false)
func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) {
testLongOldForkedSnapSyncingDeepSetHead(t, false)
}
func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedFastSyncingDeepSetHead(t, true)
func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongOldForkedSnapSyncingDeepSetHead(t, true)
}
func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -1446,15 +1446,15 @@ func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is above the committed block. In this case the freezer will delete
// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
testLongNewerForkedFastSyncedShallowSetHead(t, false)
// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead.
func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) {
testLongNewerForkedSnapSyncedShallowSetHead(t, false)
}
func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncedShallowSetHead(t, true)
func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncedShallowSetHead(t, true)
}
func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1498,15 +1498,15 @@ func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is above the committed block. In this case the freezer will delete
// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
testLongNewerForkedFastSyncedDeepSetHead(t, false)
// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead.
func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) {
testLongNewerForkedSnapSyncedDeepSetHead(t, false)
}
func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncedDeepSetHead(t, true)
func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncedDeepSetHead(t, true)
}
func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1549,15 +1549,15 @@ func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongFastSyncinghallowSetHead.
func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
testLongNewerForkedFastSyncingShallowSetHead(t, false)
// sidechain since it's dangling, reverting to TestLongSnapSyncingShallowSetHead.
func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) {
testLongNewerForkedSnapSyncingShallowSetHead(t, false)
}
func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncingShallowSetHead(t, true)
func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncingShallowSetHead(t, true)
}
func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1601,15 +1601,15 @@ func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool)
// side chain, where the fast sync pivot point - older than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
testLongNewerForkedFastSyncingDeepSetHead(t, false)
// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead.
func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) {
testLongNewerForkedSnapSyncingDeepSetHead(t, false)
}
func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedFastSyncingDeepSetHead(t, true)
func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongNewerForkedSnapSyncingDeepSetHead(t, true)
}
func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1745,15 +1745,15 @@ func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this case the
// freezer will delete the sidechain since it's dangling, reverting to
// TestLongFastSyncedShallowSetHead.
func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
testLongReorgedFastSyncedShallowSetHead(t, false)
// TestLongSnapSyncedShallowSetHead.
func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) {
testLongReorgedSnapSyncedShallowSetHead(t, false)
}
func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongReorgedFastSyncedShallowSetHead(t, true)
func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncedShallowSetHead(t, true)
}
func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1797,15 +1797,15 @@ func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this case the
// freezer will delete the sidechain since it's dangling, reverting to
// TestLongFastSyncedDeepSetHead.
func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
testLongReorgedFastSyncedDeepSetHead(t, false)
// TestLongSnapSyncedDeepSetHead.
func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) {
testLongReorgedSnapSyncedDeepSetHead(t, false)
}
func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
testLongReorgedFastSyncedDeepSetHead(t, true)
func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncedDeepSetHead(t, true)
}
func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1850,14 +1850,14 @@ func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The side chain is
// completely nuked by the freezer.
func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
testLongReorgedFastSyncingShallowSetHead(t, false)
func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) {
testLongReorgedSnapSyncingShallowSetHead(t, false)
}
func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongReorgedFastSyncingShallowSetHead(t, true)
func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncingShallowSetHead(t, true)
}
func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1903,14 +1903,14 @@ func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The side chain is
// completely nuked by the freezer.
func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
testLongReorgedFastSyncingDeepSetHead(t, false)
func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) {
testLongReorgedSnapSyncingDeepSetHead(t, false)
}
func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongReorgedFastSyncingDeepSetHead(t, true)
func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
testLongReorgedSnapSyncingDeepSetHead(t, true)
}
func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26


@@ -28,13 +28,16 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
@@ -210,6 +213,55 @@ func TestLastBlock(t *testing.T) {
}
}
// Test inserts the blocks/headers after the fork choice rule has been changed.
// The chain is reorged to whichever chain is specified.
func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) {
// Copy old chain up to #i into a new db
db, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
defer blockchain2.Stop()
// Assert the chains have the same header/block at #i
var hash1, hash2 common.Hash
if full {
hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
} else {
hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
}
if hash1 != hash2 {
t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
}
// Extend the newly created chain
if full {
blockChainB := makeBlockChain(blockchain2.CurrentBlock(), n, ethash.NewFaker(), db, forkSeed)
if _, err := blockchain2.InsertChain(blockChainB); err != nil {
t.Fatalf("failed to insert forking chain: %v", err)
}
if blockchain2.CurrentBlock().NumberU64() != blockChainB[len(blockChainB)-1].NumberU64() {
t.Fatalf("failed to reorg to the given chain")
}
if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() {
t.Fatalf("failed to reorg to the given chain")
}
} else {
headerChainB := makeHeaderChain(blockchain2.CurrentHeader(), n, ethash.NewFaker(), db, forkSeed)
if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
t.Fatalf("failed to insert forking chain: %v", err)
}
if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() {
t.Fatalf("failed to reorg to the given chain")
}
if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() {
t.Fatalf("failed to reorg to the given chain")
}
}
}
// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
@@ -238,6 +290,25 @@ func testExtendCanonical(t *testing.T, full bool) {
testFork(t, processor, length, 10, full, better)
}
// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) }
func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, true) }
func testExtendCanonicalAfterMerge(t *testing.T, full bool) {
length := 5
// Make first chain starting from genesis
_, processor, err := newCanonical(ethash.NewFaker(), length, full)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
defer processor.Stop()
testInsertAfterMerge(t, processor, length, 1, full)
testInsertAfterMerge(t, processor, length, 10, full)
}
// Tests that given a starting canonical chain of a given size, creating shorter
// forks do not take canonical ownership.
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
@@ -268,6 +339,29 @@ func testShorterFork(t *testing.T, full bool) {
testFork(t, processor, 5, 4, full, worse)
}
// Tests that given a starting canonical chain of a given size, creating shorter
// forks do not take canonical ownership.
func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) }
func TestShorterForkBlocksAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, true) }
func testShorterForkAfterMerge(t *testing.T, full bool) {
length := 10
// Make first chain starting from genesis
_, processor, err := newCanonical(ethash.NewFaker(), length, full)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
defer processor.Stop()
testInsertAfterMerge(t, processor, 0, 3, full)
testInsertAfterMerge(t, processor, 0, 7, full)
testInsertAfterMerge(t, processor, 1, 1, full)
testInsertAfterMerge(t, processor, 1, 7, full)
testInsertAfterMerge(t, processor, 5, 3, full)
testInsertAfterMerge(t, processor, 5, 4, full)
}
// Tests that given a starting canonical chain of a given size, creating longer
// forks do take canonical ownership.
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
@@ -283,19 +377,35 @@ func testLongerFork(t *testing.T, full bool) {
}
defer processor.Stop()
// Define the difficulty comparator
better := func(td1, td2 *big.Int) {
if td2.Cmp(td1) <= 0 {
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
}
testInsertAfterMerge(t, processor, 0, 11, full)
testInsertAfterMerge(t, processor, 0, 15, full)
testInsertAfterMerge(t, processor, 1, 10, full)
testInsertAfterMerge(t, processor, 1, 12, full)
testInsertAfterMerge(t, processor, 5, 6, full)
testInsertAfterMerge(t, processor, 5, 8, full)
}
// Tests that given a starting canonical chain of a given size, creating longer
// forks do take canonical ownership.
func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) }
func TestLongerForkBlocksAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, true) }
func testLongerForkAfterMerge(t *testing.T, full bool) {
length := 10
// Make first chain starting from genesis
_, processor, err := newCanonical(ethash.NewFaker(), length, full)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
// Sum of numbers must be greater than `length` for this to be a longer fork
testFork(t, processor, 0, 11, full, better)
testFork(t, processor, 0, 15, full, better)
testFork(t, processor, 1, 10, full, better)
testFork(t, processor, 1, 12, full, better)
testFork(t, processor, 5, 6, full, better)
testFork(t, processor, 5, 8, full, better)
defer processor.Stop()
testInsertAfterMerge(t, processor, 0, 11, full)
testInsertAfterMerge(t, processor, 0, 15, full)
testInsertAfterMerge(t, processor, 1, 10, full)
testInsertAfterMerge(t, processor, 1, 12, full)
testInsertAfterMerge(t, processor, 5, 6, full)
testInsertAfterMerge(t, processor, 5, 8, full)
}
// Tests that given a starting canonical chain of a given size, creating equal
@@ -328,6 +438,29 @@ func testEqualFork(t *testing.T, full bool) {
testFork(t, processor, 9, 1, full, equal)
}
// Tests that given a starting canonical chain of a given size, creating equal
// forks do take canonical ownership.
func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) }
func TestEqualForkBlocksAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, true) }
func testEqualForkAfterMerge(t *testing.T, full bool) {
length := 10
// Make first chain starting from genesis
_, processor, err := newCanonical(ethash.NewFaker(), length, full)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
defer processor.Stop()
testInsertAfterMerge(t, processor, 0, 10, full)
testInsertAfterMerge(t, processor, 1, 9, full)
testInsertAfterMerge(t, processor, 2, 8, full)
testInsertAfterMerge(t, processor, 5, 5, full)
testInsertAfterMerge(t, processor, 6, 4, full)
testInsertAfterMerge(t, processor, 9, 1, full)
}
// Tests that chains missing links do not get accepted by the processor.
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) }
@@ -1800,21 +1933,56 @@ func TestLowDiffLongChain(t *testing.T) {
// - C is canon chain, containing blocks [G..Cn..Cm]
// - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock
// - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain
func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int) {
//
// mergePoint can take these values:
// -1: the transition won't happen
// 0: the transition happens at genesis
// 1: the transition happens after some chain segments
func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) {
// Copy the TestChainConfig so we can modify it during tests
chainConfig := *params.TestChainConfig
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
var (
merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
genEngine = beacon.New(ethash.NewFaker())
runEngine = beacon.New(ethash.NewFaker())
db = rawdb.NewMemoryDatabase()
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key.PublicKey)
nonce = uint64(0)
gspec = &Genesis{
Config: &chainConfig,
Alloc: GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
signer = types.LatestSigner(gspec.Config)
genesis, _ = gspec.Commit(db)
)
// Generate and import the canonical chain
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
gspec.MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, &chainConfig, runEngine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
// Activate the transition since genesis if required
if mergePoint == 0 {
merger.ReachTTD()
merger.FinalizePoS()
// Set the terminal total difficulty in the config
gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
}
blocks, _ := GenerateChain(&chainConfig, genesis, genEngine, db, 2*TriesInMemory, func(i int, gen *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
nonce++
})
if n, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
@@ -1831,6 +1999,15 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
}
// Activate the transition in the middle of the chain
if mergePoint == 1 {
merger.ReachTTD()
merger.FinalizePoS()
// Set the terminal total difficulty in the config
gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(len(blocks)))
}
// Generate the sidechain
// First block should be a known block, block after should be a pruned block. So
// canon(pruned), side, side...
@@ -1838,7 +2015,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// Generate fork chain, make it longer than canon
parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
parent := blocks[parentIndex]
fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 2*TriesInMemory, func(i int, b *BlockGen) {
fork, _ := GenerateChain(&chainConfig, parent, genEngine, db, 2*TriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{2})
})
// Prepend the parent(s)
@@ -1847,9 +2024,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
sidechain = append(sidechain, blocks[parentIndex+1-i])
}
sidechain = append(sidechain, fork...)
_, err = chain.InsertChain(sidechain)
n, err := chain.InsertChain(sidechain)
if err != nil {
t.Errorf("Got error, %v", err)
t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n)
}
head := chain.CurrentBlock()
if got := fork[len(fork)-1].Hash(); got != head.Hash() {
@@ -1870,11 +2047,28 @@ func TestPrunedImportSide(t *testing.T) {
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
//glogger.Verbosity(3)
//log.Root().SetHandler(log.Handler(glogger))
testSideImport(t, 3, 3)
testSideImport(t, 3, -3)
testSideImport(t, 10, 0)
testSideImport(t, 1, 10)
testSideImport(t, 1, -10)
testSideImport(t, 3, 3, -1)
testSideImport(t, 3, -3, -1)
testSideImport(t, 10, 0, -1)
testSideImport(t, 1, 10, -1)
testSideImport(t, 1, -10, -1)
}
func TestPrunedImportSideWithMerging(t *testing.T) {
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
//glogger.Verbosity(3)
//log.Root().SetHandler(log.Handler(glogger))
testSideImport(t, 3, 3, 0)
testSideImport(t, 3, -3, 0)
testSideImport(t, 10, 0, 0)
testSideImport(t, 1, 10, 0)
testSideImport(t, 1, -10, 0)
testSideImport(t, 3, 3, 1)
testSideImport(t, 3, -3, 1)
testSideImport(t, 10, 0, 1)
testSideImport(t, 1, 10, 1)
testSideImport(t, 1, -10, 1)
}
func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers") }
@@ -2002,6 +2196,179 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks2[len(blocks2)-1])
}
func TestInsertKnownHeadersWithMerging(t *testing.T) {
testInsertKnownChainDataWithMerging(t, "headers", 0)
}
func TestInsertKnownReceiptChainWithMerging(t *testing.T) {
testInsertKnownChainDataWithMerging(t, "receipts", 0)
}
func TestInsertKnownBlocksWithMerging(t *testing.T) {
testInsertKnownChainDataWithMerging(t, "blocks", 0)
}
func TestInsertKnownHeadersAfterMerging(t *testing.T) {
testInsertKnownChainDataWithMerging(t, "headers", 1)
}
func TestInsertKnownReceiptChainAfterMerging(t *testing.T) {
testInsertKnownChainDataWithMerging(t, "receipts", 1)
}
func TestInsertKnownBlocksAfterMerging(t *testing.T) {
testInsertKnownChainDataWithMerging(t, "blocks", 1)
}
// mergeHeight can take these values:
// 0: the merge is applied at genesis
// 1: the merge is applied after the first segment
func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) {
// Copy the TestChainConfig so we can modify it during tests
chainConfig := *params.TestChainConfig
var (
db = rawdb.NewMemoryDatabase()
genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: &chainConfig}).MustCommit(db)
runMerger = consensus.NewMerger(db)
runEngine = beacon.New(ethash.NewFaker())
genEngine = beacon.New(ethash.NewFaker())
)
applyMerge := func(engine *beacon.Beacon, height int) {
if engine != nil {
runMerger.FinalizePoS()
// Set the terminal total difficulty in the config
chainConfig.TerminalTotalDifficulty = big.NewInt(int64(height))
}
}
// Apply merging since genesis
if mergeHeight == 0 {
applyMerge(genEngine, 0)
}
blocks, receipts := GenerateChain(&chainConfig, genesis, genEngine, db, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
// Apply merging after the first segment
if mergeHeight == 1 {
applyMerge(genEngine, len(blocks))
}
// Longer chain and shorter chain
blocks2, receipts2 := GenerateChain(&chainConfig, blocks[len(blocks)-1], genEngine, db, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
blocks3, receipts3 := GenerateChain(&chainConfig, blocks[len(blocks)-1], genEngine, db, 64, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed
})
// Import the shared chain and the original canonical one
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(dir)
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb)
defer os.RemoveAll(dir)
chain, err := NewBlockChain(chaindb, nil, &chainConfig, runEngine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
var (
inserter func(blocks []*types.Block, receipts []types.Receipts) error
asserter func(t *testing.T, block *types.Block)
)
if typ == "headers" {
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
headers := make([]*types.Header, 0, len(blocks))
for _, block := range blocks {
headers = append(headers, block.Header())
}
_, err := chain.InsertHeaderChain(headers, 1)
return err
}
asserter = func(t *testing.T, block *types.Block) {
if chain.CurrentHeader().Hash() != block.Hash() {
t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
}
}
} else if typ == "receipts" {
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
headers := make([]*types.Header, 0, len(blocks))
for _, block := range blocks {
headers = append(headers, block.Header())
}
_, err := chain.InsertHeaderChain(headers, 1)
if err != nil {
return err
}
_, err = chain.InsertReceiptChain(blocks, receipts, 0)
return err
}
asserter = func(t *testing.T, block *types.Block) {
if chain.CurrentFastBlock().Hash() != block.Hash() {
t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentFastBlock().Hash().Hex(), block.Hash().Hex())
}
}
} else {
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
_, err := chain.InsertChain(blocks)
return err
}
asserter = func(t *testing.T, block *types.Block) {
if chain.CurrentBlock().Hash() != block.Hash() {
t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
}
}
}
// Apply merging since genesis if required
if mergeHeight == 0 {
applyMerge(runEngine, 0)
}
if err := inserter(blocks, receipts); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
// Reimport the chain data again. All the imported
// chain data is regarded as "known" data.
if err := inserter(blocks, receipts); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
asserter(t, blocks[len(blocks)-1])
// Import a long canonical chain with some known data as prefix.
rollback := blocks[len(blocks)/2].NumberU64()
chain.SetHead(rollback - 1)
if err := inserter(blocks, receipts); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
asserter(t, blocks[len(blocks)-1])
// Apply merging after the first segment
if mergeHeight == 1 {
applyMerge(runEngine, len(blocks))
}
// Import a longer chain with some known data as prefix.
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
asserter(t, blocks2[len(blocks2)-1])
// Import a shorter chain with some known data as prefix.
// The reorg is expected since the fork choice rule has
// already changed.
if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
// The head should switch to the tip of the shorter chain.
asserter(t, blocks3[len(blocks3)-1])
// Reimport the longer chain again, the reorg is still expected
chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
asserter(t, blocks2[len(blocks2)-1])
}
// getLongAndShortChains returns two chains: A is longer, B is heavier.
func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyChain []*types.Block, err error) {
// Generate a canonical chain to act as the main dataset
@@ -2270,7 +2637,7 @@ func TestTransactionIndices(t *testing.T) {
}
}
func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
@@ -2482,6 +2849,7 @@ func TestSideImportPrunedBlocks(t *testing.T) {
// Generate and import the canonical chain
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
@@ -2690,7 +3058,7 @@ func TestDeleteRecreateSlots(t *testing.T) {
gspec.MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
Debug: true,
Tracer: vm.NewJSONLogger(nil, os.Stdout),
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
@@ -2770,7 +3138,7 @@ func TestDeleteRecreateAccount(t *testing.T) {
gspec.MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
Debug: true,
Tracer: vm.NewJSONLogger(nil, os.Stdout),
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)


@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
// BlockGen creates blocks for testing.
@@ -155,6 +156,28 @@ func (b *BlockGen) TxNonce(addr common.Address) uint64 {
// AddUncle adds an uncle header to the generated block.
func (b *BlockGen) AddUncle(h *types.Header) {
// The uncle will have the same timestamp and auto-generated difficulty
h.Time = b.header.Time
var parent *types.Header
for i := b.i - 1; i >= 0; i-- {
if b.chain[i].Hash() == h.ParentHash {
parent = b.chain[i].Header()
break
}
}
chainreader := &fakeChainReader{config: b.config}
h.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, parent)
// The gas limit and price should be derived from the parent
h.GasLimit = parent.GasLimit
if b.config.IsLondon(h.Number) {
h.BaseFee = misc.CalcBaseFee(b.config, parent)
if !b.config.IsLondon(parent.Number) {
parentGasLimit := parent.GasLimit * params.ElasticityMultiplier
h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit)
}
}
b.uncles = append(b.uncles, h)
}
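As a quick sanity check on the London branch above, here is a hypothetical worked example (not from the diff) of the one-time gas limit bump at the transition block, assuming params.ElasticityMultiplier is 2:

    // Illustration only: the parent limit is scaled once at the London
    // transition, so the EIP-1559 gas target (limit / elasticity) stays
    // equal to the pre-London limit.
    parentGasLimit := uint64(15_000_000)     // assumed pre-London limit
    firstLondonLimit := parentGasLimit * 2   // params.ElasticityMultiplier
    // firstLondonLimit == 30_000_000; CalcGasLimit(30M, 30M) then leaves
    // the limit unchanged, since parent and desired limit coincide.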
@@ -205,6 +228,18 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine)
// Set the difficulty for clique block. The chain maker doesn't have access
// to a chain, so the difficulty will be left unset (nil). Set it here to the
// correct value.
if b.header.Difficulty == nil {
if config.TerminalTotalDifficulty == nil {
// Clique chain
b.header.Difficulty = big.NewInt(2)
} else {
// Post-merge chain
b.header.Difficulty = big.NewInt(0)
}
}
// Mutate the state and block according to any hard-fork specs
if daoBlock := config.DAOForkBlock; daoBlock != nil {
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
@@ -250,6 +285,91 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
return blocks, receipts
}
func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
if config == nil {
config = params.TestChainConfig
}
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
chainreader := &fakeChainReader{config: config}
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine)
// Mutate the state and block according to any hard-fork specs
if daoBlock := config.DAOForkBlock; daoBlock != nil {
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 {
if config.DAOForkSupport {
b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
}
}
}
if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 {
misc.ApplyDAOHardFork(statedb)
}
// Execute any user modifications to the block
if gen != nil {
gen(i, b)
}
if b.engine != nil {
// Finalize and seal the block
block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
if err != nil {
panic(err)
}
// Write state changes to db
root, err := statedb.Commit(config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
// Generate an associated verkle proof
if tr := statedb.GetTrie(); tr.IsVerkle() {
vtr := tr.(*trie.VerkleTrie)
// Generate the proof if we are using a verkle tree
// WORKAROUND: make sure all keys are resolved
// before building the proof. Ultimately, node
// resolution can be done with a prefetcher or
// from GetCommitmentsAlongPath.
keys := statedb.Witness().Keys()
for _, key := range keys {
out, err := vtr.TryGet(key)
if err != nil {
panic(err)
}
if len(out) == 0 {
panic(fmt.Sprintf("%x should be present in the tree", key))
}
}
vtr.Hash()
p, err := vtr.ProveAndSerialize(keys, statedb.Witness().KeyVals())
if err != nil {
panic(err)
}
block.SetVerkleProof(p)
}
return block, b.receipts
}
return nil, nil
}
for i := 0; i < n; i++ {
statedb, err := state.New(parent.Root(), state.NewDatabaseWithConfig(db, &trie.Config{UseVerkle: true}), nil)
if err != nil {
panic(err)
}
block, receipt := genblock(i, parent, statedb)
blocks[i] = block
receipts[i] = receipt
parent = block
}
return blocks, receipts
}
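A hypothetical usage sketch (not part of the diff; it would live in package core next to the code above), assuming DefaultVerkleGenesisBlock's config marks Cancun at genesis so the state database opens in verkle mode:

    db := rawdb.NewMemoryDatabase()
    genesis := DefaultVerkleGenesisBlock().MustCommit(db) // verkle genesis state
    // Each generated block gets a serialized verkle proof attached via
    // SetVerkleProof inside genblock above; a nil gen yields empty blocks.
    blocks, receipts := GenerateVerkleChain(params.VerkleChainConfig, genesis, ethash.NewFaker(), db, 4, nil)
    _, _ = blocks, receipts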
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
var time uint64
if parent.Time() == 0 {
@@ -313,3 +433,4 @@ func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header
func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil }
func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil }
func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil }
func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { return nil }


@@ -51,6 +51,10 @@ var (
// next one expected based on the local chain.
ErrNonceTooHigh = errors.New("nonce too high")
// ErrNonceMax is returned if the nonce of a transaction sender account has
// maximum allowed value and would become invalid if incremented.
ErrNonceMax = errors.New("nonce has max value")
// ErrGasLimitReached is returned by the gas pool if the amount of gas required
// by a transaction is higher than what's left in the block.
ErrGasLimitReached = errors.New("gas limit reached")

core/forkchoice.go Normal file

@@ -0,0 +1,108 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
crand "crypto/rand"
"errors"
"math/big"
mrand "math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
// ChainReader defines a small collection of methods needed to access the local
// blockchain during header verification. It's implemented by both blockchain
// and lightchain.
type ChainReader interface {
// Config retrieves the header chain's chain configuration.
Config() *params.ChainConfig
// GetTd returns the total difficulty of a local block.
GetTd(common.Hash, uint64) *big.Int
}
// ForkChoice is the fork chooser based on the highest total difficulty of the
// chain (the fork choice used in eth1) and the external fork choice (the fork
// choice used in eth2). The main goal of this ForkChoice is not only to offer
// fork choice during the eth1/eth2 merge phase, but also to keep compatibility
// with all other proof-of-work networks.
type ForkChoice struct {
chain ChainReader
rand *mrand.Rand
// preserve is a helper function used in td fork choice.
// Miners will prefer to choose the locally mined block if the
// local td is equal to the extern one. It can be nil for light
// clients.
}
func NewForkChoice(chainReader ChainReader, preserve func(header *types.Header) bool) *ForkChoice {
// Seed a fast but crypto originating random generator
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
log.Crit("Failed to initialize random seed", "err", err)
}
return &ForkChoice{
chain: chainReader,
rand: mrand.New(mrand.NewSource(seed.Int64())),
preserve: preserve,
}
}
// ReorgNeeded returns whether the reorg should be applied
// based on the given external header and local canonical chain.
// In the td mode, the new head is chosen if the corresponding
// total difficulty is higher. In the extern mode, the trusted
// header is always selected as the head.
func (f *ForkChoice) ReorgNeeded(current *types.Header, header *types.Header) (bool, error) {
var (
localTD = f.chain.GetTd(current.Hash(), current.Number.Uint64())
externTd = f.chain.GetTd(header.Hash(), header.Number.Uint64())
)
if localTD == nil || externTd == nil {
return false, errors.New("missing td")
}
// Accept the new header as the chain head if the transition
// is already triggered. We assume all the headers after the
// transition come from the trusted consensus layer.
if ttd := f.chain.Config().TerminalTotalDifficulty; ttd != nil && ttd.Cmp(externTd) <= 0 {
return true, nil
}
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := externTd.Cmp(localTD) > 0
if !reorg && externTd.Cmp(localTD) == 0 {
number, headNumber := header.Number.Uint64(), current.Number.Uint64()
if number < headNumber {
reorg = true
} else if number == headNumber {
var currentPreserve, externPreserve bool
if f.preserve != nil {
currentPreserve, externPreserve = f.preserve(current), f.preserve(header)
}
reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
}
}
return reorg, nil
}
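A brief sketch of how a caller is expected to consult the chooser (illustrative, not from the diff); `hc` stands for any ChainReader implementation, such as a HeaderChain:

    forker := NewForkChoice(hc, nil) // nil preserve: no local-miner preference
    reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), newHeader)
    if err != nil {
        return err // e.g. missing td for either header
    }
    if reorg {
        // adopt newHeader's chain as the canonical head
    }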


@@ -155,13 +155,14 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
}
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
// Just commit the new block if there is no stored genesis block.
stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
@@ -177,13 +178,29 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
}
return genesis.Config, block.Hash(), nil
}
// We have the genesis block in database(perhaps in ancient database)
// but the corresponding state is missing.
header := rawdb.ReadHeader(db, stored, 0)
if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil {
if genesis == nil {
genesis = DefaultGenesisBlock()
var trieCfg *trie.Config
if genesis == nil {
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
panic("this should never be reached: if genesis is nil, the config is already present or 'geth init' is being called which created it (in the code above, which means genesis != nil)")
}
if storedcfg.CancunBlock != nil {
if storedcfg.CancunBlock.Cmp(big.NewInt(0)) != 0 {
panic("cancun block must be 0")
}
trieCfg = &trie.Config{UseVerkle: storedcfg.IsCancun(big.NewInt(header.Number.Int64()))}
}
}
if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, trieCfg), nil); err != nil {
// Ensure the stored genesis matches with the given one.
hash := genesis.ToBlock(nil).Hash()
if hash != stored {
@@ -207,6 +224,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if overrideArrowGlacier != nil {
newcfg.ArrowGlacierBlock = overrideArrowGlacier
}
if overrideTerminalTotalDifficulty != nil {
newcfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty
}
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
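For illustration (a hypothetical caller, not in the diff), the new override threads through like this:

    // Force the merge at genesis for a test network; passing nil instead
    // leaves the stored/compiled-in TTD untouched.
    ttd := big.NewInt(0)
    config, genesisHash, err := SetupGenesisBlockWithOverride(db, DefaultGenesisBlock(), nil, ttd)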
@@ -261,7 +281,11 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if db == nil {
db = rawdb.NewMemoryDatabase()
}
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
var trieCfg *trie.Config
if g.Config != nil {
trieCfg = &trie.Config{UseVerkle: g.Config.IsCancun(big.NewInt(int64(g.Number)))}
}
statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, trieCfg), nil)
if err != nil {
panic(err)
}
@@ -303,6 +327,9 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
}
statedb.Commit(false)
statedb.Database().TrieDB().Commit(root, true, nil)
if err := statedb.Cap(root); err != nil {
panic(err)
}
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
}
@@ -354,6 +381,20 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big
return g.MustCommit(db)
}
func DefaultVerkleGenesisBlock() *Genesis {
return &Genesis{
Config: params.VerkleChainConfig,
Nonce: 86,
GasLimit: 0x2fefd8,
Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{
common.BytesToAddress([]byte{97, 118, 97, 209, 72, 165, 43, 239, 81, 162, 104, 199, 40, 179, 162, 27, 88, 249, 67, 6}): {
Balance: big.NewInt(0).Lsh(big.NewInt(1), 27),
},
},
}
}
// DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis {
return &Genesis{
@@ -416,7 +457,7 @@ func DefaultSepoliaGenesisBlock() *Genesis {
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
config := *params.AllCliqueProtocolChanges
config.Clique = &params.CliqueConfig{
@@ -428,7 +469,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
return &Genesis{
Config: &config,
ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...),
GasLimit: 11500000,
GasLimit: gasLimit,
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{


@@ -49,15 +49,14 @@ const (
// HeaderChain is responsible for maintaining the header chain including the
// header query and updating.
//
// The components maintained by headerchain includes: (1) total difficult
// The components maintained by headerchain includes: (1) total difficulty
// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
// and (5) head header flag.
//
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
config *params.ChainConfig
config *params.ChainConfig
chainDb ethdb.Database
genesisHeader *types.Header
@@ -86,7 +85,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
if err != nil {
return nil, err
}
hc := &HeaderChain{
config: config,
chainDb: chainDb,
@@ -97,12 +95,10 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
rand: mrand.New(mrand.NewSource(seed.Int64())),
engine: engine,
}
hc.genesisHeader = hc.GetHeaderByNumber(0)
if hc.genesisHeader == nil {
return nil, ErrNoGenesis
}
hc.currentHeader.Store(hc.genesisHeader)
if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
if chead := hc.GetHeaderByHash(head); chead != nil {
@@ -111,7 +107,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
}
hc.currentHeaderHash = hc.CurrentHeader().Hash()
headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
return hc, nil
}
@@ -137,35 +132,93 @@ type headerWriteResult struct {
lastHeader *types.Header
}
// WriteHeaders writes a chain of headers into the local chain, given that the parents
// are already known. If the total difficulty of the newly inserted chain becomes
// greater than the current known TD, the canonical chain is reorged.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
// Reorg reorgs the local canonical chain into the specified chain. The reorg
// can be classified into two cases: (a) extend the local chain (b) switch the
// head to the given header.
func (hc *HeaderChain) Reorg(headers []*types.Header) error {
// Short circuit if nothing to reorg.
if len(headers) == 0 {
return &headerWriteResult{}, nil
return nil
}
// If the parent of the (first) block is already the canon header,
// we don't have to go backwards to delete canon blocks, but simply
// pile them onto the existing chain. Otherwise, do the necessary
// reorgs.
var (
first = headers[0]
last = headers[len(headers)-1]
batch = hc.chainDb.NewBatch()
)
if first.ParentHash != hc.currentHeaderHash {
// Delete any canonical number assignments above the new head
for i := last.Number.Uint64() + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(batch, i)
}
// Overwrite any stale canonical number assignments, going
// backwards from the first header in this import until the
// cross link between two chains.
var (
header = first
headNumber = header.Number.Uint64()
headHash = header.Hash()
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
rawdb.WriteCanonicalHash(batch, headHash, headNumber)
if headNumber == 0 {
break // It shouldn't be reached
}
headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
header = hc.GetHeader(headHash, headNumber)
if header == nil {
return fmt.Errorf("missing parent %d %x", headNumber, headHash)
}
}
}
// Extend the canonical chain with the new headers
for i := 0; i < len(headers)-1; i++ {
hash := headers[i+1].ParentHash // Save some extra hashing
num := headers[i].Number.Uint64()
rawdb.WriteCanonicalHash(batch, hash, num)
rawdb.WriteHeadHeaderHash(batch, hash)
}
// Write the last header
hash := headers[len(headers)-1].Hash()
num := headers[len(headers)-1].Number.Uint64()
rawdb.WriteCanonicalHash(batch, hash, num)
rawdb.WriteHeadHeaderHash(batch, hash)
if err := batch.Write(); err != nil {
return err
}
// Last step update all in-memory head header markers
hc.currentHeaderHash = last.Hash()
hc.currentHeader.Store(types.CopyHeader(last))
headHeaderGauge.Update(last.Number.Int64())
return nil
}
// WriteHeaders writes a chain of headers into the local chain, given that the
// parents are already known. The chain head header won't be updated in this
// function, the additional setChainHead is expected in order to finish the entire
// procedure.
func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
if len(headers) == 0 {
return 0, nil
}
ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
if ptd == nil {
return &headerWriteResult{}, consensus.ErrUnknownAncestor
return 0, consensus.ErrUnknownAncestor
}
var (
lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number
lastHash = headers[0].ParentHash // Last imported header hash
newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain
lastHeader *types.Header
inserted []numberHash // Ephemeral lookup of number/hash for the chain
firstInserted = -1 // Index of the first non-ignored header
newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain
inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain
parentKnown = true // Set to true to force hc.HasHeader check the first iteration
batch = hc.chainDb.NewBatch()
)
batch := hc.chainDb.NewBatch()
parentKnown := true // Set to true to force hc.HasHeader check the first iteration
for i, header := range headers {
var hash common.Hash
// The headers have already been validated at this point, so we already
@@ -188,116 +241,67 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
hc.tdCache.Add(hash, new(big.Int).Set(newTD))
rawdb.WriteHeader(batch, header)
inserted = append(inserted, numberHash{number, hash})
inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
hc.headerCache.Add(hash, header)
hc.numberCache.Add(hash, number)
if firstInserted < 0 {
firstInserted = i
}
}
parentKnown = alreadyKnown
lastHeader, lastHash, lastNumber = header, hash, number
}
// Skip the slow disk write of all headers if interrupted.
if hc.procInterrupt() {
log.Debug("Premature abort during headers import")
return &headerWriteResult{}, errors.New("aborted")
return 0, errors.New("aborted")
}
// Commit to disk!
if err := batch.Write(); err != nil {
log.Crit("Failed to write headers", "error", err)
}
batch.Reset()
return len(inserted), nil
}
// writeHeadersAndSetHead writes a batch of block headers and applies the last
// header as the chain head if the fork choicer says it's ok to update the chain.
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
inserted, err := hc.WriteHeaders(headers)
if err != nil {
return nil, err
}
var (
head = hc.CurrentHeader().Number.Uint64()
localTD = hc.GetTd(hc.currentHeaderHash, head)
status = SideStatTy
lastHeader = headers[len(headers)-1]
lastHash = headers[len(headers)-1].Hash()
result = &headerWriteResult{
status: NonStatTy,
ignored: len(headers) - inserted,
imported: inserted,
lastHash: lastHash,
lastHeader: lastHeader,
}
)
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := newTD.Cmp(localTD) > 0
if !reorg && newTD.Cmp(localTD) == 0 {
if lastNumber < head {
reorg = true
} else if lastNumber == head {
reorg = mrand.Float64() < 0.5
// Ask the fork choicer if the reorg is necessary
if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
return nil, err
} else if !reorg {
if inserted != 0 {
result.status = SideStatTy
}
return result, nil
}
// If the parent of the (first) block is already the canon header,
// we don't have to go backwards to delete canon blocks, but
// simply pile them onto the existing chain
chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
if reorg {
// If the header can be added into canonical chain, adjust the
// header chain markers(canonical indexes and head header flag).
//
// Note all markers should be written atomically.
markerBatch := batch // we can reuse the batch to keep allocs down
if !chainAlreadyCanon {
// Delete any canonical number assignments above the new head
for i := lastNumber + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(markerBatch, i)
}
// Overwrite any stale canonical number assignments, going
// backwards from the first header in this import
var (
headHash = headers[0].ParentHash // inserted[0].parent?
headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
headHeader = hc.GetHeader(headHash, headNumber)
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
headHash = headHeader.ParentHash
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// If some of the older headers were already known, but obtained canon-status
// during this import batch, then we need to write that now.
// Further down, we continue writing the status for the ones that
// were not already known
for i := 0; i < firstInserted; i++ {
hash := headers[i].Hash()
num := headers[i].Number.Uint64()
rawdb.WriteCanonicalHash(markerBatch, hash, num)
rawdb.WriteHeadHeaderHash(markerBatch, hash)
}
}
// Extend the canonical chain with the new headers
for _, hn := range inserted {
rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
}
if err := markerBatch.Write(); err != nil {
log.Crit("Failed to write header markers into disk", "err", err)
}
markerBatch.Reset()
// Last step update all in-memory head header markers
hc.currentHeaderHash = lastHash
hc.currentHeader.Store(types.CopyHeader(lastHeader))
headHeaderGauge.Update(lastHeader.Number.Int64())
// Chain status is canonical since this insert was a reorg.
// Note that all inserts which have higher TD than existing are 'reorg'.
status = CanonStatTy
// Special case, all the inserted headers are already on the canonical
// header chain, skip the reorg operation.
if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
return result, nil
}
if len(inserted) == 0 {
status = NonStatTy
// Apply the reorg operation
if err := hc.Reorg(headers); err != nil {
return nil, err
}
return &headerWriteResult{
status: status,
ignored: len(headers) - len(inserted),
imported: len(inserted),
lastHash: lastHash,
lastHeader: lastHeader,
}, nil
result.status = CanonStatTy
return result, nil
}
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
@@ -357,7 +361,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
return 0, nil
}
// InsertHeaderChain inserts the given headers.
// InsertHeaderChain inserts the given headers and does the reorganisations.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
@@ -367,20 +371,19 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
if hc.procInterrupt() {
return 0, errors.New("aborted")
}
res, err := hc.writeHeaders(chain)
res, err := hc.writeHeadersAndSetHead(chain, forker)
if err != nil {
return 0, err
}
// Report some public statistics so the user has a clue what's going on
context := []interface{}{
"count", res.imported,
"elapsed", common.PrettyDuration(time.Since(start)),
}
if err != nil {
context = append(context, "err", err)
}
if last := res.lastHeader; last != nil {
context = append(context, "number", last.Number, "hash", res.lastHash)
if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {


@@ -51,10 +51,10 @@ func verifyUnbrokenCanonchain(hc *HeaderChain) error {
return nil
}
func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error) {
func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error, forker *ForkChoice) {
t.Helper()
status, err := hc.InsertHeaderChain(chain, time.Now())
status, err := hc.InsertHeaderChain(chain, time.Now(), forker)
if status != wantStatus {
t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus)
}
@@ -80,37 +80,38 @@ func TestHeaderInsertion(t *testing.T) {
}
// chain A: G->A1->A2...A128
chainA := makeHeaderChain(genesis.Header(), 128, ethash.NewFaker(), db, 10)
// chain B: G->A1->B2...B128
// chain B: G->A1->B1...B128
chainB := makeHeaderChain(chainA[0], 128, ethash.NewFaker(), db, 10)
log.Root().SetHandler(log.StdoutHandler)
forker := NewForkChoice(hc, nil)
// Inserting 64 headers on an empty chain, expecting
// 1 callbacks, 1 canon-status, 0 sidestatus,
testInsert(t, hc, chainA[:64], CanonStatTy, nil)
testInsert(t, hc, chainA[:64], CanonStatTy, nil, forker)
// Inserting 64 identical headers, expecting
// 0 callbacks, 0 canon-status, 0 sidestatus,
testInsert(t, hc, chainA[:64], NonStatTy, nil)
testInsert(t, hc, chainA[:64], NonStatTy, nil, forker)
// Inserting a mix of already-known and new headers
// 1 callbacks, 1 canon, 0 side
testInsert(t, hc, chainA[32:96], CanonStatTy, nil)
testInsert(t, hc, chainA[32:96], CanonStatTy, nil, forker)
// Inserting side blocks, but not overtaking the canon chain
testInsert(t, hc, chainB[0:32], SideStatTy, nil)
testInsert(t, hc, chainB[0:32], SideStatTy, nil, forker)
// Inserting more side blocks, but we don't have the parent
testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor)
testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor, forker)
// Inserting more sideblocks, overtaking the canon chain
testInsert(t, hc, chainB[32:97], CanonStatTy, nil)
testInsert(t, hc, chainB[32:97], CanonStatTy, nil, forker)
// Inserting more A-headers, taking back the canonicality
testInsert(t, hc, chainA[90:100], CanonStatTy, nil)
testInsert(t, hc, chainA[90:100], CanonStatTy, nil, forker)
// And B becomes canon again
testInsert(t, hc, chainB[97:107], CanonStatTy, nil)
testInsert(t, hc, chainB[97:107], CanonStatTy, nil, forker)
// And B becomes even longer
testInsert(t, hc, chainB[107:128], CanonStatTy, nil)
testInsert(t, hc, chainB[107:128], CanonStatTy, nil, forker)
}


@@ -242,24 +242,6 @@ func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
}
}
// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
data, _ := db.Get(fastTrieProgressKey)
if len(data) == 0 {
return 0
}
return new(big.Int).SetBytes(data).Uint64()
}
// WriteFastTrieProgress stores the fast sync trie process counter to support
// retrieving it across restarts.
func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
log.Crit("Failed to store fast sync trie progress", "err", err)
}
}
// ReadTxIndexTail retrieves the number of the oldest indexed block
// whose transaction indices have been indexed. If the corresponding entry
// is non-existent in the database, it means the indexing has been finished.
@@ -669,7 +651,7 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t
// ReadLogs retrieves the logs for all transactions in a block. The log fields
// are populated with metadata. In case the receipts or the block body
// are not found, a nil is returned.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
// Retrieve the flattened receipt slice
data := ReadReceiptsRLP(db, hash, number)
if len(data) == 0 {
@@ -677,7 +659,12 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
}
receipts := []*receiptLogs{}
if err := rlp.DecodeBytes(data, &receipts); err != nil {
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
// Receipts might be in the legacy format, try decoding that.
// TODO: to be removed after users migrated
if logs := readLegacyLogs(db, hash, number, config); logs != nil {
return logs
}
log.Error("Invalid receipt array RLP", "hash", "err", err)
return nil
}
@@ -697,6 +684,21 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
return logs
}
// readLegacyLogs is a temporary workaround for when trying to read logs
// from a block which has its receipt stored in the legacy format. It'll
// be removed after users have migrated their freezer databases.
func readLegacyLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
receipts := ReadReceipts(db, hash, number, config)
if receipts == nil {
return nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
}
return logs
}
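A short sketch of the new call shape (illustrative): the config parameter is only consulted on the legacy fallback path, where full receipts must be re-derived to recover the logs.

    logs := ReadLogs(db, blockHash, blockNumber, params.MainnetChainConfig)
    for _, txLogs := range logs {
        // txLogs holds the metadata-populated logs of one transaction
        _ = txLogs
    }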
// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.


@@ -744,7 +744,7 @@ func TestReadLogs(t *testing.T) {
// Insert the receipt slice into the database and check presence
WriteReceipts(db, hash, 0, receipts)
logs := ReadLogs(db, hash, 0)
logs := ReadLogs(db, hash, 0, params.TestChainConfig)
if len(logs) == 0 {
t.Fatalf("no logs returned")
}


@@ -138,3 +138,16 @@ func PopUncleanShutdownMarker(db ethdb.KeyValueStore) {
log.Warn("Failed to clear unclean-shutdown marker", "err", err)
}
}
// ReadTransitionStatus retrieves the eth2 transition status from the database
func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
data, _ := db.Get(transitionStatusKey)
return data
}
// WriteTransitionStatus stores the eth2 transition status to the database
func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) {
if err := db.Put(transitionStatusKey, data); err != nil {
log.Crit("Failed to store the eth2 transition status", "err", err)
}
}
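A small sketch of the intended use (illustrative; the actual blob encoding is owned by consensus.Merger, the bytes here are placeholders):

    WriteTransitionStatus(db, []byte{1}) // e.g. a marker meaning "TTD reached"
    if data := ReadTransitionStatus(db); data != nil {
        // restore the in-memory merge state from the stored blob
    }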


@@ -208,11 +208,3 @@ func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) {
log.Crit("Failed to store snapshot sync status", "err", err)
}
}
// DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last
// shutdown
func DeleteSnapshotSyncStatus(db ethdb.KeyValueWriter) {
if err := db.Delete(snapshotSyncStatusKey); err != nil {
log.Crit("Failed to remove snapshot sync status", "err", err)
}
}


@@ -44,24 +44,29 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
hash common.Hash
)
for i := uint64(0); i < frozen; i++ {
// Since the freezer has all data in sequential order on a file,
// it would be 'neat' to read more data in one go, and let the
// freezerdb return N items (e.g up to 1000 items per go)
// That would require an API change in Ancients though
if h, err := db.Ancient(freezerHashTable, i); err != nil {
for i := uint64(0); i < frozen; {
// We read 100K hashes at a time, for a total of 3.2M
count := uint64(100_000)
if i+count > frozen {
count = frozen - i
}
data, err := db.AncientRange(freezerHashTable, i, count, 32*count)
if err != nil {
log.Crit("Failed to init database from freezer", "err", err)
} else {
}
for j, h := range data {
number := i + uint64(j)
hash = common.BytesToHash(h)
}
WriteHeaderNumber(batch, hash, i)
// If enough data was accumulated in memory or we're at the last block, dump to disk
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
log.Crit("Failed to write data to db", "err", err)
WriteHeaderNumber(batch, hash, number)
// If enough data was accumulated in memory or we're at the last block, dump to disk
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
log.Crit("Failed to write data to db", "err", err)
}
batch.Reset()
}
batch.Reset()
}
i += uint64(len(data))
// If we've spent too much time already, notify the user of what we're doing
if time.Since(logged) > 8*time.Second {
log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))

View File

@@ -395,7 +395,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey,
} {
if bytes.Equal(key, meta) {
metadata.Add(size)

View File

@@ -116,7 +116,7 @@ func (batch *freezerTableBatch) reset() {
// existing data.
func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
if item != batch.curItem {
return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
}
// Encode the item.
@@ -136,7 +136,7 @@ func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
// existing data.
func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
if item != batch.curItem {
return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
}
encItem := blob

View File

@@ -246,7 +246,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
if truncateErr != nil {
t.Fatal("concurrent truncate failed:", err)
}
if !(modifyErr == nil || errors.Is(modifyErr, errOutOrderInsertion)) {
t.Fatal("wrong error from concurrent modify:", modifyErr)
}
checkAncientCount(t, f, "test", 10)

View File

@@ -75,6 +75,9 @@ var (
// uncleanShutdownKey tracks the list of local crashes
uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db
// transitionStatusKey tracks the eth2 transition status.
transitionStatusKey = []byte("eth2-transition")
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td

View File

@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"github.com/gballet/go-verkle"
lru "github.com/hashicorp/golang-lru"
)
@@ -104,6 +105,9 @@ type Trie interface {
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
// IsVerkle returns true if the trie is verkle-tree based
IsVerkle() bool
}
// NewDatabase creates a backing store for state. The returned database is safe for
@@ -118,6 +122,13 @@ func NewDatabase(db ethdb.Database) Database {
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
csc, _ := lru.New(codeSizeCacheSize)
if config != nil && config.UseVerkle {
return &VerkleDB{
db: trie.NewDatabaseWithConfig(db, config),
codeSizeCache: csc,
codeCache: fastcache.New(codeCacheSize),
}
}
return &cachingDB{
db: trie.NewDatabaseWithConfig(db, config),
codeSizeCache: csc,
@@ -202,3 +213,67 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro
func (db *cachingDB) TrieDB() *trie.Database {
return db.db
}
// VerkleDB implements state.Database for a verkle tree
type VerkleDB struct {
db *trie.Database
codeSizeCache *lru.Cache
codeCache *fastcache.Cache
}
// OpenTrie opens the main account trie.
func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) {
if root == (common.Hash{}) || root == emptyRoot {
return trie.NewVerkleTrie(verkle.New(), db.db), nil
}
payload, err := db.db.DiskDB().Get(root[:])
if err != nil {
return nil, err
}
r, err := verkle.ParseNode(payload, 0)
if err != nil {
panic(err)
}
return trie.NewVerkleTrie(r, db.db), err
}
// OpenStorageTrie opens the storage trie of an account.
func (db *VerkleDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
// alternatively, return accTrie
panic("should not be called")
}
// CopyTrie returns an independent copy of the given trie.
func (db *VerkleDB) CopyTrie(tr Trie) Trie {
t, ok := tr.(*trie.VerkleTrie)
if ok {
return t.Copy(db.db)
}
panic("invalid tree type != VerkleTrie")
}
// ContractCode retrieves a particular contract's code.
func (db *VerkleDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
return code, nil
}
code := rawdb.ReadCode(db.db.DiskDB(), codeHash)
if len(code) > 0 {
db.codeCache.Set(codeHash.Bytes(), code)
db.codeSizeCache.Add(codeHash, len(code))
return code, nil
}
return nil, errors.New("not found")
}
// ContractCodeSize retrieves a particular contract's code size.
func (db *VerkleDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
panic("need to merge #31 for this to work")
}
// TrieDB retrieves the low level trie database used for data storage.
func (db *VerkleDB) TrieDB() *trie.Database {
return db.db
}
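Aside: a minimal wiring sketch for the verkle path, assuming the UseVerkle flag this branch adds to trie.Config; per OpenTrie above, an empty root yields a fresh in-memory verkle trie:

func openVerkleState() Trie {
    diskdb := rawdb.NewMemoryDatabase()
    sdb := NewDatabaseWithConfig(diskdb, &trie.Config{UseVerkle: true})
    tr, err := sdb.OpenTrie(common.Hash{}) // empty root -> verkle.New()
    if err != nil {
        panic(err)
    }
    return tr // tr.IsVerkle() == true
}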

View File

@@ -76,6 +76,14 @@ func (it *NodeIterator) step() error {
// Initialize the iterator if we've just started
if it.stateIt == nil {
it.stateIt = it.state.trie.NodeIterator(nil)
// If the trie is a verkle trie, then the data and state
// are the same tree, and as a result both iterators are
// the same. This is a hack meant for both tree types to
// work.
if _, ok := it.state.trie.(*trie.VerkleTrie); ok {
it.dataIt = it.stateIt
}
}
// If we had data nodes previously, we surely have at least state nodes
if it.dataIt != nil {
@@ -100,10 +108,11 @@ func (it *NodeIterator) step() error {
it.state, it.stateIt = nil, nil
return nil
}
// If the state trie node is an internal entry, leave as is
// If the state trie node is an internal entry, leave as is.
if !it.stateIt.Leaf() {
return nil
}
// Otherwise we've reached an account node, initiate data iteration
var account types.StateAccount
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {

View File

@@ -89,7 +89,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6
if headBlock == nil {
return nil, errors.New("Failed to load head block")
}
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false, false)
if err != nil {
return nil, err // The relevant snapshot(s) might not exist
}
@@ -362,7 +362,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
// In this case, even the state HEAD is not exactly matched with snapshot, it
// still feasible to recover the pruning correctly.
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true, false)
if err != nil {
return err // The relevant snapshot(s) might not exist
}

View File

@@ -24,6 +24,7 @@ import (
"sync"
"sync/atomic"
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
@@ -183,7 +184,7 @@ type Tree struct {
// This case happens when the snapshot is 'ahead' of the state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool, useVerkle bool) (*Tree, error) {
// Create a new, empty snapshot tree
snap := &Tree{
diskdb: diskdb,
@@ -202,6 +203,17 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
}
if err != nil {
if rebuild {
if useVerkle {
snap.layers = map[common.Hash]snapshot{
root: &diskLayer{
diskdb: diskdb,
triedb: triedb,
root: root,
cache: fastcache.New(cache * 1024 * 1024),
},
}
return snap, nil
}
log.Warn("Failed to load snapshot, regenerating", "err", err)
snap.Rebuild(root)
return snap, nil

View File

@@ -21,67 +21,11 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
// wipeSnapshot starts a goroutine to iterate over the entire key-value database
// and delete all the data associated with the snapshot (accounts, storage,
// metadata). After all is done, the snapshot range of the database is compacted
// to free up unused data blocks.
func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} {
// Wipe the snapshot root marker synchronously
if full {
rawdb.DeleteSnapshotRoot(db)
}
// Wipe everything else asynchronously
wiper := make(chan struct{}, 1)
go func() {
if err := wipeContent(db); err != nil {
log.Error("Failed to wipe state snapshot", "err", err) // Database close will trigger this
return
}
close(wiper)
}()
return wiper
}
// wipeContent iterates over the entire key-value database and deletes all the
// data associated with the snapshot (accounts, storage), but not the root hash
// as the wiper is meant to run on a background thread but the root needs to be
// removed in sync to avoid data races. After all is done, the snapshot range of
// the database is compacted to free up unused data blocks.
func wipeContent(db ethdb.KeyValueStore) error {
if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil, len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil {
return err
}
if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, nil, nil, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength, snapWipedStorageMeter, true); err != nil {
return err
}
// Compact the snapshot section of the database to get rid of unused space
start := time.Now()
log.Info("Compacting snapshot account area ")
end := common.CopyBytes(rawdb.SnapshotAccountPrefix)
end[len(end)-1]++
if err := db.Compact(rawdb.SnapshotAccountPrefix, end); err != nil {
return err
}
log.Info("Compacting snapshot storage area ")
end = common.CopyBytes(rawdb.SnapshotStoragePrefix)
end[len(end)-1]++
if err := db.Compact(rawdb.SnapshotStoragePrefix, end); err != nil {
return err
}
log.Info("Compacted snapshot area in database", "elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// wipeKeyRange deletes a range of keys from the database starting with prefix
// and having a specific total key length. The start and limit is optional for
// specifying a particular key range for deletion.

View File

@@ -30,95 +30,50 @@ import (
func TestWipe(t *testing.T) {
// Create a database with some random snapshot data
db := memorydb.New()
for i := 0; i < 128; i++ {
rawdb.WriteAccountSnapshot(db, randomHash(), randomHash().Bytes())
}
// Add some random non-snapshot data too to make wiping harder
for i := 0; i < 500; i++ {
// Generate keys with wrong length for a state snapshot item
keysuffix := make([]byte, 31)
rand.Read(keysuffix)
db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes())
keysuffix = make([]byte, 33)
rand.Read(keysuffix)
db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes())
}
count := func() (items int) {
it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
defer it.Release()
for it.Next() {
if len(it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
items++
}
}
return items
}
// Sanity check that all the keys are present
if items := count(); items != 128 {
t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128)
}
// Wipe the accounts
if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil,
len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil {
t.Fatal(err)
}
// Iterate over the database and ensure no snapshot information remains
if items := count(); items != 0 {
t.Fatalf("snapshot size mismatch: have %d, want %d", items, 0)
}
// Iterate over the database and ensure miscellaneous items are present
items := 0
it := db.NewIterator(nil, nil)
defer it.Release()
for it.Next() {
items++
}
if items != 1000 {
t.Fatalf("misc item count mismatch: have %d, want %d", items, 1000)
}
}

View File

@@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -239,9 +241,13 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if metrics.EnabledExpensive {
meter = &s.db.StorageReads
}
if !s.db.trie.IsVerkle() {
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
s.setError(err)
return common.Hash{}
}
} else {
panic("verkle trees use the snapshot")
}
}
var value common.Hash
@@ -332,7 +338,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
// The snapshot storage map for the object
var storage map[common.Hash][]byte
// Insert all the pending updates into the trie
var tr Trie
if s.db.trie.IsVerkle() {
tr = s.db.trie
} else {
tr = s.getTrie(db)
}
hasher := s.db.hasher
usedStorage := make([][]byte, 0, len(s.pendingStorage))
@@ -345,12 +356,25 @@ func (s *stateObject) updateTrie(db Database) Trie {
var v []byte
if (value == common.Hash{}) {
if tr.IsVerkle() {
k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
s.setError(tr.TryDelete(k))
//s.db.db.TrieDB().DiskDB().Delete(append(s.address[:], key[:]...))
} else {
s.setError(tr.TryDelete(key[:]))
}
s.db.StorageDeleted += 1
} else {
if !tr.IsVerkle() {
// Encoding []byte cannot fail, ok to ignore the error.
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
s.setError(tr.TryUpdate(key[:], v))
} else {
k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
// Update the trie, with v as a value
s.setError(tr.TryUpdate(k, value[:]))
}
s.db.StorageUpdated += 1
}
// If state snapshotting is active, cache the data til commit

View File

@@ -18,6 +18,7 @@
package state
import (
"encoding/binary"
"errors"
"fmt"
"math/big"
@@ -33,6 +34,8 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
type revision struct {
@@ -99,6 +102,8 @@ type StateDB struct {
// Per-transaction access list
accessList *accessList
witness *types.AccessWitness
// Journal of state modifications. This is the backbone of
// Snapshot and RevertToSnapshot.
journal *journal
@@ -144,6 +149,15 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
accessList: newAccessList(),
hasher: crypto.NewKeccakState(),
}
if tr.IsVerkle() {
sdb.witness = types.NewAccessWitness()
if sdb.snaps == nil {
sdb.snaps, err = snapshot.New(db.TrieDB().DiskDB(), db.TrieDB(), 1, root, false, true, false, true)
if err != nil {
return nil, err
}
}
}
if sdb.snaps != nil {
if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
sdb.snapDestructs = make(map[common.Hash]struct{})
@@ -154,6 +168,14 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
return sdb, nil
}
func (s *StateDB) Witness() *types.AccessWitness {
return s.witness
}
func (s *StateDB) SetWitness(aw *types.AccessWitness) {
s.witness = aw
}
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
@@ -162,7 +184,7 @@ func (s *StateDB) StartPrefetcher(namespace string) {
s.prefetcher.close()
s.prefetcher = nil
}
if s.snap != nil {
if s.snap != nil && !s.trie.IsVerkle() {
s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace)
}
}
@@ -266,6 +288,24 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int {
return common.Big0
}
func (s *StateDB) GetNonceLittleEndian(address common.Address) []byte {
var nonceBytes [8]byte
binary.LittleEndian.PutUint64(nonceBytes[:], s.GetNonce(address))
return nonceBytes[:]
}
func (s *StateDB) GetBalanceLittleEndian(address common.Address) []byte {
var paddedBalance [32]byte
balanceBytes := s.GetBalance(address).Bytes()
// swap to little-endian
for i, j := 0, len(balanceBytes)-1; i < j; i, j = i+1, j-1 {
balanceBytes[i], balanceBytes[j] = balanceBytes[j], balanceBytes[i]
}
copy(paddedBalance[:len(balanceBytes)], balanceBytes)
return paddedBalance[:]
}
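Aside: both helpers produce the little-endian, fixed-width leaf encoding that verkle leaves store. A self-contained equivalent of the balance path (math/big import assumed; mirrors the reversal loop above):

// toLittleEndian32 renders a big.Int as a 32-byte little-endian leaf value.
func toLittleEndian32(x *big.Int) [32]byte {
    var out [32]byte
    b := x.Bytes() // big-endian, minimal length
    for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
        b[i], b[j] = b[j], b[i]
    }
    copy(out[:len(b)], b)
    return out
}

// toLittleEndian32(big.NewInt(0x01f4)) -> [0xf4 0x01 0x00 ... 0x00]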
func (s *StateDB) GetNonce(addr common.Address) uint64 {
stateObject := s.getStateObject(addr)
if stateObject != nil {
@@ -460,8 +500,33 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
}
// Encode the account and update the account trie
addr := obj.Address()
if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
}
if s.trie.IsVerkle() {
if len(obj.code) > 0 {
cs := make([]byte, 32)
binary.BigEndian.PutUint64(cs, uint64(len(obj.code)))
if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
}
if obj.dirtyCode {
if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
for i := range chunks {
s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
}
} else {
s.setError(err)
}
}
} else {
cs := []byte{0}
if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
}
}
}
// If state snapshotting is active, cache the data til commit. Note, this
@@ -479,10 +544,19 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
}
// Delete the account from the trie
if !s.trie.IsVerkle() {
addr := obj.Address()
if err := s.trie.TryDelete(addr[:]); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
}
} else {
for i := 0; i <= 255; i++ {
if err := s.trie.TryDelete(trieUtils.GetTreeKeyAccountLeaf(obj.Address().Bytes(), byte(i))); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", obj.Address(), err))
}
}
}
}
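Aside: in verkle mode, contract code lives in the tree itself as a code-size leaf plus one 32-byte leaf per 31-byte code chunk (trie.ChunkifyCode above). A simplified sketch of the chunk layout; the real implementation also records, in byte 0, how many leading bytes of the chunk are PUSH data, which this sketch leaves at zero:

// chunkify splits code into 32-byte leaves: byte 0 is reserved for the count
// of leading PUSHDATA bytes (always zero in this sketch), bytes 1..31 hold code.
func chunkify(code []byte) [][32]byte {
    chunks := make([][32]byte, 0, (len(code)+30)/31)
    for i := 0; i < len(code); i += 31 {
        end := i + 31
        if end > len(code) {
            end = len(code)
        }
        var chunk [32]byte
        copy(chunk[1:], code[i:end])
        chunks = append(chunks, chunk)
    }
    return chunks
}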
@@ -532,6 +606,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
data.Root = emptyRoot
}
}
// NOTE: Do not touch the addresses here, kick the can down the
// road. That is because I don't want to change the interface
// to getDeletedStateObject at this stage, as the PR would then
// have a huge footprint.
// The alternative is to make accesses available via the state
// db instead of the evm. This requires a significant rewrite,
// that isn't currently warranted.
}
// If snapshot unavailable or reading from it failed, load from the database
if s.snap == nil || err != nil {
@@ -659,6 +741,9 @@ func (s *StateDB) Copy() *StateDB {
journal: newJournal(),
hasher: crypto.NewKeccakState(),
}
if s.witness != nil {
state.witness = s.witness.Copy()
}
// Copy the dirty states, logs, and preimages
for addr := range s.journal.dirties {
// As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
@@ -845,7 +930,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// to pull useful data from disk.
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
if s.trie.IsVerkle() {
obj.updateTrie(s.db)
} else {
obj.updateRoot(s.db)
}
}
}
// Now we're about to start to write changes to the trie. The trie is so far
@@ -896,6 +985,20 @@ func (s *StateDB) clearJournalAndRefund() {
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}
// GetTrie returns the account trie.
func (s *StateDB) GetTrie() Trie {
return s.trie
}
func (s *StateDB) Cap(root common.Hash) error {
if s.snaps != nil {
return s.snaps.Cap(root, 0)
}
// pre-verkle path: noop if s.snaps hasn't been
// initialized.
return nil
}
// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if s.dbErr != nil {
@@ -909,17 +1012,27 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
if obj := s.stateObjects[addr]; !obj.deleted {
// Write any storage changes in the state object to its storage trie
committed, err := obj.CommitTrie(s.db)
if err != nil {
return common.Hash{}, err
}
storageCommitted += committed
// Write any contract code associated with the state object
if obj.code != nil && obj.dirtyCode {
if s.trie.IsVerkle() {
if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
for i := range chunks {
s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
}
} else {
s.setError(err)
}
} else {
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
}
obj.dirtyCode = false
}
}
}
if len(s.stateObjectsDirty) > 0 {

View File

@@ -704,7 +704,10 @@ func TestMissingTrieNodes(t *testing.T) {
memDb := rawdb.NewMemoryDatabase()
db := NewDatabase(memDb)
var root common.Hash
state, err := New(common.Hash{}, db, nil)
if err != nil {
panic("nil state")
}
addr := common.BytesToAddress([]byte("so"))
{
state.SetBalance(addr, big.NewInt(1))
@@ -736,7 +739,7 @@ func TestMissingTrieNodes(t *testing.T) {
}
// Modify the state
state.SetBalance(addr, big.NewInt(2))
root, err = state.Commit(false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}

View File

@@ -70,7 +70,10 @@ func makeTestState() (Database, common.Hash, []*testAccount) {
state.updateStateObject(obj)
accounts = append(accounts, acc)
}
root, err := state.Commit(false)
if err != nil {
panic(err)
}
// Return the generated state
return db, root, accounts

View File

@@ -95,6 +95,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) {
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
if config.IsCancun(blockNumber) {
txContext.Accesses = types.NewAccessWitness()
}
evm.Reset(txContext, statedb)
// Apply the transaction to the current state (included in the env).
@@ -128,6 +131,10 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
}
if config.IsCancun(blockNumber) {
statedb.Witness().Merge(txContext.Accesses)
}
// Set the receipt logs and create the bloom filter.
receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash)
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})

View File

@@ -17,10 +17,12 @@
package core
import (
"crypto/ecdsa"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -54,11 +56,12 @@ func TestStateProcessorErrors(t *testing.T) {
LondonBlock: big.NewInt(0),
Ethash: new(params.EthashConfig),
}
signer = types.LatestSigner(config)
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("0202020202020202020202020202020202020202020202020202002020202020")
)
var makeTx = func(key *ecdsa.PrivateKey, nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction {
tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key)
return tx
}
var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction {
@@ -69,7 +72,7 @@ func TestStateProcessorErrors(t *testing.T) {
Gas: gasLimit,
To: &to,
Value: big.NewInt(0),
}), signer, key1)
return tx
}
{ // Tests against a 'recent' chain definition
@@ -82,6 +85,10 @@ func TestStateProcessorErrors(t *testing.T) {
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
},
common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): GenesisAccount{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: math.MaxUint64,
},
},
}
genesis = gspec.MustCommit(db)
@@ -97,32 +104,38 @@ func TestStateProcessorErrors(t *testing.T) {
}{
{ // ErrNonceTooLow
txs: []*types.Transaction{
makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
},
want: "could not apply tx 1 [0x0026256b3939ed97e2c4a6f3fce8ecf83bdcfa6d507c47838c308a1fb0436f62]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1",
},
{ // ErrNonceTooHigh
txs: []*types.Transaction{
makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
},
want: "could not apply tx 0 [0xdebad714ca7f363bd0d8121c4518ad48fa469ca81b0a081be3d10c17460f751b]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0",
},
{ // ErrNonceMax
txs: []*types.Transaction{
makeTx(key2, math.MaxUint64, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
},
want: "could not apply tx 0 [0x84ea18d60eb2bb3b040e3add0eb72f757727122cc257dd858c67cb6591a85986]: nonce has max value: address 0xfd0810DD14796680f72adf1a371963d0745BCc64, nonce: 18446744073709551615",
},
{ // ErrGasLimitReached
txs: []*types.Transaction{
makeTx(key1, 0, common.Address{}, big.NewInt(0), 21000000, big.NewInt(875000000), nil),
},
want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached",
},
{ // ErrInsufficientFundsForTransfer
txs: []*types.Transaction{
makeTx(key1, 0, common.Address{}, big.NewInt(1000000000000000000), params.TxGas, big.NewInt(875000000), nil),
},
want: "could not apply tx 0 [0x98c796b470f7fcab40aaef5c965a602b0238e1034cce6fb73823042dd0638d74]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 1000018375000000000",
},
{ // ErrInsufficientFunds
txs: []*types.Transaction{
makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil),
},
want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 18900000000000000000000",
},
@@ -132,13 +145,13 @@ func TestStateProcessorErrors(t *testing.T) {
// multiplication len(data) +gas_per_byte overflows uint64. Not testable at the moment
{ // ErrIntrinsicGas
txs: []*types.Transaction{
makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(875000000), nil),
},
want: "could not apply tx 0 [0xcf3b049a0b516cb4f9274b3e2a264359e2ba53b2fb64b7bda2c634d5c9d01fca]: intrinsic gas too low: have 20000, want 21000",
},
{ // ErrGasLimitReached
txs: []*types.Transaction{
makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas*1000, big.NewInt(875000000), nil),
},
want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached",
},
@@ -327,3 +340,55 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
// Assemble and return the final block for sealing
return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
}
func TestProcessStateless(t *testing.T) {
var (
config = &params.ChainConfig{
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
Ethash: new(params.EthashConfig),
CancunBlock: big.NewInt(0),
}
signer = types.LatestSigner(config)
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{
Config: config,
Alloc: GenesisAlloc{
common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
},
},
}
)
// Verkle trees use the snapshot, which must be enabled before the
// data is saved into the tree+database.
genesis := gspec.MustCommit(db)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
chain, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 2, func(_ int, gen *BlockGen) {
tx, _ := types.SignTx(types.NewTransaction(0, common.Address{1, 2, 3}, big.NewInt(999), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
gen.AddTx(tx)
tx, _ = types.SignTx(types.NewTransaction(1, common.Address{}, big.NewInt(999), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
gen.AddTx(tx)
tx, _ = types.SignTx(types.NewTransaction(2, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
gen.AddTx(tx)
})
_, err := blockchain.InsertChain(chain)
if err != nil {
t.Fatalf("block imported with error: %v", err)
}
}

View File

@@ -17,6 +17,7 @@
package core
import (
"encoding/binary"
"fmt"
"math"
"math/big"
@@ -115,7 +116,7 @@ func (result *ExecutionResult) Revert() []byte {
}
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028 bool) (uint64, error) {
// Set the starting gas for the raw transaction
var gas uint64
if isContractCreation && isHomestead {
@@ -222,6 +223,9 @@ func (st *StateTransition) preCheck() error {
} else if stNonce > msgNonce {
return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow,
st.msg.From().Hex(), msgNonce, stNonce)
} else if stNonce+1 < stNonce {
return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax,
st.msg.From().Hex(), stNonce)
}
// Make sure the sender is an EOA
if codeHash := st.state.GetCodeHash(st.msg.From()); codeHash != emptyCodeHash && codeHash != (common.Hash{}) {
@@ -256,6 +260,19 @@ func (st *StateTransition) preCheck() error {
return st.buyGas()
}
// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool
// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false
// otherwise, do the subtraction setting the result in gasPool and return true
func tryConsumeGas(gasPool *uint64, gas uint64) bool {
if *gasPool < gas {
*gasPool = 0
return false
}
*gasPool -= gas
return true
}
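Aside: usage is as in TransitionDb below; attempt the charge, and abort when the pool cannot cover it:

gasLeft := uint64(1000)
if !tryConsumeGas(&gasLeft, 1500) {
    // gasLeft has been zeroed; the caller returns an
    // "insufficient gas to cover witness access costs" error
}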
// TransitionDb will transition the state by applying the current message and
// returning the evm execution result with following fields.
//
@@ -299,6 +316,35 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
if st.gas < gas {
return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas)
}
if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber) {
var targetBalance, targetNonce, targetCodeSize, targetCodeKeccak, originBalance, originNonce []byte
targetAddr := msg.To()
originAddr := msg.From()
statelessGasOrigin := st.evm.Accesses.TouchTxOriginAndChargeGas(originAddr.Bytes())
if !tryConsumeGas(&st.gas, statelessGasOrigin) {
return nil, fmt.Errorf("insufficient gas to cover witness access costs")
}
originBalance = st.evm.StateDB.GetBalanceLittleEndian(originAddr)
originNonce = st.evm.StateDB.GetNonceLittleEndian(originAddr)
st.evm.Accesses.SetTxTouchedLeaves(originAddr.Bytes(), originBalance, originNonce)
if msg.To() != nil {
statelessGasDest := st.evm.Accesses.TouchTxExistingAndChargeGas(targetAddr.Bytes())
if !tryConsumeGas(&st.gas, statelessGasDest) {
return nil, fmt.Errorf("insufficient gas to cover witness access costs")
}
targetBalance = st.evm.StateDB.GetBalanceLittleEndian(*targetAddr)
targetNonce = st.evm.StateDB.GetNonceLittleEndian(*targetAddr)
targetCodeKeccak = st.evm.StateDB.GetCodeHash(*targetAddr).Bytes()
codeSize := uint64(st.evm.StateDB.GetCodeSize(*targetAddr))
var codeSizeBytes [32]byte
binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize)
targetCodeSize = codeSizeBytes[:]
st.evm.Accesses.SetTxExistingTouchedLeaves(targetAddr.Bytes(), targetBalance, targetNonce, targetCodeSize, targetCodeKeccak)
}
}
st.gas -= gas
// Check clause 6

View File

@@ -621,8 +621,9 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
if err != nil {
return ErrInvalidSender
}
// Drop non-local transactions under our own minimal accepted gas price or tip.
pendingBaseFee := pool.priced.urgent.baseFee
if !local && tx.EffectiveGasTipIntCmp(pool.gasPrice, pendingBaseFee) < 0 {
return ErrUnderpriced
}
// Ensure the transaction adheres to nonce ordering

View File

@@ -0,0 +1,264 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/utils"
)
// AccessWitness lists the locations of the state that are being accessed
// during the production of a block.
// TODO(@gballet) this doesn't fully support deletions
type AccessWitness struct {
// Branches flags if a given branch has been loaded
Branches map[[31]byte]struct{}
// Chunks contains the initial value of each address
Chunks map[common.Hash][]byte
// The initial value isn't always available at the time an
// address is touched, this map references addresses that
// were touched but can not yet be put in Chunks.
Undefined map[common.Hash]struct{}
}
func NewAccessWitness() *AccessWitness {
return &AccessWitness{
Branches: make(map[[31]byte]struct{}),
Chunks: make(map[common.Hash][]byte),
Undefined: make(map[common.Hash]struct{}),
}
}
// TODO: TouchAndCharge* and SetLeafValue* make redundant calls to GetTreeKey*
func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 {
var gas uint64
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeSize(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeKeccak(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(addr[:]), nil)
return gas
}
func (aw *AccessWitness) TouchAndChargeMessageCall(addr []byte) uint64 {
var gas uint64
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeSize(addr[:]), nil)
return gas
}
func (aw *AccessWitness) SetLeafValuesMessageCall(addr, codeSize []byte) {
var data [32]byte
aw.TouchAddress(utils.GetTreeKeyVersion(addr[:]), data[:])
aw.TouchAddress(utils.GetTreeKeyCodeSize(addr[:]), codeSize[:])
}
func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []byte) uint64 {
var gas uint64
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(callerAddr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(targetAddr[:]), nil)
return gas
}
func (aw *AccessWitness) SetLeafValuesValueTransfer(callerAddr, targetAddr, callerBalance, targetBalance []byte) {
aw.TouchAddress(utils.GetTreeKeyBalance(callerAddr[:]), callerBalance)
aw.TouchAddress(utils.GetTreeKeyBalance(targetAddr[:]), targetBalance)
}
// TouchAndChargeContractCreateInit charges access costs to initiate
// a contract creation
func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte) uint64 {
var gas uint64
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(addr[:]), nil)
return gas
}
func (aw *AccessWitness) SetLeafValuesContractCreateInit(addr, nonce []byte) {
var version [32]byte
aw.TouchAddress(utils.GetTreeKeyVersion(addr[:]), version[:])
aw.TouchAddress(utils.GetTreeKeyNonce(addr[:]), nonce)
}
// TouchAndChargeContractCreateCompleted charges access costs after
// the completion of a contract creation to populate the created account in
// the tree
func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte, withValue bool) uint64 {
var gas uint64
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(addr[:]), nil)
if withValue {
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(addr[:]), nil)
}
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeSize(addr[:]), nil)
gas += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeKeccak(addr[:]), nil)
return gas
}
func (aw *AccessWitness) SetLeafValuesContractCreateCompleted(addr, codeSize, codeKeccak []byte) {
aw.TouchAddress(utils.GetTreeKeyCodeSize(addr[:]), codeSize)
aw.TouchAddress(utils.GetTreeKeyCodeKeccak(addr[:]), codeKeccak)
}
func (aw *AccessWitness) TouchTxAndChargeGas(originAddr, targetAddr []byte) uint64 {
var gasUsed uint64
var version [32]byte
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(originAddr[:]), version[:])
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(originAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(originAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(targetAddr[:]), version[:])
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(targetAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(targetAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeSize(targetAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeKeccak(targetAddr[:]), nil)
return gasUsed
}
func (aw *AccessWitness) TouchTxOriginAndChargeGas(originAddr []byte) uint64 {
var gasUsed uint64
var version [32]byte
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(originAddr[:]), version[:])
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(originAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(originAddr[:]), nil)
return gasUsed
}
func (aw *AccessWitness) TouchTxExistingAndChargeGas(targetAddr []byte) uint64 {
var gasUsed uint64
var version [32]byte
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyVersion(targetAddr[:]), version[:])
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyBalance(targetAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyNonce(targetAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeSize(targetAddr[:]), nil)
gasUsed += aw.TouchAddressAndChargeGas(utils.GetTreeKeyCodeKeccak(targetAddr[:]), nil)
return gasUsed
}
func (aw *AccessWitness) SetTxTouchedLeaves(originAddr, originBalance, originNonce []byte) {
aw.TouchAddress(utils.GetTreeKeyBalance(originAddr[:]), originBalance)
aw.TouchAddress(utils.GetTreeKeyNonce(originAddr[:]), originNonce)
}
func (aw *AccessWitness) SetTxExistingTouchedLeaves(targetAddr, targetBalance, targetNonce, targetCodeSize, targetCodeHash []byte) {
aw.TouchAddress(utils.GetTreeKeyBalance(targetAddr[:]), targetBalance)
aw.TouchAddress(utils.GetTreeKeyNonce(targetAddr[:]), targetNonce)
aw.TouchAddress(utils.GetTreeKeyCodeSize(targetAddr[:]), targetCodeSize)
aw.TouchAddress(utils.GetTreeKeyCodeKeccak(targetAddr[:]), targetCodeHash)
}
// TouchAddress adds any missing access event to the witness, returning two
// flags that are respectively true if the stem or the chunk wasn't already
// present.
func (aw *AccessWitness) TouchAddress(addr, value []byte) (bool, bool) {
var (
stem [31]byte
newStem bool
newSelector bool
)
copy(stem[:], addr[:31])
chunk := common.BytesToHash(addr)
// Check for the presence of the stem
if _, ok := aw.Branches[stem]; !ok {
newStem = true
aw.Branches[stem] = struct{}{}
}
// Check for the presence of the selector
if _, ok := aw.Chunks[chunk]; !ok {
if _, ok := aw.Undefined[chunk]; !ok {
newSelector = true
}
if value == nil {
aw.Undefined[chunk] = struct{}{}
} else {
delete(aw.Undefined, chunk)
aw.Chunks[chunk] = value
}
}
return newStem, newSelector
}
// TouchAddressAndChargeGas checks if a location has already been touched in
// the current witness, and charge extra gas if that isn't the case. This is
// meant to only be called on a tx-context access witness (i.e. before it is
// merged), not a block-context witness: witness costs are charged per tx.
func (aw *AccessWitness) TouchAddressAndChargeGas(addr, value []byte) uint64 {
var gas uint64
nstem, nsel := aw.TouchAddress(addr, value)
if nstem {
gas += params.WitnessBranchCost
}
if nsel {
gas += params.WitnessChunkCost
}
return gas
}
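Aside: the three origin-account leaves (version, balance, nonce) are derived from the same address and so share one 31-byte stem, differing only in the final byte. The first tx-origin touch therefore charges one branch cost plus three chunk costs, and re-touching the same origin within the tx charges nothing further. A worked example (the address is arbitrary):

aw := NewAccessWitness()
origin := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
gas := aw.TouchTxOriginAndChargeGas(origin.Bytes())
// version leaf: new stem + new chunk -> WitnessBranchCost + WitnessChunkCost
// balance leaf: stem already seen    -> WitnessChunkCost
// nonce leaf:   stem already seen    -> WitnessChunkCost
// gas == params.WitnessBranchCost + 3*params.WitnessChunkCost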
// Merge is used to merge the witness that got generated during the execution
// of a tx, with the accumulation of witnesses that were generated during the
// execution of all the txs preceding this one in a given block.
func (aw *AccessWitness) Merge(other *AccessWitness) {
for k := range other.Undefined {
if _, ok := aw.Undefined[k]; !ok {
aw.Undefined[k] = struct{}{}
}
}
for k := range other.Branches {
if _, ok := aw.Branches[k]; !ok {
aw.Branches[k] = struct{}{}
}
}
for k, chunk := range other.Chunks {
if _, ok := aw.Chunks[k]; !ok {
aw.Chunks[k] = chunk
}
}
}
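Aside: the intended lifecycle, as wired up in applyTransaction earlier in this diff: one fresh witness per transaction, merged into the block-level witness after the tx executes, so per-tx gas charging starts from a clean slate while the block keeps the union:

blockWitness := NewAccessWitness()
for _, tx := range txs {
    txWitness := NewAccessWitness()
    // ... execute tx, charging accesses against txWitness ...
    _ = tx
    blockWitness.Merge(txWitness) // first-seen values win on conflict
}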
// Keys returns, predictably, the list of keys that were touched during the
// buildup of the access witness.
func (aw *AccessWitness) Keys() [][]byte {
keys := make([][]byte, 0, len(aw.Chunks))
for key := range aw.Chunks {
var k [32]byte
copy(k[:], key[:])
keys = append(keys, k[:])
}
return keys
}
func (aw *AccessWitness) KeyVals() map[common.Hash][]byte {
return aw.Chunks
}
func (aw *AccessWitness) Copy() *AccessWitness {
naw := &AccessWitness{
Branches: make(map[[31]byte]struct{}),
Chunks: make(map[common.Hash][]byte),
Undefined: make(map[common.Hash]struct{}),
}
naw.Merge(aw)
return naw
}

Some files were not shown because too many files have changed in this diff.