Compare commits
118 Commits
v1.10.10...verkle/onl
SHA1:
a3437cc17c  fe75603d0b  5bac5b3262  fa753db9e8  86bdc3fb39  909049c5fe
7360d168c8  361a328cb7  41c2f754cc  7cb1add36a  03dbc0a210  6d40e11fe3
5ca990184f  15d98607f3  ef08e51e40  e1144745a7  bc06d2c740  97a79f50e8
9f9c03a94c  719bf47354  162780515a  c10a0a62c3  3038e480f5  519cf98b69
4ebeca19d7  1876cb443b  9055cc14ec  ad7c90c198  10b1cd9b1b  66ee9422f5
8151dd67e1  7a0c19f813  0a7672fc9a  7322b2590c  743769f48e  d15e423562
347c37b362  50e07a1e16  23f69c6db0  17f1c2dc0f  d9c13d407f  441c7f2b0f
5d4bcbc14f  6f2c3f2114  e0761432a4  e761255ba7  c52def7f11  ab31fbbde1
16341e0563  fa96718512  33f2813809  b7a6409cc1  05acc272b5  b0b708bf23
abc74a5ffe  e9294a7fe9  5358e491f3  c57df9ca28  f32feeb260  e185a8c818
fb7da82dde  0efed7f58b  6b9c77f060  9489853321  ad11691daf  6c4dc6c388
787a3b185c  851256e856  c4fff0f56e  aa2727f82c  e61b8cb1f8  e1c000b0dd
8be8ba450e  476fb565ce  8d7e6062ec  3bbeb94c1c  53b94f135a  03bc8b7858
f49e90e32c  178debe435  2e8b58f076  551bd6e721  c576fa153a  c2e64db3b1
1e4becb5c1  ff844918e8  c113520d5d  57c252ef4e  410e731bea  31870a59ff
32150f8aa9  bff330335b  52c02ccb1f  eab4d898fd  526c3f6b9e  53f81574e3
c72b16c340  48dc34b8d9  0e7efd696b  2954f40eac  b6fb18479c  3ce9f6d96f
114ed3edcd  7231b3efb8  da1b6f3906  f423290ac8  312e02bca9  0183256e7f
84d8eb2ca8  554b1b9d5f  b97f57882c  60d3cc8b77  c36f8fefc3  433f0919cc
b8dc1e2705  eaa24a8a15  c641cff51a  464885faaa
.circleci/config.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
+# Use the latest 2.1 version of CircleCI pipeline process engine.
+# See: https://circleci.com/docs/2.0/configuration-reference
+version: 2.1
+
+# Define a job to be invoked later in a workflow.
+# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
+jobs:
+  build:
+    working_directory: ~/repo
+    # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
+    # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
+    docker:
+      - image: circleci/golang:1.16.10
+    # Add steps to the job
+    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - go-mod-v4-{{ checksum "go.sum" }}
+      - run:
+          name: Install Dependencies
+          command: go mod download
+      - save_cache:
+          key: go-mod-v4-{{ checksum "go.sum" }}
+          paths:
+            - "/go/pkg/mod"
+      #- run:
+      #    name: Run linter
+      #    command: |
+      #      go run build/ci.go lint
+      - run:
+          name: Run tests
+          command: |
+            go run build/ci.go test -coverage
+      - store_test_results:
+          path: /tmp/test-reports
+
+# Invoke jobs via workflows
+# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
+workflows:
+  sample: # This is the name of the workflow, feel free to change it to better match your workflow.
+    # Inside the workflow, you define the jobs you want to run.
+    jobs:
+      - build
.travis.yml (30 changed lines)
@@ -120,36 +120,6 @@ jobs:
     - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
     - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds

-    # This builder does the Linux Azure MIPS xgo uploads
-    - stage: build
-      if: type = push
-      os: linux
-      dist: bionic
-      services:
-        - docker
-      go: 1.17.x
-      env:
-        - azure-linux-mips
-        - GO111MODULE=on
-      git:
-        submodules: false # avoid cloning ethereum/tests
-      script:
-        - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
-        - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-
-        - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
-        - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-
-        - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
-        - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-
-        - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
-        - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-
     # This builder does the Android Maven and Azure uploads
     - stage: build
       if: type = push
Makefile (98 changed lines)
@@ -2,11 +2,7 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.

-.PHONY: geth android ios geth-cross evm all test clean
-.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
-.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
-.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
-.PHONY: geth-windows geth-windows-386 geth-windows-amd64
+.PHONY: geth android ios evm all test clean

 GOBIN = ./build/bin
 GO ?= latest
@@ -53,95 +49,3 @@ devtools:
 	env GOBIN= go install ./cmd/abigen
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
-
-# Cross Compilation Targets (xgo)
-
-geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
-	@echo "Full cross compilation done:"
-	@ls -ld $(GOBIN)/geth-*
-
-geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
-	@echo "Linux cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-*
-
-geth-linux-386:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
-	@echo "Linux 386 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep 386
-
-geth-linux-amd64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
-	@echo "Linux amd64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep amd64
-
-geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
-	@echo "Linux ARM cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm
-
-geth-linux-arm-5:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
-	@echo "Linux ARMv5 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
-
-geth-linux-arm-6:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
-	@echo "Linux ARMv6 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
-
-geth-linux-arm-7:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
-	@echo "Linux ARMv7 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
-
-geth-linux-arm64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
-	@echo "Linux ARM64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm64
-
-geth-linux-mips:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPS cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mips
-
-geth-linux-mipsle:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPSle cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
-
-geth-linux-mips64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPS64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mips64
-
-geth-linux-mips64le:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPS64le cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
-
-geth-darwin: geth-darwin-386 geth-darwin-amd64
-	@echo "Darwin cross compilation done:"
-	@ls -ld $(GOBIN)/geth-darwin-*
-
-geth-darwin-386:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
-	@echo "Darwin 386 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-darwin-* | grep 386
-
-geth-darwin-amd64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
-	@echo "Darwin amd64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
-
-geth-windows: geth-windows-386 geth-windows-amd64
-	@echo "Windows cross compilation done:"
-	@ls -ld $(GOBIN)/geth-windows-*
-
-geth-windows-386:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
-	@echo "Windows 386 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-windows-* | grep 386
-
-geth-windows-amd64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
-	@echo "Windows amd64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-windows-* | grep amd64
@@ -462,6 +462,12 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
 // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
 // chain doesn't have miners, we just return a gas price of 1 for any call.
 func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if b.pendingBlock.Header().BaseFee != nil {
+		return b.pendingBlock.Header().BaseFee, nil
+	}
 	return big.NewInt(1), nil
 }
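Note: with this change, SuggestGasPrice is EIP-1559 aware on the simulated backend. A minimal sketch of how the new behaviour surfaces to callers, assuming the public backends package and an arbitrary funded test address (both hypothetical choices, not part of this diff):

package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Fund an arbitrary address in the genesis allocation (hypothetical test address).
	alloc := core.GenesisAlloc{
		common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): {
			Balance: big.NewInt(1e18),
		},
	}
	sim := backends.NewSimulatedBackend(alloc, 8_000_000)
	defer sim.Close()

	// On a London-enabled simulated chain this now returns the pending
	// block's base fee; a pre-London chain still gets the 1 wei fallback.
	price, err := sim.SuggestGasPrice(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("suggested gas price:", price)
}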
@@ -916,8 +916,8 @@ func TestSuggestGasPrice(t *testing.T) {
 	if err != nil {
 		t.Errorf("could not get gas price: %v", err)
 	}
-	if gasPrice.Uint64() != uint64(1) {
-		t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
+	if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() {
+		t.Errorf("gas price was not expected value of %v. actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64())
 	}
 }
@@ -370,7 +370,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
 		rawTx, err = c.createLegacyTx(opts, contract, input)
 	} else {
 		// Only query for basefee if gasPrice not specified
-		if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); err != nil {
+		if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
 			return nil, errHead
 		} else if head.BaseFee != nil {
 			rawTx, err = c.createDynamicTx(opts, contract, input, head)
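Note: the hunk above fixes a classic Go variable-shadowing bug. The if statement declares errHead, but the condition previously tested the enclosing err, so a failed HeaderByNumber call went undetected. A self-contained illustration of the pitfall, with a hypothetical fetch helper:

package main

import (
	"errors"
	"fmt"
)

// fetch is a hypothetical helper that always fails.
func fetch() (int, error) {
	return 0, errors.New("boom")
}

func main() {
	var err error // outer error, still nil

	// Buggy pattern: errHead is assigned, but the condition checks the
	// outer err, so the failure from fetch() is silently ignored.
	if head, errHead := fetch(); err != nil {
		fmt.Println("never reached:", head, errHead)
	} else {
		fmt.Println("bug: proceeding despite errHead =", errHead)
	}

	// Correct pattern: test the variable the call actually assigned.
	if head, errHead := fetch(); errHead != nil {
		fmt.Println("caught:", errHead, head)
	}
}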
build/ci.go (45 changed lines)
@@ -33,7 +33,6 @@ Available commands are:
    nsis -- creates a Windows NSIS installer
    aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
    xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
-   xgo [ -alltools ] [ options ] -- cross builds according to options
    purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore

 For all commands, -n prevents execution of external programs (dry run mode).
@@ -188,8 +187,6 @@ func main() {
 		doAndroidArchive(os.Args[2:])
 	case "xcode":
 		doXCodeFramework(os.Args[2:])
-	case "xgo":
-		doXgo(os.Args[2:])
 	case "purge":
 		doPurge(os.Args[2:])
 	default:
@@ -1209,48 +1206,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
 	}
 }
-
-// Cross compilation
-
-func doXgo(cmdline []string) {
-	var (
-		alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`)
-	)
-	flag.CommandLine.Parse(cmdline)
-	env := build.Env()
-	var tc build.GoToolchain
-
-	// Make sure xgo is available for cross compilation
-	build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest"))
-
-	// If all tools building is requested, build everything the builder wants
-	args := append(buildFlags(env), flag.Args()...)
-
-	if *alltools {
-		args = append(args, []string{"--dest", GOBIN}...)
-		for _, res := range allToolsArchiveFiles {
-			if strings.HasPrefix(res, GOBIN) {
-				// Binary tool found, cross build it explicitly
-				args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
-				build.MustRun(xgoTool(args))
-				args = args[:len(args)-1]
-			}
-		}
-		return
-	}
-
-	// Otherwise execute the explicit cross compilation
-	path := args[len(args)-1]
-	args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...)
-	build.MustRun(xgoTool(args))
-}
-
-func xgoTool(args []string) *exec.Cmd {
-	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
-	cmd.Env = os.Environ()
-	cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...)
-	return cmd
-}
-
 // Binary distribution cleanups
 func doPurge(cmdline []string) {
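Note: together with the Makefile and Travis hunks above, this removes the xgo cross-compilation path from the build tool entirely; presumably plain GOOS/GOARCH cross builds cover the pure-Go case. A sketch of what such a native cross build looks like, assuming cgo can be disabled for the target (an assumption on my part, not something this diff states):

package main

import (
	"os"
	"os/exec"
)

// Hypothetical sketch: native Go cross compilation of cmd/geth, the kind
// of build that no longer needs the removed xgo wrapper. Pure-Go builds
// only; cgo-dependent targets would still need a C cross toolchain.
func main() {
	cmd := exec.Command("go", "build", "-o", "build/bin/geth-linux-arm64", "./cmd/geth")
	cmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=arm64", "CGO_ENABLED=0")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}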
@@ -133,7 +133,8 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 		log.Info(fmt.Sprintf("Creating %s = %q", path, val))
 		ttl := rootTTL
 		if path != name {
-			ttl = treeNodeTTL // Max TTL permitted by Cloudflare
+			ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
 		}
 		record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
 		_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)

@@ -115,8 +115,9 @@ var (
 )

 const (
 	rootTTL     = 30 * 60              // 30 min
 	treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks
+	treeNodeTTLCloudflare = 24 * 60 * 60 // 1 day
 )

 // dnsSync performs dnsSyncCommand.
@@ -131,7 +131,7 @@ func (c *Conn) handshake() error {
 		}
 		c.negotiateEthProtocol(msg.Caps)
 		if c.negotiatedProtoVersion == 0 {
-			return fmt.Errorf("unexpected eth protocol version")
+			return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
 		}
 		return nil
 	default:
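Note: negotiation picks the highest eth protocol version both peers advertise, and a result of 0 means the capability sets were disjoint, which the improved error message now spells out. A minimal sketch of that selection logic, using a hypothetical Cap type rather than the real devp2p one:

package main

import "fmt"

// Cap is a hypothetical name/version capability pair, mirroring what an
// RLPx hello message advertises.
type Cap struct {
	Name    string
	Version uint
}

// negotiate returns the highest mutually supported "eth" version,
// or 0 if the peers share none.
func negotiate(local uint, remote []Cap) uint {
	var best uint
	for _, c := range remote {
		if c.Name == "eth" && c.Version <= local && c.Version > best {
			best = c.Version
		}
	}
	return best
}

func main() {
	remote := []Cap{{"eth", 64}, {"eth", 65}, {"snap", 1}}
	fmt.Println(negotiate(66, remote))             // 65
	fmt.Println(negotiate(66, []Cap{{"snap", 1}})) // 0 -> handshake error
}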
@@ -52,35 +52,35 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
 func (s *Suite) AllEthTests() []utesting.Test {
 	return []utesting.Test{
 		// status
-		{Name: "TestStatus", Fn: s.TestStatus},
+		{Name: "TestStatus65", Fn: s.TestStatus65},
 		{Name: "TestStatus66", Fn: s.TestStatus66},
 		// get block headers
-		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
+		{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
 		{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66},
 		{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66},
 		{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66},
 		{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66},
 		// get block bodies
-		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+		{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
 		{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
 		// broadcast
-		{Name: "TestBroadcast", Fn: s.TestBroadcast},
+		{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
 		{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
-		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
+		{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
 		{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
-		{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
+		{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
 		{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
-		{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
+		{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
 		{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
 		// malicious handshakes + status
-		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
-		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
+		{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
+		{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
 		{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
 		{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
 		// test transactions
-		{Name: "TestTransaction", Fn: s.TestTransaction},
+		{Name: "TestTransaction65", Fn: s.TestTransaction65},
 		{Name: "TestTransaction66", Fn: s.TestTransaction66},
-		{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
+		{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
 		{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
 		{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
 		{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
@@ -89,17 +89,17 @@ func (s *Suite) AllEthTests() []utesting.Test {

 func (s *Suite) EthTests() []utesting.Test {
 	return []utesting.Test{
-		{Name: "TestStatus", Fn: s.TestStatus},
-		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
-		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
-		{Name: "TestBroadcast", Fn: s.TestBroadcast},
-		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
-		{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
-		{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
-		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
-		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
-		{Name: "TestTransaction", Fn: s.TestTransaction},
-		{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
+		{Name: "TestStatus65", Fn: s.TestStatus65},
+		{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
+		{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
+		{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
+		{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
+		{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
+		{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
+		{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
+		{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
+		{Name: "TestTransaction65", Fn: s.TestTransaction65},
+		{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
 	}
 }

@@ -130,9 +130,9 @@ var (
 	eth65 = false // indicates whether suite should negotiate eth65 connection or below.
 )

-// TestStatus attempts to connect to the given node and exchange
+// TestStatus65 attempts to connect to the given node and exchange
 // a status message with it.
-func (s *Suite) TestStatus(t *utesting.T) {
+func (s *Suite) TestStatus65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -156,9 +156,9 @@ func (s *Suite) TestStatus66(t *utesting.T) {
 	}
 }

-// TestGetBlockHeaders tests whether the given node can respond to
+// TestGetBlockHeaders65 tests whether the given node can respond to
 // a `GetBlockHeaders` request accurately.
-func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
+func (s *Suite) TestGetBlockHeaders65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -392,9 +392,9 @@ func (s *Suite) TestZeroRequestID66(t *utesting.T) {
 	}
 }

-// TestGetBlockBodies tests whether the given node can respond to
+// TestGetBlockBodies65 tests whether the given node can respond to
 // a `GetBlockBodies` request and that the response is accurate.
-func (s *Suite) TestGetBlockBodies(t *utesting.T) {
+func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -460,9 +460,9 @@ func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
 	}
 }

-// TestBroadcast tests whether a block announcement is correctly
+// TestBroadcast65 tests whether a block announcement is correctly
 // propagated to the given node's peer(s).
-func (s *Suite) TestBroadcast(t *utesting.T) {
+func (s *Suite) TestBroadcast65(t *utesting.T) {
 	if err := s.sendNextBlock(eth65); err != nil {
 		t.Fatalf("block broadcast failed: %v", err)
 	}
@@ -476,8 +476,8 @@ func (s *Suite) TestBroadcast66(t *utesting.T) {
 	}
 }

-// TestLargeAnnounce tests the announcement mechanism with a large block.
-func (s *Suite) TestLargeAnnounce(t *utesting.T) {
+// TestLargeAnnounce65 tests the announcement mechanism with a large block.
+func (s *Suite) TestLargeAnnounce65(t *utesting.T) {
 	nextBlock := len(s.chain.blocks)
 	blocks := []*NewBlock{
 		{
@@ -569,8 +569,8 @@ func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
 	}
 }

-// TestOldAnnounce tests the announcement mechanism with an old block.
-func (s *Suite) TestOldAnnounce(t *utesting.T) {
+// TestOldAnnounce65 tests the announcement mechanism with an old block.
+func (s *Suite) TestOldAnnounce65(t *utesting.T) {
 	if err := s.oldAnnounce(eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -584,9 +584,9 @@ func (s *Suite) TestOldAnnounce66(t *utesting.T) {
 	}
 }

-// TestBlockHashAnnounce sends a new block hash announcement and expects
+// TestBlockHashAnnounce65 sends a new block hash announcement and expects
 // the node to perform a `GetBlockHeaders` request.
-func (s *Suite) TestBlockHashAnnounce(t *utesting.T) {
+func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) {
 	if err := s.hashAnnounce(eth65); err != nil {
 		t.Fatalf("block hash announcement failed: %v", err)
 	}
@@ -600,8 +600,8 @@ func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) {
 	}
 }

-// TestMaliciousHandshake tries to send malicious data during the handshake.
-func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
+// TestMaliciousHandshake65 tries to send malicious data during the handshake.
+func (s *Suite) TestMaliciousHandshake65(t *utesting.T) {
 	if err := s.maliciousHandshakes(t, eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -614,8 +614,8 @@ func (s *Suite) TestMaliciousHandshake66(t *utesting.T) {
 	}
 }

-// TestMaliciousStatus sends a status package with a large total difficulty.
-func (s *Suite) TestMaliciousStatus(t *utesting.T) {
+// TestMaliciousStatus65 sends a status package with a large total difficulty.
+func (s *Suite) TestMaliciousStatus65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -641,9 +641,9 @@ func (s *Suite) TestMaliciousStatus66(t *utesting.T) {
 	}
 }

-// TestTransaction sends a valid transaction to the node and
+// TestTransaction65 sends a valid transaction to the node and
 // checks if the transaction gets propagated.
-func (s *Suite) TestTransaction(t *utesting.T) {
+func (s *Suite) TestTransaction65(t *utesting.T) {
 	if err := s.sendSuccessfulTxs(t, eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -657,9 +657,9 @@ func (s *Suite) TestTransaction66(t *utesting.T) {
 	}
 }

-// TestMaliciousTx sends several invalid transactions and tests whether
+// TestMaliciousTx65 sends several invalid transactions and tests whether
 // the node will propagate them.
-func (s *Suite) TestMaliciousTx(t *utesting.T) {
+func (s *Suite) TestMaliciousTx65(t *utesting.T) {
 	if err := s.sendMaliciousTxs(t, eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -229,7 +229,7 @@ func PingPastExpiration(t *utesting.T) {

 	reply, _, _ := te.read(te.l1)
 	if reply != nil {
-		t.Fatal("Expected no reply, got", reply)
+		t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply)
 	}
 }

@@ -247,7 +247,7 @@ func WrongPacketType(t *utesting.T) {

 	reply, _, _ := te.read(te.l1)
 	if reply != nil {
-		t.Fatal("Expected no reply, got", reply)
+		t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply)
 	}
 }

@@ -282,9 +282,16 @@ func FindnodeWithoutEndpointProof(t *utesting.T) {
 	rand.Read(req.Target[:])
 	te.send(te.l1, &req)

-	reply, _, _ := te.read(te.l1)
-	if reply != nil {
-		t.Fatal("Expected no response, got", reply)
+	for {
+		reply, _, _ := te.read(te.l1)
+		if reply == nil {
+			// No response, all good
+			break
+		}
+		if reply.Kind() == v4wire.PingPacket {
+			continue // A ping is ok, just ignore it
+		}
+		t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply)
 	}
 }

@@ -304,7 +311,7 @@ func BasicFindnode(t *utesting.T) {
 		t.Fatal("read find nodes", err)
 	}
 	if reply.Kind() != v4wire.NeighborsPacket {
-		t.Fatal("Expected neighbors, got", reply.Name())
+		t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply)
 	}
 }

@@ -341,7 +348,7 @@ func UnsolicitedNeighbors(t *utesting.T) {
 		t.Fatal("read find nodes", err)
 	}
 	if reply.Kind() != v4wire.NeighborsPacket {
-		t.Fatal("Expected neighbors, got", reply.Name())
+		t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply)
 	}
 	nodes := reply.(*v4wire.Neighbors).Nodes
 	if contains(nodes, encFakeKey) {
@@ -235,6 +235,8 @@ func ethFilter(args []string) (nodeFilter, error) {
 		filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
 	case "ropsten":
 		filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)
+	case "sepolia":
+		filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)
 	default:
 		return nil, fmt.Errorf("unknown network %q", args[0])
 	}
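Note: a static fork-ID filter validates a remote peer's fork ID against a fixed chain configuration and genesis hash, with no local chain head involved. A small sketch using the forkid package, with the head-at-genesis value purely illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// A static filter checks a remote fork ID against a fixed chain
	// config + genesis, independent of any live chain head.
	filter := forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)

	// Fork ID of a Sepolia node at block 0 (illustrative only).
	id := forkid.NewID(params.SepoliaChainConfig, params.SepoliaGenesisHash, 0)
	fmt.Println("accepted:", filter(id) == nil)
}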
cmd/evm/internal/t8ntool/block.go (new file, 380 lines)
@@ -0,0 +1,380 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package t8ntool

import (
	"crypto/ecdsa"
	"encoding/json"
	"errors"
	"fmt"
	"math/big"
	"os"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/consensus/clique"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)

//go:generate gencodec -type header -field-override headerMarshaling -out gen_header.go
type header struct {
	ParentHash  common.Hash       `json:"parentHash"`
	OmmerHash   *common.Hash      `json:"sha3Uncles"`
	Coinbase    *common.Address   `json:"miner"`
	Root        common.Hash       `json:"stateRoot" gencodec:"required"`
	TxHash      *common.Hash      `json:"transactionsRoot"`
	ReceiptHash *common.Hash      `json:"receiptsRoot"`
	Bloom       types.Bloom       `json:"logsBloom"`
	Difficulty  *big.Int          `json:"difficulty"`
	Number      *big.Int          `json:"number" gencodec:"required"`
	GasLimit    uint64            `json:"gasLimit" gencodec:"required"`
	GasUsed     uint64            `json:"gasUsed"`
	Time        uint64            `json:"timestamp" gencodec:"required"`
	Extra       []byte            `json:"extraData"`
	MixDigest   common.Hash       `json:"mixHash"`
	Nonce       *types.BlockNonce `json:"nonce"`
	BaseFee     *big.Int          `json:"baseFeePerGas" rlp:"optional"`
}

type headerMarshaling struct {
	Difficulty *math.HexOrDecimal256
	Number     *math.HexOrDecimal256
	GasLimit   math.HexOrDecimal64
	GasUsed    math.HexOrDecimal64
	Time       math.HexOrDecimal64
	Extra      hexutil.Bytes
	BaseFee    *math.HexOrDecimal256
}

type bbInput struct {
	Header    *header      `json:"header,omitempty"`
	OmmersRlp []string     `json:"ommers,omitempty"`
	TxRlp     string       `json:"txs,omitempty"`
	Clique    *cliqueInput `json:"clique,omitempty"`

	Ethash    bool                 `json:"-"`
	EthashDir string               `json:"-"`
	PowMode   ethash.Mode          `json:"-"`
	Txs       []*types.Transaction `json:"-"`
	Ommers    []*types.Header      `json:"-"`
}

type cliqueInput struct {
	Key       *ecdsa.PrivateKey
	Voted     *common.Address
	Authorize *bool
	Vanity    common.Hash
}

// UnmarshalJSON implements json.Unmarshaler interface.
func (c *cliqueInput) UnmarshalJSON(input []byte) error {
	var x struct {
		Key       *common.Hash    `json:"secretKey"`
		Voted     *common.Address `json:"voted"`
		Authorize *bool           `json:"authorize"`
		Vanity    common.Hash     `json:"vanity"`
	}
	if err := json.Unmarshal(input, &x); err != nil {
		return err
	}
	if x.Key == nil {
		return errors.New("missing required field 'secretKey' for cliqueInput")
	}
	if ecdsaKey, err := crypto.ToECDSA(x.Key[:]); err != nil {
		return err
	} else {
		c.Key = ecdsaKey
	}
	c.Voted = x.Voted
	c.Authorize = x.Authorize
	c.Vanity = x.Vanity
	return nil
}

// ToBlock converts i into a *types.Block
func (i *bbInput) ToBlock() *types.Block {
	header := &types.Header{
		ParentHash:  i.Header.ParentHash,
		UncleHash:   types.EmptyUncleHash,
		Coinbase:    common.Address{},
		Root:        i.Header.Root,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
		Bloom:       i.Header.Bloom,
		Difficulty:  common.Big0,
		Number:      i.Header.Number,
		GasLimit:    i.Header.GasLimit,
		GasUsed:     i.Header.GasUsed,
		Time:        i.Header.Time,
		Extra:       i.Header.Extra,
		MixDigest:   i.Header.MixDigest,
		BaseFee:     i.Header.BaseFee,
	}

	// Fill optional values.
	if i.Header.OmmerHash != nil {
		header.UncleHash = *i.Header.OmmerHash
	} else if len(i.Ommers) != 0 {
		// Calculate the ommer hash if none is provided and there are ommers to hash
		header.UncleHash = types.CalcUncleHash(i.Ommers)
	}
	if i.Header.Coinbase != nil {
		header.Coinbase = *i.Header.Coinbase
	}
	if i.Header.TxHash != nil {
		header.TxHash = *i.Header.TxHash
	}
	if i.Header.ReceiptHash != nil {
		header.ReceiptHash = *i.Header.ReceiptHash
	}
	if i.Header.Nonce != nil {
		header.Nonce = *i.Header.Nonce
	}
	if header.Difficulty != nil {
		header.Difficulty = i.Header.Difficulty
	}
	return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers)
}

// SealBlock seals the given block using the configured engine.
func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) {
	switch {
	case i.Ethash:
		return i.sealEthash(block)
	case i.Clique != nil:
		return i.sealClique(block)
	default:
		return block, nil
	}
}

// sealEthash seals the given block using ethash.
func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) {
	if i.Header.Nonce != nil {
		return nil, NewError(ErrorConfig, fmt.Errorf("sealing with ethash will overwrite provided nonce"))
	}
	ethashConfig := ethash.Config{
		PowMode:        i.PowMode,
		DatasetDir:     i.EthashDir,
		CacheDir:       i.EthashDir,
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
		CachesInMem:    2,
		CachesOnDisk:   3,
	}
	engine := ethash.New(ethashConfig, nil, true)
	defer engine.Close()
	// Use a buffered chan for results.
	// If the testmode is used, the sealer will return quickly, and complain
	// "Sealing result is not read by miner" if it cannot write the result.
	results := make(chan *types.Block, 1)
	if err := engine.Seal(nil, block, results, nil); err != nil {
		panic(fmt.Sprintf("failed to seal block: %v", err))
	}
	found := <-results
	return block.WithSeal(found.Header()), nil
}

// sealClique seals the given block using clique.
func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {
	// If any clique value overwrites an explicit header value, fail
	// to avoid silently building a block with unexpected values.
	if i.Header.Extra != nil {
		return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique will overwrite provided extra data"))
	}
	header := block.Header()
	if i.Clique.Voted != nil {
		if i.Header.Coinbase != nil {
			return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided coinbase"))
		}
		header.Coinbase = *i.Clique.Voted
	}
	if i.Clique.Authorize != nil {
		if i.Header.Nonce != nil {
			return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided nonce"))
		}
		if *i.Clique.Authorize {
			header.Nonce = [8]byte{}
		} else {
			header.Nonce = [8]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
		}
	}
	// Extra is fixed 32 byte vanity and 65 byte signature
	header.Extra = make([]byte, 32+65)
	copy(header.Extra[0:32], i.Clique.Vanity.Bytes()[:])

	// Sign the seal hash and fill in the rest of the extra data
	h := clique.SealHash(header)
	sighash, err := crypto.Sign(h[:], i.Clique.Key)
	if err != nil {
		return nil, err
	}
	copy(header.Extra[32:], sighash)
	block = block.WithSeal(header)
	return block, nil
}

// BuildBlock constructs a block from the given inputs.
func BuildBlock(ctx *cli.Context) error {
	// Configure the go-ethereum logger
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
	log.Root().SetHandler(glogger)

	baseDir, err := createBasedir(ctx)
	if err != nil {
		return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
	}
	inputData, err := readInput(ctx)
	if err != nil {
		return err
	}
	block := inputData.ToBlock()
	block, err = inputData.SealBlock(block)
	if err != nil {
		return err
	}
	return dispatchBlock(ctx, baseDir, block)
}

func readInput(ctx *cli.Context) (*bbInput, error) {
	var (
		headerStr  = ctx.String(InputHeaderFlag.Name)
		ommersStr  = ctx.String(InputOmmersFlag.Name)
		txsStr     = ctx.String(InputTxsRlpFlag.Name)
		cliqueStr  = ctx.String(SealCliqueFlag.Name)
		ethashOn   = ctx.Bool(SealEthashFlag.Name)
		ethashDir  = ctx.String(SealEthashDirFlag.Name)
		ethashMode = ctx.String(SealEthashModeFlag.Name)
		inputData  = &bbInput{}
	)
	if ethashOn && cliqueStr != "" {
		return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen"))
	}
	if ethashOn {
		inputData.Ethash = ethashOn
		inputData.EthashDir = ethashDir
		switch ethashMode {
		case "normal":
			inputData.PowMode = ethash.ModeNormal
		case "test":
			inputData.PowMode = ethash.ModeTest
		case "fake":
			inputData.PowMode = ethash.ModeFake
		default:
			return nil, NewError(ErrorConfig, fmt.Errorf("unknown pow mode: %s, supported modes: test, fake, normal", ethashMode))
		}
	}
	if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector {
		decoder := json.NewDecoder(os.Stdin)
		if err := decoder.Decode(inputData); err != nil {
			return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
		}
	}
	if cliqueStr != stdinSelector && cliqueStr != "" {
		var clique cliqueInput
		if err := readFile(cliqueStr, "clique", &clique); err != nil {
			return nil, err
		}
		inputData.Clique = &clique
	}
	if headerStr != stdinSelector {
		var env header
		if err := readFile(headerStr, "header", &env); err != nil {
			return nil, err
		}
		inputData.Header = &env
	}
	if ommersStr != stdinSelector && ommersStr != "" {
		var ommers []string
		if err := readFile(ommersStr, "ommers", &ommers); err != nil {
			return nil, err
		}
		inputData.OmmersRlp = ommers
	}
	if txsStr != stdinSelector {
		var txs string
		if err := readFile(txsStr, "txs", &txs); err != nil {
			return nil, err
		}
		inputData.TxRlp = txs
	}
	// Deserialize rlp txs and ommers
	var (
		ommers = []*types.Header{}
		txs    = []*types.Transaction{}
	)
	if inputData.TxRlp != "" {
		if err := rlp.DecodeBytes(common.FromHex(inputData.TxRlp), &txs); err != nil {
			return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode transaction from rlp data: %v", err))
		}
		inputData.Txs = txs
	}
	for _, str := range inputData.OmmersRlp {
		type extblock struct {
			Header *types.Header
			Txs    []*types.Transaction
			Ommers []*types.Header
		}
		var ommer *extblock
		if err := rlp.DecodeBytes(common.FromHex(str), &ommer); err != nil {
			return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode ommer from rlp data: %v", err))
		}
		ommers = append(ommers, ommer.Header)
	}
	inputData.Ommers = ommers

	return inputData, nil
}

// dispatchOutput writes the output data to either stderr or stdout, or to the specified
// files
func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error {
	raw, _ := rlp.EncodeToBytes(block)

	type blockInfo struct {
		Rlp  hexutil.Bytes `json:"rlp"`
		Hash common.Hash   `json:"hash"`
	}
	var enc blockInfo
	enc.Rlp = raw
	enc.Hash = block.Hash()

	b, err := json.MarshalIndent(enc, "", "  ")
	if err != nil {
		return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
	}
	switch dest := ctx.String(OutputBlockFlag.Name); dest {
	case "stdout":
		os.Stdout.Write(b)
		os.Stdout.WriteString("\n")
	case "stderr":
		os.Stderr.Write(b)
		os.Stderr.WriteString("\n")
	default:
		if err := saveFile(baseDir, dest, enc); err != nil {
			return err
		}
	}
	return nil
}
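Note: the clique sealing step above packs a 32-byte vanity and a 65-byte signature into the header extra-data, signing over clique.SealHash. A minimal standalone sketch of just that step, using the public clique and crypto packages and a throwaway key (the header values are illustrative, not from this diff):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/clique"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Throwaway signer key, never reuse for anything real.
	key, _ := crypto.GenerateKey()

	header := &types.Header{
		Number:     big.NewInt(1),
		Difficulty: big.NewInt(2),
		Extra:      make([]byte, 32+65), // 32-byte vanity + 65-byte seal
	}

	// Sign the clique seal hash and place the signature after the vanity,
	// mirroring what sealClique does above.
	h := clique.SealHash(header)
	sig, err := crypto.Sign(h[:], key)
	if err != nil {
		panic(err)
	}
	copy(header.Extra[32:], sig)

	fmt.Printf("sealed extra-data: %d bytes, seal hash %x\n", len(header.Extra), h)
}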
@@ -49,12 +49,13 @@ type Prestate struct {
 type ExecutionResult struct {
 	StateRoot   common.Hash    `json:"stateRoot"`
 	TxRoot      common.Hash    `json:"txRoot"`
-	ReceiptRoot common.Hash    `json:"receiptRoot"`
+	ReceiptRoot common.Hash    `json:"receiptsRoot"`
 	LogsHash    common.Hash    `json:"logsHash"`
 	Bloom       types.Bloom    `json:"logsBloom" gencodec:"required"`
 	Receipts    types.Receipts `json:"receipts"`
 	Rejected    []*rejectedTx  `json:"rejected,omitempty"`
 	Difficulty  *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+	GasUsed     math.HexOrDecimal64   `json:"gasUsed"`
 }

 type ommer struct {
@@ -96,7 +97,7 @@ type rejectedTx struct {
 // Apply applies a set of transactions to a pre-state
 func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	txs types.Transactions, miningReward int64,
-	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
+	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) {

 	// Capture errors for BLOCKHASH operation, if we haven't been supplied the
 	// required blockhashes
@@ -255,6 +256,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		Receipts:   receipts,
 		Rejected:   rejectedTxs,
 		Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
+		GasUsed:    (math.HexOrDecimal64)(gasUsed),
 	}
 	return statedb, execRs, nil
 }
@@ -32,7 +32,11 @@ var (
|
|||||||
}
|
}
|
||||||
TraceDisableMemoryFlag = cli.BoolTFlag{
|
TraceDisableMemoryFlag = cli.BoolTFlag{
|
||||||
Name: "trace.nomemory",
|
Name: "trace.nomemory",
|
||||||
Usage: "Disable full memory dump in traces",
|
Usage: "Disable full memory dump in traces (deprecated)",
|
||||||
|
}
|
||||||
|
TraceEnableMemoryFlag = cli.BoolFlag{
|
||||||
|
Name: "trace.memory",
|
||||||
|
Usage: "Enable full memory dump in traces",
|
||||||
}
|
}
|
||||||
TraceDisableStackFlag = cli.BoolFlag{
|
TraceDisableStackFlag = cli.BoolFlag{
|
||||||
Name: "trace.nostack",
|
Name: "trace.nostack",
|
||||||
@@ -40,7 +44,11 @@ var (
|
|||||||
}
|
}
|
||||||
TraceDisableReturnDataFlag = cli.BoolTFlag{
|
TraceDisableReturnDataFlag = cli.BoolTFlag{
|
||||||
Name: "trace.noreturndata",
|
Name: "trace.noreturndata",
|
||||||
Usage: "Disable return data output in traces",
|
Usage: "Disable return data output in traces (deprecated)",
|
||||||
|
}
|
||||||
|
TraceEnableReturnDataFlag = cli.BoolFlag{
|
||||||
|
Name: "trace.returndata",
|
||||||
|
Usage: "Enable return data output in traces",
|
||||||
}
|
}
|
||||||
OutputBasedir = cli.StringFlag{
|
OutputBasedir = cli.StringFlag{
|
||||||
Name: "output.basedir",
|
Name: "output.basedir",
|
||||||
@@ -68,6 +76,14 @@ var (
|
|||||||
"\t<file> - into the file <file> ",
|
"\t<file> - into the file <file> ",
|
||||||
Value: "result.json",
|
Value: "result.json",
|
||||||
}
|
}
|
||||||
|
OutputBlockFlag = cli.StringFlag{
|
||||||
|
Name: "output.block",
|
||||||
|
Usage: "Determines where to put the `block` after building.\n" +
|
||||||
|
"\t`stdout` - into the stdout output\n" +
|
||||||
|
"\t`stderr` - into the stderr output\n" +
|
||||||
|
"\t<file> - into the file <file> ",
|
||||||
|
Value: "block.json",
|
||||||
|
}
|
||||||
InputAllocFlag = cli.StringFlag{
|
InputAllocFlag = cli.StringFlag{
|
||||||
Name: "input.alloc",
|
Name: "input.alloc",
|
||||||
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
|
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
|
||||||
@@ -81,10 +97,41 @@ var (
|
|||||||
InputTxsFlag = cli.StringFlag{
|
InputTxsFlag = cli.StringFlag{
|
||||||
Name: "input.txs",
|
Name: "input.txs",
|
||||||
Usage: "`stdin` or file name of where to find the transactions to apply. " +
|
Usage: "`stdin` or file name of where to find the transactions to apply. " +
|
||||||
"If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
|
"If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
|
||||||
"The '.rlp' format is identical to the output.body format.",
|
"The '.rlp' format is identical to the output.body format.",
|
||||||
Value: "txs.json",
|
Value: "txs.json",
|
||||||
}
|
}
|
||||||
|
InputHeaderFlag = cli.StringFlag{
|
||||||
|
Name: "input.header",
|
||||||
|
Usage: "`stdin` or file name of where to find the block header to use.",
|
||||||
|
Value: "header.json",
|
||||||
|
}
|
||||||
|
InputOmmersFlag = cli.StringFlag{
|
||||||
|
Name: "input.ommers",
|
||||||
|
Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.",
|
||||||
|
}
|
||||||
|
InputTxsRlpFlag = cli.StringFlag{
|
||||||
|
Name: "input.txs",
|
||||||
|
Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
|
||||||
|
Value: "txs.rlp",
|
||||||
|
}
|
||||||
|
SealCliqueFlag = cli.StringFlag{
|
||||||
|
Name: "seal.clique",
|
||||||
|
Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
|
||||||
|
}
|
||||||
|
SealEthashFlag = cli.BoolFlag{
|
||||||
|
Name: "seal.ethash",
|
||||||
|
Usage: "Seal block with ethash.",
|
||||||
|
}
|
||||||
|
SealEthashDirFlag = cli.StringFlag{
|
||||||
|
Name: "seal.ethash.dir",
|
||||||
|
Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.",
|
||||||
|
}
|
||||||
|
SealEthashModeFlag = cli.StringFlag{
|
||||||
|
Name: "seal.ethash.mode",
|
||||||
|
Usage: "Defines the type and amount of PoW verification an ethash engine makes.",
|
||||||
|
Value: "normal",
|
||||||
|
}
|
||||||
RewardFlag = cli.Int64Flag{
|
RewardFlag = cli.Int64Flag{
|
||||||
Name: "state.reward",
|
Name: "state.reward",
|
||||||
Usage: "Mining reward. Set to -1 to disable",
|
Usage: "Mining reward. Set to -1 to disable",
|
||||||
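One subtlety in the flag declarations above: in the urfave/cli v1 package used here, a `BoolTFlag` defaults to *true*, while a plain `BoolFlag` defaults to false. That is why the deprecated `trace.nomemory`/`trace.noreturndata` switches stay `BoolTFlag` (preserving the old default behaviour of not dumping memory or return data) while their `trace.memory`/`trace.returndata` replacements are ordinary `BoolFlag`s. A minimal, self-contained sketch of the difference (flag names here are illustrative only):

```go
package main

import (
	"fmt"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolTFlag{Name: "old", Usage: "defaults to true; pass --old=false to unset"},
		cli.BoolFlag{Name: "new", Usage: "defaults to false; pass --new to set"},
	}
	app.Action = func(ctx *cli.Context) error {
		// BoolT is the accessor for BoolTFlag values.
		fmt.Println("old:", ctx.BoolT("old"), "new:", ctx.Bool("new"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```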
135 cmd/evm/internal/t8ntool/gen_header.go Normal file
@@ -0,0 +1,135 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.

package t8ntool

import (
	"encoding/json"
	"errors"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/core/types"
)

var _ = (*headerMarshaling)(nil)

// MarshalJSON marshals as JSON.
func (h header) MarshalJSON() ([]byte, error) {
	type header struct {
		ParentHash  common.Hash           `json:"parentHash"`
		OmmerHash   *common.Hash          `json:"sha3Uncles"`
		Coinbase    *common.Address       `json:"miner"`
		Root        common.Hash           `json:"stateRoot"        gencodec:"required"`
		TxHash      *common.Hash          `json:"transactionsRoot"`
		ReceiptHash *common.Hash          `json:"receiptsRoot"`
		Bloom       types.Bloom           `json:"logsBloom"`
		Difficulty  *math.HexOrDecimal256 `json:"difficulty"`
		Number      *math.HexOrDecimal256 `json:"number"           gencodec:"required"`
		GasLimit    math.HexOrDecimal64   `json:"gasLimit"         gencodec:"required"`
		GasUsed     math.HexOrDecimal64   `json:"gasUsed"`
		Time        math.HexOrDecimal64   `json:"timestamp"        gencodec:"required"`
		Extra       hexutil.Bytes         `json:"extraData"`
		MixDigest   common.Hash           `json:"mixHash"`
		Nonce       *types.BlockNonce     `json:"nonce"`
		BaseFee     *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
	}
	var enc header
	enc.ParentHash = h.ParentHash
	enc.OmmerHash = h.OmmerHash
	enc.Coinbase = h.Coinbase
	enc.Root = h.Root
	enc.TxHash = h.TxHash
	enc.ReceiptHash = h.ReceiptHash
	enc.Bloom = h.Bloom
	enc.Difficulty = (*math.HexOrDecimal256)(h.Difficulty)
	enc.Number = (*math.HexOrDecimal256)(h.Number)
	enc.GasLimit = math.HexOrDecimal64(h.GasLimit)
	enc.GasUsed = math.HexOrDecimal64(h.GasUsed)
	enc.Time = math.HexOrDecimal64(h.Time)
	enc.Extra = h.Extra
	enc.MixDigest = h.MixDigest
	enc.Nonce = h.Nonce
	enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
	return json.Marshal(&enc)
}

// UnmarshalJSON unmarshals from JSON.
func (h *header) UnmarshalJSON(input []byte) error {
	type header struct {
		ParentHash  *common.Hash          `json:"parentHash"`
		OmmerHash   *common.Hash          `json:"sha3Uncles"`
		Coinbase    *common.Address       `json:"miner"`
		Root        *common.Hash          `json:"stateRoot"        gencodec:"required"`
		TxHash      *common.Hash          `json:"transactionsRoot"`
		ReceiptHash *common.Hash          `json:"receiptsRoot"`
		Bloom       *types.Bloom          `json:"logsBloom"`
		Difficulty  *math.HexOrDecimal256 `json:"difficulty"`
		Number      *math.HexOrDecimal256 `json:"number"           gencodec:"required"`
		GasLimit    *math.HexOrDecimal64  `json:"gasLimit"         gencodec:"required"`
		GasUsed     *math.HexOrDecimal64  `json:"gasUsed"`
		Time        *math.HexOrDecimal64  `json:"timestamp"        gencodec:"required"`
		Extra       *hexutil.Bytes        `json:"extraData"`
		MixDigest   *common.Hash          `json:"mixHash"`
		Nonce       *types.BlockNonce     `json:"nonce"`
		BaseFee     *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
	}
	var dec header
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.ParentHash != nil {
		h.ParentHash = *dec.ParentHash
	}
	if dec.OmmerHash != nil {
		h.OmmerHash = dec.OmmerHash
	}
	if dec.Coinbase != nil {
		h.Coinbase = dec.Coinbase
	}
	if dec.Root == nil {
		return errors.New("missing required field 'stateRoot' for header")
	}
	h.Root = *dec.Root
	if dec.TxHash != nil {
		h.TxHash = dec.TxHash
	}
	if dec.ReceiptHash != nil {
		h.ReceiptHash = dec.ReceiptHash
	}
	if dec.Bloom != nil {
		h.Bloom = *dec.Bloom
	}
	if dec.Difficulty != nil {
		h.Difficulty = (*big.Int)(dec.Difficulty)
	}
	if dec.Number == nil {
		return errors.New("missing required field 'number' for header")
	}
	h.Number = (*big.Int)(dec.Number)
	if dec.GasLimit == nil {
		return errors.New("missing required field 'gasLimit' for header")
	}
	h.GasLimit = uint64(*dec.GasLimit)
	if dec.GasUsed != nil {
		h.GasUsed = uint64(*dec.GasUsed)
	}
	if dec.Time == nil {
		return errors.New("missing required field 'timestamp' for header")
	}
	h.Time = uint64(*dec.Time)
	if dec.Extra != nil {
		h.Extra = *dec.Extra
	}
	if dec.MixDigest != nil {
		h.MixDigest = *dec.MixDigest
	}
	if dec.Nonce != nil {
		h.Nonce = dec.Nonce
	}
	if dec.BaseFee != nil {
		h.BaseFee = (*big.Int)(dec.BaseFee)
	}
	return nil
}
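The file above is generated by gencodec from a `header` struct defined elsewhere in the package. The pattern it implements: every field is decoded into a pointer-typed shadow struct so that a missing key (nil pointer) can be told apart from a present-but-zero value, and `gencodec:"required"` fields turn nil into an error. A stripped-down sketch of the same pattern with stand-in types (not the real t8ntool header):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type header struct {
	Root   string
	Number uint64
}

// UnmarshalJSON mirrors the generated code: decode into pointer shadows,
// then reject nil for required fields.
func (h *header) UnmarshalJSON(input []byte) error {
	type shadow struct {
		Root   *string `json:"stateRoot"`
		Number *uint64 `json:"number"`
	}
	var dec shadow
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.Root == nil {
		return errors.New("missing required field 'stateRoot' for header")
	}
	h.Root = *dec.Root
	if dec.Number == nil {
		return errors.New("missing required field 'number' for header")
	}
	h.Number = *dec.Number
	return nil
}

func main() {
	var h header
	err := json.Unmarshal([]byte(`{"number": 1}`), &h)
	fmt.Println(err) // missing required field 'stateRoot' for header
}
```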
@@ -36,17 +36,19 @@ import (
 )

 type result struct {
 	Error   error
 	Address common.Address
 	Hash    common.Hash
+	IntrinsicGas uint64
 }

 // MarshalJSON marshals as JSON with a hash.
 func (r *result) MarshalJSON() ([]byte, error) {
 	type xx struct {
 		Error   string          `json:"error,omitempty"`
 		Address *common.Address `json:"address,omitempty"`
 		Hash    *common.Hash    `json:"hash,omitempty"`
+		IntrinsicGas hexutil.Uint64 `json:"intrinsicGas,omitempty"`
 	}
 	var out xx
 	if r.Error != nil {
@@ -58,6 +60,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
 	if r.Hash != (common.Hash{}) {
 		out.Hash = &r.Hash
 	}
+	out.IntrinsicGas = hexutil.Uint64(r.IntrinsicGas)
 	return json.Marshal(out)
 }

@@ -79,7 +82,7 @@ func Transaction(ctx *cli.Context) error {
 	)
 	// Construct the chainconfig
 	if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
-		return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
+		return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
 	} else {
 		chainConfig = cConf
 	}
@@ -118,6 +121,9 @@ func Transaction(ctx *cli.Context) error {
 	}
 	var results []result
 	for it.Next() {
+		if err := it.Err(); err != nil {
+			return NewError(ErrorIO, err)
+		}
 		var tx types.Transaction
 		err := rlp.DecodeBytes(it.Value(), &tx)
 		if err != nil {
@@ -132,12 +138,38 @@ func Transaction(ctx *cli.Context) error {
 		} else {
 			r.Address = sender
 		}
+		// Check intrinsic gas
 		if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
 			chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil {
 			r.Error = err
-		} else if tx.Gas() < gas {
-			r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
+			results = append(results, r)
+			continue
+		} else {
+			r.IntrinsicGas = gas
+			if tx.Gas() < gas {
+				r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
+				results = append(results, r)
+				continue
+			}
+		}
+		// Validate <256bit fields
+		switch {
+		case tx.Nonce()+1 < tx.Nonce():
+			r.Error = errors.New("nonce exceeds 2^64-1")
+		case tx.Value().BitLen() > 256:
+			r.Error = errors.New("value exceeds 256 bits")
+		case tx.GasPrice().BitLen() > 256:
+			r.Error = errors.New("gasPrice exceeds 256 bits")
+		case tx.GasTipCap().BitLen() > 256:
+			r.Error = errors.New("maxPriorityFeePerGas exceeds 256 bits")
+		case tx.GasFeeCap().BitLen() > 256:
+			r.Error = errors.New("maxFeePerGas exceeds 256 bits")
+		case tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0:
+			r.Error = errors.New("maxFeePerGas < maxPriorityFeePerGas")
+		case new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
+			r.Error = errors.New("gas * gasPrice exceeds 256 bits")
+		case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
+			r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
 		}
 		results = append(results, r)
 	}
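Two idioms in the new validation switch deserve a note: `tx.Nonce()+1 < tx.Nonce()` is the standard unsigned wrap-around test (true only when the nonce is already 2^64-1), and `big.Int.BitLen() > 256` catches values that cannot fit an EVM word. A standalone illustration:

```go
package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	// Unsigned overflow: adding 1 to the maximum uint64 wraps to 0,
	// so x+1 < x holds exactly when x == 2^64-1.
	nonce := uint64(math.MaxUint64)
	fmt.Println(nonce+1 < nonce) // true

	// Width check: 2^256 is the first integer too wide for 256 bits.
	v := new(big.Int).Lsh(big.NewInt(1), 256)
	fmt.Println(v.BitLen()) // 257, so v.BitLen() > 256 flags it
}
```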
@@ -34,6 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -43,11 +44,12 @@ import (

 const (
 	ErrorEVM              = 2
-	ErrorVMConfig         = 3
+	ErrorConfig           = 3
 	ErrorMissingBlockhash = 4

 	ErrorJson = 10
 	ErrorIO   = 11
+	ErrorRlp  = 12

 	stdinSelector = "stdin"
 )
@@ -88,28 +90,33 @@ func Transition(ctx *cli.Context) error {
 	log.Root().SetHandler(glogger)

 	var (
 		err    error
-		tracer vm.Tracer
-		baseDir = ""
+		tracer vm.EVMLogger
 	)
-	var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error)
+	var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)

-	// If user specified a basedir, make sure it exists
-	if ctx.IsSet(OutputBasedir.Name) {
-		if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
-			err := os.MkdirAll(base, 0755) // //rw-r--r--
-			if err != nil {
-				return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
-			}
-			baseDir = base
-		}
+	baseDir, err := createBasedir(ctx)
+	if err != nil {
+		return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
 	}
 	if ctx.Bool(TraceFlag.Name) {
+		if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) {
+			return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
+		}
+		if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) {
+			return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
+		}
+		if ctx.IsSet(TraceDisableMemoryFlag.Name) {
+			log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
+		}
+		if ctx.IsSet(TraceDisableReturnDataFlag.Name) {
+			log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
+		}
 		// Configure the EVM logger
-		logConfig := &vm.LogConfig{
+		logConfig := &logger.Config{
 			DisableStack:     ctx.Bool(TraceDisableStackFlag.Name),
-			EnableMemory:     !ctx.Bool(TraceDisableMemoryFlag.Name),
-			EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
+			EnableMemory:     !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name),
+			EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name),
 			Debug:            true,
 		}
 		var prevFile *os.File
@@ -119,7 +126,7 @@ func Transition(ctx *cli.Context) error {
 				prevFile.Close()
 			}
 		}()
-		getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) {
+		getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
 			if prevFile != nil {
 				prevFile.Close()
 			}
@@ -128,10 +135,10 @@ func Transition(ctx *cli.Context) error {
 				return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
 			}
 			prevFile = traceFile
-			return vm.NewJSONLogger(logConfig, traceFile), nil
+			return logger.NewJSONLogger(logConfig, traceFile), nil
 		}
 	} else {
-		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) {
+		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
 			return nil, nil
 		}
 	}
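Note how the new `logger.Config` wiring resolves the overlapping flags. Because `trace.nomemory` is a `BoolTFlag` (default true), `!nomemory || memory` keeps memory capture disabled by default, while either `--trace.nomemory=false` (legacy) or `--trace.memory` (new) enables it; the mutual-exclusion checks above guard against contradictory combinations. A truth-table sketch with plain booleans standing in for the cli lookups:

```go
package main

import "fmt"

func main() {
	for _, nomemory := range []bool{true, false} { // BoolTFlag: defaults to true
		for _, memory := range []bool{false, true} { // BoolFlag: defaults to false
			fmt.Printf("trace.nomemory=%-5v trace.memory=%-5v -> EnableMemory=%v\n",
				nomemory, memory, !nomemory || memory)
		}
	}
}
```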
@@ -155,29 +162,17 @@ func Transition(ctx *cli.Context) error {
 		}
 	}
 	if allocStr != stdinSelector {
-		inFile, err := os.Open(allocStr)
-		if err != nil {
-			return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err))
-		}
-		defer inFile.Close()
-		decoder := json.NewDecoder(inFile)
-		if err := decoder.Decode(&inputData.Alloc); err != nil {
-			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err))
+		if err := readFile(allocStr, "alloc", &inputData.Alloc); err != nil {
+			return err
 		}
 	}
 	prestate.Pre = inputData.Alloc

 	// Set the block environment
 	if envStr != stdinSelector {
-		inFile, err := os.Open(envStr)
-		if err != nil {
-			return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err))
-		}
-		defer inFile.Close()
-		decoder := json.NewDecoder(inFile)
 		var env stEnv
-		if err := decoder.Decode(&env); err != nil {
-			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling env-file: %v", err))
+		if err := readFile(envStr, "env", &env); err != nil {
+			return err
 		}
 		inputData.Env = &env
 	}
@@ -190,7 +185,7 @@ func Transition(ctx *cli.Context) error {
 	// Construct the chainconfig
 	var chainConfig *params.ChainConfig
 	if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
-		return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
+		return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
 	} else {
 		chainConfig = cConf
 		vmConfig.ExtraEips = extraEips
@@ -254,18 +249,18 @@ func Transition(ctx *cli.Context) error {
 	// Sanity check, to not `panic` in state_transition
 	if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
 		if prestate.Env.BaseFee == nil {
-			return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
+			return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
 		}
 	}
 	if env := prestate.Env; env.Difficulty == nil {
 		// If difficulty was not provided by caller, we need to calculate it.
 		switch {
 		case env.ParentDifficulty == nil:
-			return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
+			return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
 		case env.Number == 0:
-			return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
+			return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
 		case env.Timestamp <= env.ParentTimestamp:
-			return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
+			return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
 				env.Timestamp, env.ParentTimestamp))
 		}
 		prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
@@ -286,27 +281,34 @@ func Transition(ctx *cli.Context) error {
 // txWithKey is a helper-struct, to allow us to use the types.Transaction along with
 // a `secretKey`-field, for input
 type txWithKey struct {
 	key *ecdsa.PrivateKey
 	tx  *types.Transaction
+	protected bool
 }

 func (t *txWithKey) UnmarshalJSON(input []byte) error {
-	// Read the secretKey, if present
-	type sKey struct {
-		Key *common.Hash `json:"secretKey"`
+	// Read the metadata, if present
+	type txMetadata struct {
+		Key       *common.Hash `json:"secretKey"`
+		Protected *bool        `json:"protected"`
 	}
-	var key sKey
-	if err := json.Unmarshal(input, &key); err != nil {
+	var data txMetadata
+	if err := json.Unmarshal(input, &data); err != nil {
 		return err
 	}
-	if key.Key != nil {
-		k := key.Key.Hex()[2:]
+	if data.Key != nil {
+		k := data.Key.Hex()[2:]
 		if ecdsaKey, err := crypto.HexToECDSA(k); err != nil {
 			return err
 		} else {
 			t.key = ecdsaKey
 		}
 	}
+	if data.Protected != nil {
+		t.protected = *data.Protected
+	} else {
+		t.protected = true
+	}
 	// Now, read the transaction itself
 	var tx types.Transaction
 	if err := json.Unmarshal(input, &tx); err != nil {
@@ -335,7 +337,15 @@ func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Tran
 		v, r, s := tx.RawSignatureValues()
 		if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 {
 			// This transaction needs to be signed
-			signed, err := types.SignTx(tx, signer, key)
+			var (
+				signed *types.Transaction
+				err    error
+			)
+			if txWithKey.protected {
+				signed, err = types.SignTx(tx, signer, key)
+			} else {
+				signed, err = types.SignTx(tx, types.FrontierSigner{}, key)
+			}
 			if err != nil {
 				return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
 			}
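Taken together, the two hunks above let a t8n input transaction opt out of EIP-155 replay protection: when a txs.json entry carries `"protected": false` alongside its `secretKey`, the unsigned transaction is signed with the legacy `types.FrontierSigner` instead of the fork-aware signer. A sketch of what such an entry might look like (all values are placeholders, not taken from the diff):

```go
package main

import "fmt"

// Hypothetical txs.json entry. Only secretKey/protected are metadata read by
// txWithKey.UnmarshalJSON; the remaining fields decode as a types.Transaction.
const txEntry = `{
  "nonce": "0x0",
  "gasPrice": "0x2",
  "gas": "0x5208",
  "to": "0xd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0",
  "value": "0x1",
  "input": "0x",
  "v": "0x0",
  "r": "0x0",
  "s": "0x0",
  "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
  "protected": false
}`

func main() { fmt.Println(txEntry) }
```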
54 cmd/evm/internal/t8ntool/utils.go Normal file
@@ -0,0 +1,54 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package t8ntool

import (
	"encoding/json"
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

// readFile reads the json-data in the provided path and marshals into dest.
func readFile(path, desc string, dest interface{}) error {
	inFile, err := os.Open(path)
	if err != nil {
		return NewError(ErrorIO, fmt.Errorf("failed reading %s file: %v", desc, err))
	}
	defer inFile.Close()
	decoder := json.NewDecoder(inFile)
	if err := decoder.Decode(dest); err != nil {
		return NewError(ErrorJson, fmt.Errorf("failed unmarshaling %s file: %v", desc, err))
	}
	return nil
}

// createBasedir makes sure the basedir exists, if user specified one.
func createBasedir(ctx *cli.Context) (string, error) {
	baseDir := ""
	if ctx.IsSet(OutputBasedir.Name) {
		if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
			err := os.MkdirAll(base, 0755) // //rw-r--r--
			if err != nil {
				return "", err
			}
			baseDir = base
		}
	}
	return baseDir, nil
}
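The two helpers centralize what were previously inline open/decode/close sequences in Transition. A minimal sketch of the same shape outside the package (stripped of the t8ntool error codes, so names here are stand-ins):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// readJSON mirrors readFile above, minus the NewError wrapping.
func readJSON(path string, dest interface{}) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed reading %s: %v", path, err)
	}
	defer f.Close()
	return json.NewDecoder(f).Decode(dest)
}

func main() {
	var env map[string]interface{}
	if err := readJSON("env.json", &env); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(env["currentNumber"])
}
```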
@@ -139,8 +139,10 @@ var stateTransitionCommand = cli.Command{
 	Flags: []cli.Flag{
 		t8ntool.TraceFlag,
 		t8ntool.TraceDisableMemoryFlag,
+		t8ntool.TraceEnableMemoryFlag,
 		t8ntool.TraceDisableStackFlag,
 		t8ntool.TraceDisableReturnDataFlag,
+		t8ntool.TraceEnableReturnDataFlag,
 		t8ntool.OutputBasedir,
 		t8ntool.OutputAllocFlag,
 		t8ntool.OutputResultFlag,
@@ -167,6 +169,25 @@ var transactionCommand = cli.Command{
 	},
 }

+var blockBuilderCommand = cli.Command{
+	Name:    "block-builder",
+	Aliases: []string{"b11r"},
+	Usage:   "builds a block",
+	Action:  t8ntool.BuildBlock,
+	Flags: []cli.Flag{
+		t8ntool.OutputBasedir,
+		t8ntool.OutputBlockFlag,
+		t8ntool.InputHeaderFlag,
+		t8ntool.InputOmmersFlag,
+		t8ntool.InputTxsRlpFlag,
+		t8ntool.SealCliqueFlag,
+		t8ntool.SealEthashFlag,
+		t8ntool.SealEthashDirFlag,
+		t8ntool.SealEthashModeFlag,
+		t8ntool.VerbosityFlag,
+	},
+}
+
 func init() {
 	app.Flags = []cli.Flag{
 		BenchFlag,
@@ -200,6 +221,7 @@ func init() {
 		stateTestCommand,
 		stateTransitionCommand,
 		transactionCommand,
+		blockBuilderCommand,
 	}
 	cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
 }
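With the command registered, the block builder is reachable as `evm block-builder` or its `b11r` alias, taking the input/seal flags declared earlier. A hedged sketch of invoking it from Go (the `evm` binary on PATH and the file names are assumptions; the flags themselves come from the diff):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Equivalent shell: evm b11r --input.header header.json \
	//   --input.ommers ommers.json --input.txs txs.rlp --output.block stdout
	out, err := exec.Command("evm", "b11r",
		"--input.header", "header.json",
		"--input.ommers", "ommers.json",
		"--input.txs", "txs.rlp",
		"--output.block", "stdout",
	).CombinedOutput()
	if err != nil {
		fmt.Println("b11r failed:", err)
	}
	fmt.Println(string(out))
}
```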
@@ -36,6 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/core/vm/runtime"
+	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"gopkg.in/urfave/cli.v1"
@@ -107,7 +108,7 @@ func runCmd(ctx *cli.Context) error {
 	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
 	glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
 	log.Root().SetHandler(glogger)
-	logconfig := &vm.LogConfig{
+	logconfig := &logger.Config{
 		EnableMemory:   !ctx.GlobalBool(DisableMemoryFlag.Name),
 		DisableStack:   ctx.GlobalBool(DisableStackFlag.Name),
 		DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
@@ -116,8 +117,8 @@ func runCmd(ctx *cli.Context) error {
 	}

 	var (
-		tracer      vm.Tracer
-		debugLogger *vm.StructLogger
+		tracer      vm.EVMLogger
+		debugLogger *logger.StructLogger
 		statedb     *state.StateDB
 		chainConfig *params.ChainConfig
 		sender      = common.BytesToAddress([]byte("sender"))
@@ -125,12 +126,12 @@ func runCmd(ctx *cli.Context) error {
 		genesisConfig *core.Genesis
 	)
 	if ctx.GlobalBool(MachineFlag.Name) {
-		tracer = vm.NewJSONLogger(logconfig, os.Stdout)
+		tracer = logger.NewJSONLogger(logconfig, os.Stdout)
 	} else if ctx.GlobalBool(DebugFlag.Name) {
-		debugLogger = vm.NewStructLogger(logconfig)
+		debugLogger = logger.NewStructLogger(logconfig)
 		tracer = debugLogger
 	} else {
-		debugLogger = vm.NewStructLogger(logconfig)
+		debugLogger = logger.NewStructLogger(logconfig)
 	}
 	if ctx.GlobalString(GenesisFlag.Name) != "" {
 		gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
@@ -288,10 +289,10 @@ func runCmd(ctx *cli.Context) error {
 	if ctx.GlobalBool(DebugFlag.Name) {
 		if debugLogger != nil {
 			fmt.Fprintln(os.Stderr, "#### TRACE ####")
-			vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
+			logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
 		}
 		fmt.Fprintln(os.Stderr, "#### LOGS ####")
-		vm.WriteLogs(os.Stderr, statedb.Logs())
+		logger.WriteLogs(os.Stderr, statedb.Logs())
 	}

 	if bench || ctx.GlobalBool(StatDumpFlag.Name) {
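The mechanical pattern of this migration, repeated across the cmd/evm files: the tracing types moved from core/vm into the new eth/tracers/logger package (`vm.LogConfig` becomes `logger.Config`, `vm.StructLogger` becomes `logger.StructLogger`, `vm.NewJSONLogger` becomes `logger.NewJSONLogger`), and the interface was renamed from `vm.Tracer` to `vm.EVMLogger`. A compact sketch of the new wiring, using only constructors that appear in the diff:

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

func main() {
	cfg := &logger.Config{
		EnableMemory:     true,
		EnableReturnData: true,
	}
	// Both loggers satisfy the renamed vm.EVMLogger interface.
	var tracer vm.EVMLogger = logger.NewStructLogger(cfg)
	tracer = logger.NewJSONLogger(cfg, os.Stderr) // streaming JSON variant
	_ = tracer
}
```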
@@ -25,6 +25,7 @@ import (

 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/tests"

@@ -58,26 +59,26 @@ func stateTestCmd(ctx *cli.Context) error {
 	log.Root().SetHandler(glogger)

 	// Configure the EVM logger
-	config := &vm.LogConfig{
+	config := &logger.Config{
 		EnableMemory:     !ctx.GlobalBool(DisableMemoryFlag.Name),
 		DisableStack:     ctx.GlobalBool(DisableStackFlag.Name),
 		DisableStorage:   ctx.GlobalBool(DisableStorageFlag.Name),
 		EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
 	}
 	var (
-		tracer   vm.Tracer
-		debugger *vm.StructLogger
+		tracer   vm.EVMLogger
+		debugger *logger.StructLogger
 	)
 	switch {
 	case ctx.GlobalBool(MachineFlag.Name):
-		tracer = vm.NewJSONLogger(config, os.Stderr)
+		tracer = logger.NewJSONLogger(config, os.Stderr)

 	case ctx.GlobalBool(DebugFlag.Name):
-		debugger = vm.NewStructLogger(config)
+		debugger = logger.NewStructLogger(config)
 		tracer = debugger

 	default:
-		debugger = vm.NewStructLogger(config)
+		debugger = logger.NewStructLogger(config)
 	}
 	// Load the test content from the input file
 	src, err := ioutil.ReadFile(ctx.Args().First())
@@ -118,7 +119,7 @@ func stateTestCmd(ctx *cli.Context) error {
 	if ctx.GlobalBool(DebugFlag.Name) {
 		if debugger != nil {
 			fmt.Fprintln(os.Stderr, "#### TRACE ####")
-			vm.WriteTrace(os.Stderr, debugger.StructLogs())
+			logger.WriteTrace(os.Stderr, debugger.StructLogs())
 		}
 	}
 }
@@ -9,6 +9,7 @@ import (
 	"testing"

 	"github.com/docker/docker/pkg/reexec"
+	"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
 	"github.com/ethereum/go-ethereum/internal/cmdtest"
 )

@@ -130,7 +131,7 @@ func TestT8n(t *testing.T) {
 			output:      t8nOutput{alloc: true, result: true},
 			expExitCode: 4,
 		},
-		{ // Ommer test
+		{ // Uncle test
 			base: "./testdata/5",
 			input: t8nInput{
 				"alloc.json", "txs.json", "env.json", "Byzantium", "0x80",
@@ -170,13 +171,53 @@ func TestT8n(t *testing.T) {
 			output: t8nOutput{result: true},
 			expOut: "exp2.json",
 		},
+		{ // Difficulty calculation - with ommers + Berlin
+			base: "./testdata/14",
+			input: t8nInput{
+				"alloc.json", "txs.json", "env.uncles.json", "Berlin", "",
+			},
+			output: t8nOutput{result: true},
+			expOut: "exp_berlin.json",
+		},
+		{ // Difficulty calculation on london
+			base: "./testdata/19",
+			input: t8nInput{
+				"alloc.json", "txs.json", "env.json", "London", "",
+			},
+			output: t8nOutput{result: true},
+			expOut: "exp_london.json",
+		},
+		{ // Difficulty calculation on arrow glacier
+			base: "./testdata/19",
+			input: t8nInput{
+				"alloc.json", "txs.json", "env.json", "ArrowGlacier", "",
+			},
+			output: t8nOutput{result: true},
+			expOut: "exp_arrowglacier.json",
+		},
+		{ // Sign unprotected (pre-EIP155) transaction
+			base: "./testdata/23",
+			input: t8nInput{
+				"alloc.json", "txs.json", "env.json", "Berlin", "",
+			},
+			output: t8nOutput{result: true},
+			expOut: "exp.json",
+		},
 	} {

 		args := []string{"t8n"}
 		args = append(args, tc.output.get()...)
 		args = append(args, tc.input.get(tc.base)...)
+		var qArgs []string // quoted args for debugging purposes
+		for _, arg := range args {
+			if len(arg) == 0 {
+				qArgs = append(qArgs, `""`)
+			} else {
+				qArgs = append(qArgs, arg)
+			}
+		}
+		tt.Logf("args: %v\n", strings.Join(qArgs, " "))
 		tt.Run("evm-test", args...)
-		tt.Logf("args: %v\n", strings.Join(args, " "))
 		// Compare the expected output, if provided
 		if tc.expOut != "" {
 			want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
@@ -257,6 +298,22 @@ func TestT9n(t *testing.T) {
 			},
 			expOut: "exp.json",
 		},
+		{ // Transactions with value exceeding 256 bits
+			base: "./testdata/17",
+			input: t9nInput{
+				inTxs:  "signed_txs.rlp",
+				stFork: "London",
+			},
+			expOut: "exp.json",
+		},
+		{ // Invalid RLP
+			base: "./testdata/18",
+			input: t9nInput{
+				inTxs:  "invalid.rlp",
+				stFork: "London",
+			},
+			expExitCode: t8ntool.ErrorIO,
+		},
 	} {

 		args := []string{"t9n"}
@@ -287,6 +344,126 @@ func TestT9n(t *testing.T) {
 	}
 }

+type b11rInput struct {
+	inEnv       string
+	inOmmersRlp string
+	inTxsRlp    string
+	inClique    string
+	ethash      bool
+	ethashMode  string
+	ethashDir   string
+}
+
+func (args *b11rInput) get(base string) []string {
+	var out []string
+	if opt := args.inEnv; opt != "" {
+		out = append(out, "--input.header")
+		out = append(out, fmt.Sprintf("%v/%v", base, opt))
+	}
+	if opt := args.inOmmersRlp; opt != "" {
+		out = append(out, "--input.ommers")
+		out = append(out, fmt.Sprintf("%v/%v", base, opt))
+	}
+	if opt := args.inTxsRlp; opt != "" {
+		out = append(out, "--input.txs")
+		out = append(out, fmt.Sprintf("%v/%v", base, opt))
+	}
+	if opt := args.inClique; opt != "" {
+		out = append(out, "--seal.clique")
+		out = append(out, fmt.Sprintf("%v/%v", base, opt))
+	}
+	if args.ethash {
+		out = append(out, "--seal.ethash")
+	}
+	if opt := args.ethashMode; opt != "" {
+		out = append(out, "--seal.ethash.mode")
+		out = append(out, fmt.Sprintf("%v/%v", base, opt))
+	}
+	if opt := args.ethashDir; opt != "" {
+		out = append(out, "--seal.ethash.dir")
+		out = append(out, fmt.Sprintf("%v/%v", base, opt))
+	}
+	out = append(out, "--output.block")
+	out = append(out, "stdout")
+	return out
+}
+
+func TestB11r(t *testing.T) {
+	tt := new(testT8n)
+	tt.TestCmd = cmdtest.NewTestCmd(t, tt)
+	for i, tc := range []struct {
+		base        string
+		input       b11rInput
+		expExitCode int
+		expOut      string
+	}{
+		{ // unsealed block
+			base: "./testdata/20",
+			input: b11rInput{
+				inEnv:       "header.json",
+				inOmmersRlp: "ommers.json",
+				inTxsRlp:    "txs.rlp",
+			},
+			expOut: "exp.json",
+		},
+		{ // ethash test seal
+			base: "./testdata/21",
+			input: b11rInput{
+				inEnv:       "header.json",
+				inOmmersRlp: "ommers.json",
+				inTxsRlp:    "txs.rlp",
+			},
+			expOut: "exp.json",
+		},
+		{ // clique test seal
+			base: "./testdata/21",
+			input: b11rInput{
+				inEnv:       "header.json",
+				inOmmersRlp: "ommers.json",
+				inTxsRlp:    "txs.rlp",
+				inClique:    "clique.json",
+			},
+			expOut: "exp-clique.json",
+		},
+		{ // block with ommers
+			base: "./testdata/22",
+			input: b11rInput{
+				inEnv:       "header.json",
+				inOmmersRlp: "ommers.json",
+				inTxsRlp:    "txs.rlp",
+			},
+			expOut: "exp.json",
+		},
+	} {
+		args := []string{"b11r"}
+		args = append(args, tc.input.get(tc.base)...)
+
+		tt.Run("evm-test", args...)
+		tt.Logf("args:\n go run . %v\n", strings.Join(args, " "))
+		// Compare the expected output, if provided
+		if tc.expOut != "" {
+			want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
+			if err != nil {
+				t.Fatalf("test %d: could not read expected output: %v", i, err)
+			}
+			have := tt.Output()
+			ok, err := cmpJson(have, want)
+			switch {
+			case err != nil:
+				t.Logf(string(have))
+				t.Fatalf("test %d, json parsing failed: %v", i, err)
+			case !ok:
+				t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
+			}
+		}
+		tt.WaitExit()
+		if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
+			t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
+		}
+	}
+}
+
 // cmpJson compares the JSON in two byte slices.
 func cmpJson(a, b []byte) (bool, error) {
 	var j, j2 interface{}
5 cmd/evm/testdata/1/exp.json vendored
@@ -15,7 +15,7 @@
 	"result": {
 		"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
 		"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
-		"receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+		"receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
 		"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
 		"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
 		"receipts": [
@@ -38,6 +38,7 @@
 				"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
 			}
 		],
-		"currentDifficulty": "0x20000"
+		"currentDifficulty": "0x20000",
+		"gasUsed": "0x5208"
 	}
 }
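The new top-level `gasUsed` field reports the total gas consumed by the block's transactions; `0x5208` is 21000 in decimal, the intrinsic cost of a single plain value transfer, matching the one successful transaction in this fixture. A one-line check:

```go
package main

import "fmt"

func main() {
	fmt.Println(0x5208 == 21000) // true: hex 0x5208 is decimal 21000
}
```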
5 cmd/evm/testdata/13/exp2.json vendored
@@ -2,7 +2,7 @@
 	"result": {
 		"stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61",
 		"txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
-		"receiptRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
+		"receiptsRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
 		"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
 		"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
 		"receipts": [
@@ -33,6 +33,7 @@
 				"transactionIndex": "0x1"
 			}
 		],
-		"currentDifficulty": "0x20000"
+		"currentDifficulty": "0x20000",
+		"gasUsed": "0x109a0"
 	}
 }
5 cmd/evm/testdata/14/exp.json vendored
@@ -2,10 +2,11 @@
 	"result": {
 		"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
 		"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-		"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+		"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
 		"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
 		"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
 		"currentDifficulty": "0x2000020000000",
-		"receipts": []
+		"receipts": [],
+		"gasUsed": "0x0"
 	}
 }
5 cmd/evm/testdata/14/exp2.json vendored
@@ -2,10 +2,11 @@
 	"result": {
 		"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
 		"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-		"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+		"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
 		"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
 		"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
 		"receipts": [],
-		"currentDifficulty": "0x1ff8020000000"
+		"currentDifficulty": "0x1ff8020000000",
+		"gasUsed": "0x0"
 	}
 }
12 cmd/evm/testdata/14/exp_berlin.json vendored Normal file
@@ -0,0 +1,12 @@
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [],
    "currentDifficulty": "0x1ff9000000000",
    "gasUsed": "0x0"
  }
}
6 cmd/evm/testdata/15/exp2.json vendored
@@ -1,10 +1,12 @@
 [
 	{
 		"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
-		"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
+		"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
+		"intrinsicGas": "0x5208"
 	},
 	{
 		"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
-		"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
+		"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
+		"intrinsicGas": "0x5208"
 	}
 ]
6 cmd/evm/testdata/16/exp.json vendored
@@ -1,11 +1,13 @@
 [
 	{
 		"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
-		"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6"
+		"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6",
+		"intrinsicGas": "0x5208"
 	},
 	{
 		"error": "intrinsic gas too low: have 82, want 21000",
 		"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
-		"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b"
+		"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b",
+		"intrinsicGas": "0x5208"
 	}
 ]
22 cmd/evm/testdata/17/exp.json vendored Normal file
@@ -0,0 +1,22 @@
[
  {
    "error": "value exceeds 256 bits",
    "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
    "hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82",
    "intrinsicGas": "0x5208"
  },
  {
    "error": "gasPrice exceeds 256 bits",
    "address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964",
    "hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5",
    "intrinsicGas": "0x5208"
  },
  {
    "error": "invalid transaction v, r, s values",
    "hash": "0xf06691c2a803ab7f3c81d06a0c0a896f80f311105c599fc59a9fdbc669356d35"
  },
  {
    "error": "invalid transaction v, r, s values",
    "hash": "0x84703b697ad5b0db25e4f1f98fb6b1adce85b9edb2232eeba9cedd8c6601694b"
  }
]
46 cmd/evm/testdata/17/rlpdata.txt vendored Normal file
@@ -0,0 +1,46 @@
[
  [
    "",
    "d",
    5208,
    d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
    010000000000000000000000000000000000000000000000000000000000000001,
    "",
    1b,
    c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
    6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
  ],
  [
    "",
    010000000000000000000000000000000000000000000000000000000000000001,
    5208,
    d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
    11,
    "",
    1b,
    c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
    6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
  ],
  [
    "",
    11,
    5208,
    d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
    11,
    "",
    1b,
    c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daa,
    6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
  ],
  [
    "",
    11,
    5208,
    d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
    11,
    "",
    1b,
    c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
    6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb,
  ],
]
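Note: each of the four transactions above is malformed in exactly one spot. The first carries a 33-byte value, the second a 33-byte gasPrice, and the last two an oversized r and s respectively; these are what produce the four errors listed in exp.json above.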
1 cmd/evm/testdata/17/signed_txs.rlp vendored Normal file
@@ -0,0 +1 @@
"0xf901c8f880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f88080a101000000000000000000000000000000000000000000000000000000000000000182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba1c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daaa06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da16180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb"
9 cmd/evm/testdata/18/README.md vendored Normal file
@@ -0,0 +1,9 @@
# Invalid rlp

This folder contains a sample of invalid RLP, and it's expected
that t9n handles this properly:

```
$ go run . t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London
ERROR(11): rlp: value size exceeds available input length
```
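The failure t9n reports can be reproduced directly with geth's rlp package. A minimal sketch, assuming (as t9n does for its input) that the payload should decode into a list of transactions:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// The payload from testdata/18/invalid.rlp: an inner length field claims
	// more bytes than the enclosing list actually provides.
	data := hexutil.MustDecode("0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3")

	var txs types.Transactions
	if err := rlp.DecodeBytes(data, &txs); err != nil {
		// Prints the same error t9n surfaces:
		// "rlp: value size exceeds available input length"
		fmt.Println("decode failed:", err)
	}
}
```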
1 cmd/evm/testdata/18/invalid.rlp vendored Normal file
@@ -0,0 +1 @@
"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3"
12 cmd/evm/testdata/19/alloc.json vendored Normal file
@@ -0,0 +1,12 @@
{
  "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
    "balance": "0x5ffd4878be161d74",
    "code": "0x",
    "nonce": "0xac",
    "storage": {}
  },
  "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
    "balance": "0xfeedbead",
    "nonce" : "0x00"
  }
}
9 cmd/evm/testdata/19/env.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
  "currentGasLimit": "0x750a163df65e8a",
  "currentBaseFee": "0x500",
  "currentNumber": "13000000",
  "currentTimestamp": "100015",
  "parentTimestamp" : "99999",
  "parentDifficulty" : "0x2000000000000"
}
12 cmd/evm/testdata/19/exp_arrowglacier.json vendored Normal file
@@ -0,0 +1,12 @@
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "currentDifficulty": "0x2000000200000",
    "receipts": [],
    "gasUsed": "0x0"
  }
}
12 cmd/evm/testdata/19/exp_london.json vendored Normal file
@@ -0,0 +1,12 @@
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "currentDifficulty": "0x2000080000000",
    "receipts": [],
    "gasUsed": "0x0"
  }
}
9 cmd/evm/testdata/19/readme.md vendored Normal file
@@ -0,0 +1,9 @@
## Difficulty calculation

This test shows how `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller,
this time on `ArrowGlacier` (EIP-4345).

Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no uncle hash provided for the parent block):
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/19/alloc.json --input.txs=./testdata/19/txs.json --input.env=./testdata/19/env.json --output.result=stdout --state.fork=ArrowGlacier
```
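The same ArrowGlacier difficulty can be computed in-process via the consensus package. A minimal sketch with the parent header reconstructed from env.json; the chain config that activates ArrowGlacier below block 13,000,000 is an assumption made for illustration (mainnet activates it later), not what t8n does internally:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Parent header reconstructed from testdata/19/env.json.
	parent := &types.Header{
		Number:     big.NewInt(12_999_999),                   // currentNumber - 1
		Time:       99_999,                                   // parentTimestamp
		Difficulty: new(big.Int).SetUint64(0x2000000000000),  // parentDifficulty
		UncleHash:  types.EmptyUncleHash,                     // no uncle hash provided
	}
	// Copy mainnet rules but force ArrowGlacier on at this height (assumption).
	config := *params.MainnetChainConfig
	config.ArrowGlacierBlock = big.NewInt(12_000_000)

	diff := ethash.CalcDifficulty(&config, 100_015 /* currentTimestamp */, parent)
	fmt.Printf("0x%x\n", diff) // 0x2000000200000, matching exp_arrowglacier.json
}
```

The delta of 16s between the timestamps gives a zero Byzantium-style adjustment, so the only change is the bomb term 2^((13000000-10700000)/100000 - 2) = 0x200000; under London's smaller bomb delay the same inputs yield 0x2000080000000, matching exp_london.json.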
1 cmd/evm/testdata/19/txs.json vendored Normal file
@@ -0,0 +1 @@
[]
4 cmd/evm/testdata/20/exp.json vendored Normal file
@@ -0,0 +1,4 @@
{
"rlp": "0xf902d9f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8f8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600c0",
  "hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899"
}
14 cmd/evm/testdata/20/header.json vendored Normal file
@@ -0,0 +1,14 @@
{
  "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
  "miner": "0xe997a23b159e2e2a5ce72333262972374b15425c",
  "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
  "difficulty": "0x1000",
  "number": "0xc3be",
  "gasLimit": "0x50785",
  "gasUsed": "0x0",
  "timestamp": "0x55c5277e",
  "extraData": "0x476574682f76312e302e312f6c696e75782f676f312e342e32",
  "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf",
  "nonce": "0x97435673d874f7c8"
}
1 cmd/evm/testdata/20/ommers.json vendored Normal file
@@ -0,0 +1 @@
[]
11 cmd/evm/testdata/20/readme.md vendored Normal file
@@ -0,0 +1,11 @@
# Block building

This test shows how `b11r` can be used to assemble an unsealed block.

```console
$ go run . b11r --input.header=testdata/20/header.json --input.txs=testdata/20/txs.rlp --input.ommers=testdata/20/ommers.json --output.block=stdout
{
"rlp": "0xf90216f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8c0c0",
  "hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899"
}
```
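Assembling an unsealed block boils down to wrapping the supplied header and transaction list in a types.Block and RLP-encoding it. A rough sketch of that step, not the b11r implementation itself; it assumes the header already carries the correct transaction and ommer roots, which b11r actually derives for you:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Fields copied from testdata/20/header.json.
	header := &types.Header{
		ParentHash: common.HexToHash("0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e"),
		Coinbase:   common.HexToAddress("0xe997a23b159e2e2a5ce72333262972374b15425c"),
		Root:       common.HexToHash("0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e"),
		Difficulty: big.NewInt(0x1000),
		Number:     big.NewInt(0xc3be),
		GasLimit:   0x50785,
		Time:       0x55c5277e,
		Extra:      common.FromHex("0x476574682f76312e302e312f6c696e75782f676f312e342e32"),
		MixDigest:  common.HexToHash("0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"),
		Nonce:      types.EncodeNonce(0x97435673d874f7c8),
	}
	var txs []*types.Transaction // in the test these come from txs.rlp
	block := types.NewBlockWithHeader(header).WithBody(txs, nil)

	enc, err := rlp.EncodeToBytes(block)
	if err != nil {
		panic(err)
	}
	fmt.Printf("rlp:  0x%x\nhash: %s\n", enc, block.Hash())
}
```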
1 cmd/evm/testdata/20/txs.rlp vendored Normal file
@@ -0,0 +1 @@
"0xf8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600"
6 cmd/evm/testdata/21/clique.json vendored Normal file
@@ -0,0 +1,6 @@
{
  "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
  "voted": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
  "authorize": false,
  "vanity": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
4 cmd/evm/testdata/21/exp-clique.json vendored Normal file
@@ -0,0 +1,4 @@
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
  "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}
4 cmd/evm/testdata/21/exp.json vendored Normal file
@@ -0,0 +1,4 @@
{
"rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0",
  "hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb"
}
11 cmd/evm/testdata/21/header.json vendored Normal file
@@ -0,0 +1,11 @@
{
  "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
  "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
  "difficulty": "0x1000",
  "number": "0xc3be",
  "gasLimit": "0x50785",
  "gasUsed": "0x0",
  "timestamp": "0x55c5277e",
  "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"
}
1 cmd/evm/testdata/21/ommers.json vendored Normal file
@@ -0,0 +1 @@
[]
23 cmd/evm/testdata/21/readme.md vendored Normal file
@@ -0,0 +1,23 @@
# Sealed block building

This test shows how `b11r` can be used to assemble a sealed block.

## Ethash

```console
$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.ethash --seal.ethash.mode=test --output.block=stdout
{
"rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0",
  "hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb"
}
```

## Clique

```console
$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.clique=testdata/21/clique.json --output.block=stdout
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
  "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}
```
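The clique seal is a plain secp256k1 signature over the header, packed into extraData after 32 bytes of vanity. A minimal sketch of that sealing step, assuming a header populated as in header.json; this mirrors what --seal.clique does but is not the b11r code:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/clique"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	header := &types.Header{
		ParentHash: common.HexToHash("0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e"),
		Root:       common.HexToHash("0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e"),
		Difficulty: big.NewInt(0x1000),
		Number:     big.NewInt(0xc3be),
		GasLimit:   0x50785,
		Time:       0x55c5277e,
		MixDigest:  common.HexToHash("0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"),
	}
	// Clique packs 32 bytes of vanity plus a 65-byte seal signature into extraData.
	vanity := common.FromHex("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	header.Extra = make([]byte, 32+65)
	copy(header.Extra, vanity)

	key, err := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	if err != nil {
		panic(err)
	}
	sig, err := crypto.Sign(clique.SealHash(header).Bytes(), key)
	if err != nil {
		panic(err)
	}
	copy(header.Extra[32:], sig)
	fmt.Println("sealed header hash:", header.Hash())
}
```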
1 cmd/evm/testdata/21/txs.rlp vendored Normal file
@@ -0,0 +1 @@
"c0"
4 cmd/evm/testdata/22/exp-clique.json vendored Normal file
@@ -0,0 +1,4 @@
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
  "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}
4 cmd/evm/testdata/22/exp.json vendored Normal file
@@ -0,0 +1,4 @@
{
"rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000",
  "hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755"
}
11 cmd/evm/testdata/22/header.json vendored Normal file
@@ -0,0 +1,11 @@
{
  "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
  "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
  "difficulty": "0x1000",
  "number": "0xc3be",
  "gasLimit": "0x50785",
  "gasUsed": "0x0",
  "timestamp": "0x55c5277e",
  "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"
}
1 cmd/evm/testdata/22/ommers.json vendored Normal file
@@ -0,0 +1 @@
["0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0","0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0"]
11 cmd/evm/testdata/22/readme.md vendored Normal file
@@ -0,0 +1,11 @@
# Building blocks with ommers

This test shows how `b11r` can chain ommer assemblies together into a canonical block.

```console
$ echo "{ \"ommers\": [`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`,`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`]}" | go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --input.ommers=stdin --output.block=stdout
{
"rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000",
  "hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755"
}
```
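On the input side, each entry handed to --input.ommers is a complete RLP-encoded block, and only its header ends up embedded in the assembled block's ommer list. A sketch of that extraction step; the helper is written for illustration and is not the b11r code:

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// ommerHeader decodes one "0x..." block blob and returns just its header.
func ommerHeader(encoded string) (*types.Header, error) {
	raw, err := hexutil.Decode(encoded)
	if err != nil {
		return nil, err
	}
	var block types.Block
	if err := rlp.DecodeBytes(raw, &block); err != nil {
		return nil, err
	}
	return block.Header(), nil
}

func main() {
	h, err := ommerHeader(os.Args[1]) // pass one entry from ommers.json
	if err != nil {
		panic(err)
	}
	fmt.Println("ommer header hash:", h.Hash())
}
```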
1 cmd/evm/testdata/22/txs.rlp vendored Normal file
@@ -0,0 +1 @@
"c0"
16 cmd/evm/testdata/23/alloc.json vendored Normal file
@@ -0,0 +1,16 @@
{
  "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
    "balance" : "0x0de0b6b3a7640000",
    "code" : "0x6001",
    "nonce" : "0x00",
    "storage" : {
    }
  },
  "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
    "balance" : "0x0de0b6b3a7640000",
    "code" : "0x",
    "nonce" : "0x00",
    "storage" : {
    }
  }
}
7 cmd/evm/testdata/23/env.json vendored Normal file
@@ -0,0 +1,7 @@
{
  "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
  "currentDifficulty" : "0x020000",
  "currentGasLimit" : "0x3b9aca00",
  "currentNumber" : "0x05",
  "currentTimestamp" : "0x03e8"
}
25 cmd/evm/testdata/23/exp.json vendored Normal file
@@ -0,0 +1,25 @@
{
  "result": {
    "stateRoot": "0x65334305e4accfa18352deb24f007b837b5036425b0712cf0e65a43bfa95154d",
    "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
    "receiptsRoot": "0xf951f9396af203499cc7d379715a9110323de73967c5700e2f424725446a3c76",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [
      {
        "root": "0x",
        "status": "0x1",
        "cumulativeGasUsed": "0x520b",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "logs": null,
        "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
        "contractAddress": "0x0000000000000000000000000000000000000000",
        "gasUsed": "0x520b",
        "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "transactionIndex": "0x0"
      }
    ],
    "currentDifficulty": "0x20000",
    "gasUsed": "0x520b"
  }
}
1 cmd/evm/testdata/23/readme.md vendored Normal file
@@ -0,0 +1 @@
These files exemplify how to sign a transaction using the pre-EIP155 scheme.
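Pre-EIP155 signing means hashing the transaction without a chain id, i.e. using the Homestead signer, which is why txs.json sets "protected": false. A minimal sketch with the values from txs.json below:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	if err != nil {
		panic(err)
	}
	// Values from testdata/23/txs.json.
	tx := types.NewTransaction(
		0, // nonce
		common.HexToAddress("0x095e7baea6a6c7c4c2dfeb977efac326af552d87"),
		big.NewInt(0x186a0), // value
		0x5f5e100,           // gas limit
		big.NewInt(1),       // gas price
		nil,                 // input
	)
	// HomesteadSigner hashes the tx without a chain id, so v is 27/28
	// rather than the EIP-155 replay-protected form.
	signed, err := types.SignTx(tx, types.HomesteadSigner{}, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed tx hash:", signed.Hash())
}
```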
15 cmd/evm/testdata/23/txs.json vendored Normal file
@@ -0,0 +1,15 @@
[
  {
    "input" : "0x",
    "gas" : "0x5f5e100",
    "gasPrice" : "0x1",
    "nonce" : "0x0",
    "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
    "value" : "0x186a0",
    "v" : "0x0",
    "r" : "0x0",
    "s" : "0x0",
    "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
    "protected": false
  }
]
5 cmd/evm/testdata/3/exp.json vendored
@@ -15,7 +15,7 @@
   "result": {
     "stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1",
     "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
-    "receiptRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
+    "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
     "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
     "receipts": [
@@ -32,6 +32,7 @@
       "transactionIndex": "0x0"
     }
   ],
-  "currentDifficulty": "0x20000"
+  "currentDifficulty": "0x20000",
+  "gasUsed": "0x521f"
   }
 }
5 cmd/evm/testdata/5/exp.json vendored
@@ -13,10 +13,11 @@
   "result": {
     "stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393",
     "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-    "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+    "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
     "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
     "receipts": [],
-    "currentDifficulty": "0x20000"
+    "currentDifficulty": "0x20000",
+    "gasUsed": "0x0"
   }
 }
cmd/geth/chaincmd.go
@@ -66,6 +66,7 @@ It expects the genesis file as argument.`,
 		Flags: []cli.Flag{
 			utils.MainnetFlag,
 			utils.RopstenFlag,
+			utils.SepoliaFlag,
 			utils.RinkebyFlag,
 			utils.GoerliFlag,
 		},
@@ -140,7 +141,9 @@ be gzipped.`,
 		},
 		Category: "BLOCKCHAIN COMMANDS",
 		Description: `
-The import-preimages command imports hash preimages from an RLP encoded stream.`,
+The import-preimages command imports hash preimages from an RLP encoded stream.
+It's deprecated, please use "geth db import" instead.
+`,
 	}
 	exportPreimagesCommand = cli.Command{
 		Action: utils.MigrateFlags(exportPreimages),
@@ -154,7 +157,9 @@ be gzipped.`,
 		},
 		Category: "BLOCKCHAIN COMMANDS",
 		Description: `
-The export-preimages command export hash preimages to an RLP encoded stream`,
+The export-preimages command exports hash preimages to an RLP encoded stream.
+It's deprecated, please use "geth db export" instead.
+`,
 	}
 	dumpCommand = cli.Command{
 		Action: utils.MigrateFlags(dump),
@@ -368,7 +373,6 @@ func exportPreimages(ctx *cli.Context) error {
 	if len(ctx.Args()) < 1 {
 		utils.Fatalf("This command requires an argument.")
 	}
-
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
cmd/geth/config.go
@@ -32,7 +32,6 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/scwallet"
 	"github.com/ethereum/go-ethereum/accounts/usbwallet"
 	"github.com/ethereum/go-ethereum/cmd/utils"
-	"github.com/ethereum/go-ethereum/eth/catalyst"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/log"
@@ -156,20 +155,13 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
 // makeFullNode loads geth configuration and creates the Ethereum backend.
 func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	stack, cfg := makeConfigNode(ctx)
-	if ctx.GlobalIsSet(utils.OverrideLondonFlag.Name) {
-		cfg.Eth.OverrideLondon = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideLondonFlag.Name))
+	if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) {
+		cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
 	}
-	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
-
-	// Configure catalyst.
-	if ctx.GlobalBool(utils.CatalystFlag.Name) {
-		if eth == nil {
-			utils.Fatalf("Catalyst does not work in light client mode.")
-		}
-		if err := catalyst.Register(stack, eth); err != nil {
-			utils.Fatalf("%v", err)
-		}
+	if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
+		cfg.Eth.Genesis.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
 	}
+	backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
 
 	// Configure GraphQL if requested
 	if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
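For reference, the renamed overrides would be passed on the command line roughly as below; the flag spellings are inferred from the utils.Override… identifiers above and should be treated as an assumption, as are the example values:

```console
$ geth --override.arrowglacier 13372100 --override.terminaltotaldifficulty 40000000000000000
```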
cmd/geth/consolecmd.go
@@ -134,6 +134,8 @@ func remoteConsole(ctx *cli.Context) error {
 			path = filepath.Join(path, "rinkeby")
 		} else if ctx.GlobalBool(utils.GoerliFlag.Name) {
 			path = filepath.Join(path, "goerli")
+		} else if ctx.GlobalBool(utils.SepoliaFlag.Name) {
+			path = filepath.Join(path, "sepolia")
 		}
 	}
 	endpoint = fmt.Sprintf("%s/geth.ipc", path)
cmd/geth/dbcmd.go
@@ -17,11 +17,16 @@
 package main
 
 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"os"
+	"os/signal"
 	"path/filepath"
 	"sort"
 	"strconv"
+	"strings"
+	"syscall"
 	"time"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
@@ -62,6 +67,8 @@ Remove blockchain and state databases`,
 		dbPutCmd,
 		dbGetSlotsCmd,
 		dbDumpFreezerIndex,
+		dbImportCmd,
+		dbExportCmd,
 	},
 }
 dbInspectCmd = cli.Command{
@@ -70,9 +77,11 @@ Remove blockchain and state databases`,
 	ArgsUsage: "<prefix> <start>",
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
+		utils.AncientFlag,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -88,6 +97,7 @@ Remove blockchain and state databases`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -101,6 +111,7 @@ Remove blockchain and state databases`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 		utils.CacheFlag,
@@ -120,6 +131,7 @@ corruption if it is aborted during execution'!`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -135,6 +147,7 @@ corruption if it is aborted during execution'!`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -151,6 +164,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -167,6 +181,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -182,11 +197,42 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		utils.SyncModeFlag,
 		utils.MainnetFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
 	Description: "This command displays information about the freezer index.",
 }
+	dbImportCmd = cli.Command{
+		Action:    utils.MigrateFlags(importLDBdata),
+		Name:      "import",
+		Usage:     "Imports leveldb-data from an exported RLP dump.",
+		ArgsUsage: "<dumpfile> <start (optional)>",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.SyncModeFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+		},
+		Description: "The import command imports the specific chain data from an RLP encoded stream.",
+	}
+	dbExportCmd = cli.Command{
+		Action:    utils.MigrateFlags(exportChaindata),
+		Name:      "export",
+		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
+		ArgsUsage: "<type> <dumpfile>",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.SyncModeFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+		},
+		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
+	}
 )
 
 func removeDB(ctx *cli.Context) error {
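Together the two subcommands give a round-trippable dump workflow. A hypothetical session (file name invented) based on the Usage strings above:

```console
$ geth db export preimage state-preimages.rlp.gz
$ geth db import state-preimages.rlp.gz
```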
@@ -335,14 +381,15 @@ func dbGet(ctx *cli.Context) error {
 	db := utils.MakeChainDatabase(ctx, stack, true)
 	defer db.Close()
 
-	key, err := hexutil.Decode(ctx.Args().Get(0))
+	key, err := parseHexOrString(ctx.Args().Get(0))
 	if err != nil {
 		log.Info("Could not decode the key", "error", err)
 		return err
 	}
 
 	data, err := db.Get(key)
 	if err != nil {
-		log.Info("Get operation failed", "error", err)
+		log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
 		return err
 	}
 	fmt.Printf("key %#x: %#x\n", key, data)
@@ -360,7 +407,7 @@ func dbDelete(ctx *cli.Context) error {
 	db := utils.MakeChainDatabase(ctx, stack, false)
 	defer db.Close()
 
-	key, err := hexutil.Decode(ctx.Args().Get(0))
+	key, err := parseHexOrString(ctx.Args().Get(0))
 	if err != nil {
 		log.Info("Could not decode the key", "error", err)
 		return err
@@ -370,7 +417,7 @@ func dbDelete(ctx *cli.Context) error {
 		fmt.Printf("Previous value: %#x\n", data)
 	}
 	if err = db.Delete(key); err != nil {
-		log.Info("Delete operation returned an error", "error", err)
+		log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
 		return err
 	}
 	return nil
@@ -393,7 +440,7 @@ func dbPut(ctx *cli.Context) error {
 		data []byte
 		err  error
 	)
-	key, err = hexutil.Decode(ctx.Args().Get(0))
+	key, err = parseHexOrString(ctx.Args().Get(0))
 	if err != nil {
 		log.Info("Could not decode the key", "error", err)
 		return err
@@ -499,3 +546,142 @@ func freezerInspect(ctx *cli.Context) error {
 	}
 	return nil
 }
+
+// parseHexOrString tries to hexdecode str, but if the 0x prefix is missing,
+// it instead just returns the raw bytes.
+func parseHexOrString(str string) ([]byte, error) {
+	b, err := hexutil.Decode(str)
+	if errors.Is(err, hexutil.ErrMissingPrefix) {
+		return []byte(str), nil
+	}
+	return b, err
+}
+
+func importLDBdata(ctx *cli.Context) error {
+	start := 0
+	switch ctx.NArg() {
+	case 1:
+		break
+	case 2:
+		s, err := strconv.Atoi(ctx.Args().Get(1))
+		if err != nil {
+			return fmt.Errorf("second arg must be an integer: %v", err)
+		}
+		start = s
+	default:
+		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
+	}
+	var (
+		fName     = ctx.Args().Get(0)
+		stack, _  = makeConfigNode(ctx)
+		interrupt = make(chan os.Signal, 1)
+		stop      = make(chan struct{})
+	)
+	defer stack.Close()
+	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
+	defer signal.Stop(interrupt)
+	defer close(interrupt)
+	go func() {
+		if _, ok := <-interrupt; ok {
+			log.Info("Interrupted during ldb import, stopping at next batch")
+		}
+		close(stop)
+	}()
+	db := utils.MakeChainDatabase(ctx, stack, false)
+	return utils.ImportLDBData(db, fName, int64(start), stop)
+}
+
+type preimageIterator struct {
+	iter ethdb.Iterator
+}
+
+func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
+	for iter.iter.Next() {
+		key := iter.iter.Key()
+		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
+			return utils.OpBatchAdd, key, iter.iter.Value(), true
+		}
+	}
+	return 0, nil, nil, false
+}
+
+func (iter *preimageIterator) Release() {
+	iter.iter.Release()
+}
+
+type snapshotIterator struct {
+	init    bool
+	account ethdb.Iterator
+	storage ethdb.Iterator
+}
+
+func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
+	if !iter.init {
+		iter.init = true
+		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
+	}
+	for iter.account.Next() {
+		key := iter.account.Key()
+		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
+			return utils.OpBatchAdd, key, iter.account.Value(), true
+		}
+	}
+	for iter.storage.Next() {
+		key := iter.storage.Key()
+		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
+			return utils.OpBatchAdd, key, iter.storage.Value(), true
+		}
+	}
+	return 0, nil, nil, false
+}
+
+func (iter *snapshotIterator) Release() {
+	iter.account.Release()
+	iter.storage.Release()
+}
+
+// chainExporters defines the export scheme for all exportable chain data.
+var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
+	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
+		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
+		return &preimageIterator{iter: iter}
+	},
+	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
+		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
+		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
+		return &snapshotIterator{account: account, storage: storage}
+	},
+}
+
+func exportChaindata(ctx *cli.Context) error {
+	if ctx.NArg() < 2 {
+		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
+	}
+	// Parse the required chain data type, make sure it's supported.
+	kind := ctx.Args().Get(0)
+	kind = strings.ToLower(strings.Trim(kind, " "))
+	exporter, ok := chainExporters[kind]
+	if !ok {
+		var kinds []string
+		for kind := range chainExporters {
+			kinds = append(kinds, kind)
+		}
+		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
+	}
+	var (
+		stack, _  = makeConfigNode(ctx)
+		interrupt = make(chan os.Signal, 1)
+		stop      = make(chan struct{})
+	)
+	defer stack.Close()
+	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
+	defer signal.Stop(interrupt)
+	defer close(interrupt)
+	go func() {
+		if _, ok := <-interrupt; ok {
+			log.Info("Interrupted during db export, stopping at next batch")
+		}
+		close(stop)
+	}()
+	db := utils.MakeChainDatabase(ctx, stack, true)
+	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
+}
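A minimal sketch (not part of the change above) of driving the two utils helpers directly, bypassing the CLI handlers: the srcDB and dstDB handles are assumed to exist already, everything else is named in the hunks above.

	iter := chainExporters["preimage"](srcDB)
	if err := utils.ExportChaindata("preimages.gz", "preimage", iter, make(chan struct{})); err != nil {
		log.Crit("Export failed", "err", err)
	}
	if err := utils.ImportLDBData(dstDB, "preimages.gz", 0, make(chan struct{})); err != nil {
		log.Crit("Import failed", "err", err)
	}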
@@ -39,6 +39,11 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/node"
+
+	// Force-load the tracer engines to trigger registration
+	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
+	_ "github.com/ethereum/go-ethereum/eth/tracers/native"
+
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -66,7 +71,8 @@ var (
 		utils.NoUSBFlag,
 		utils.USBFlag,
 		utils.SmartCardDaemonPathFlag,
-		utils.OverrideLondonFlag,
+		utils.OverrideArrowGlacierFlag,
+		utils.OverrideTerminalTotalDifficulty,
 		utils.EthashCacheDirFlag,
 		utils.EthashCachesInMemoryFlag,
 		utils.EthashCachesOnDiskFlag,
@@ -135,7 +141,9 @@ var (
 		utils.MainnetFlag,
 		utils.DeveloperFlag,
 		utils.DeveloperPeriodFlag,
+		utils.DeveloperGasLimitFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 		utils.VMEnableDebugFlag,
@@ -62,6 +62,7 @@ var (
 		utils.DataDirFlag,
 		utils.AncientFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 		utils.CacheTrieJournalFlag,
@@ -92,6 +93,7 @@ the trie clean cache with default directory will be deleted.
 		utils.DataDirFlag,
 		utils.AncientFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -112,6 +114,7 @@ In other words, this command does the snapshot to trie conversion.
 		utils.DataDirFlag,
 		utils.AncientFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -134,6 +137,7 @@ It's also usable without snapshot enabled.
 		utils.DataDirFlag,
 		utils.AncientFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 	},
@@ -157,6 +161,7 @@ It's also usable without snapshot enabled.
 		utils.DataDirFlag,
 		utils.AncientFlag,
 		utils.RopstenFlag,
+		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
 		utils.ExcludeCodeFlag,
@@ -215,7 +220,7 @@ func verifyState(ctx *cli.Context) error {
 		log.Error("Failed to load head block")
 		return errors.New("no head block")
 	}
-	snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
+	snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false, false)
 	if err != nil {
 		log.Error("Failed to open snapshot tree", "err", err)
 		return err
@@ -467,7 +472,7 @@ func dumpState(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
+	snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false, false)
 	if err != nil {
 		return err
 	}
@@ -45,6 +45,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.GoerliFlag,
 			utils.RinkebyFlag,
 			utils.RopstenFlag,
+			utils.SepoliaFlag,
 			utils.SyncModeFlag,
 			utils.ExitWhenSyncedFlag,
 			utils.GCModeFlag,
@@ -74,6 +75,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 		Flags: []cli.Flag{
 			utils.DeveloperFlag,
 			utils.DeveloperPeriodFlag,
+			utils.DeveloperGasLimitFlag,
 		},
 	},
 	{
@@ -17,7 +17,6 @@
 package main
 
 import (
-	"bufio"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -32,8 +31,10 @@ import (
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/console/prompt"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/peterh/liner"
 	"golang.org/x/crypto/ssh/terminal"
 )
 
@@ -76,17 +77,27 @@ type wizard struct {
 	servers  map[string]*sshClient // SSH connections to servers to administer
 	services map[string][]string   // Ethereum services known to be running on servers
 
-	in   *bufio.Reader // Wrapper around stdin to allow reading user input
-	lock sync.Mutex    // Lock to protect configs during concurrent service discovery
+	lock sync.Mutex // Lock to protect configs during concurrent service discovery
+}
+
+// promptInput prompts the user for input with the given prompt string. Returns
+// when a value is entered. Causes the wizard to exit if ctrl-d is pressed.
+func promptInput(p string) string {
+	for {
+		text, err := prompt.Stdin.PromptInput(p)
+		if err != nil {
+			if err != liner.ErrPromptAborted {
+				log.Crit("Failed to read user input", "err", err)
+			}
+		} else {
+			return text
+		}
+	}
 }
 
 // read reads a single line from stdin, trimming it from spaces.
 func (w *wizard) read() string {
-	fmt.Printf("> ")
-	text, err := w.in.ReadString('\n')
-	if err != nil {
-		log.Crit("Failed to read user input", "err", err)
-	}
+	text := promptInput("> ")
 	return strings.TrimSpace(text)
 }
 
@@ -94,11 +105,7 @@ func (w *wizard) read() string {
 // non-emptiness.
 func (w *wizard) readString() string {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.TrimSpace(text); text != "" {
 			return text
 		}
@@ -108,11 +115,7 @@ func (w *wizard) readString() string {
 // readDefaultString reads a single line from stdin, trimming it from spaces. If
 // an empty line is entered, the default value is returned.
 func (w *wizard) readDefaultString(def string) string {
-	fmt.Printf("> ")
-	text, err := w.in.ReadString('\n')
-	if err != nil {
-		log.Crit("Failed to read user input", "err", err)
-	}
+	text := promptInput("> ")
 	if text = strings.TrimSpace(text); text != "" {
 		return text
 	}
@@ -124,11 +127,7 @@ func (w *wizard) readDefaultString(def string) string {
 // value is returned.
 func (w *wizard) readDefaultYesNo(def bool) bool {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
 			return def
 		}
@@ -146,11 +145,7 @@ func (w *wizard) readDefaultYesNo(def bool) bool {
 // interpret it as a URL (http, https or file).
 func (w *wizard) readURL() *url.URL {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		uri, err := url.Parse(strings.TrimSpace(text))
 		if err != nil {
 			log.Error("Invalid input, expected URL", "err", err)
@@ -164,11 +159,7 @@ func (w *wizard) readURL() *url.URL {
 // to parse into an integer.
 func (w *wizard) readInt() int {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.TrimSpace(text); text == "" {
 			continue
 		}
@@ -186,11 +177,7 @@ func (w *wizard) readInt() int {
 // returned.
 func (w *wizard) readDefaultInt(def int) int {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.TrimSpace(text); text == "" {
 			return def
 		}
@@ -208,11 +195,7 @@ func (w *wizard) readDefaultInt(def int) int {
 // default value is returned.
 func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.TrimSpace(text); text == "" {
 			return def
 		}
@@ -225,38 +208,11 @@ func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
 	}
 }
 
-/*
-// readFloat reads a single line from stdin, trimming it from spaces, enforcing it
-// to parse into a float.
-func (w *wizard) readFloat() float64 {
-	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
-		if text = strings.TrimSpace(text); text == "" {
-			continue
-		}
-		val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
-		if err != nil {
-			log.Error("Invalid input, expected float", "err", err)
-			continue
-		}
-		return val
-	}
-}
-*/
-
 // readDefaultFloat reads a single line from stdin, trimming it from spaces, enforcing
 // it to parse into a float. If an empty line is entered, the default value is returned.
 func (w *wizard) readDefaultFloat(def float64) float64 {
 	for {
-		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.TrimSpace(text); text == "" {
 			return def
 		}
@@ -285,12 +241,7 @@ func (w *wizard) readPassword() string {
 // it to an Ethereum address.
 func (w *wizard) readAddress() *common.Address {
 	for {
-		// Read the address from the user
-		fmt.Printf("> 0x")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> 0x")
 		if text = strings.TrimSpace(text); text == "" {
 			return nil
 		}
@@ -311,11 +262,7 @@ func (w *wizard) readAddress() *common.Address {
 func (w *wizard) readDefaultAddress(def common.Address) common.Address {
 	for {
 		// Read the address from the user
-		fmt.Printf("> 0x")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> 0x")
 		if text = strings.TrimSpace(text); text == "" {
 			return def
 		}
@@ -334,8 +281,9 @@ func (w *wizard) readJSON() string {
 	var blob json.RawMessage
 
 	for {
-		fmt.Printf("> ")
-		if err := json.NewDecoder(w.in).Decode(&blob); err != nil {
+		text := promptInput("> ")
+		reader := strings.NewReader(text)
+		if err := json.NewDecoder(reader).Decode(&blob); err != nil {
 			log.Error("Invalid JSON, please try again", "err", err)
 			continue
 		}
@@ -351,10 +299,7 @@ func (w *wizard) readIPAddress() string {
 	for {
 		// Read the IP address from the user
 		fmt.Printf("> ")
-		text, err := w.in.ReadString('\n')
-		if err != nil {
-			log.Crit("Failed to read user input", "err", err)
-		}
+		text := promptInput("> ")
 		if text = strings.TrimSpace(text); text == "" {
 			return ""
 		}
@@ -17,14 +17,12 @@
 package main
 
 import (
-	"bufio"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
-	"sync"
 
 	"github.com/ethereum/go-ethereum/log"
 )
@@ -38,7 +36,6 @@ func makeWizard(network string) *wizard {
 		},
 		servers:  make(map[string]*sshClient),
 		services: make(map[string][]string),
-		in:       bufio.NewReader(os.Stdin),
 	}
 }
 
@@ -82,25 +79,17 @@ func (w *wizard) run() {
 	} else if err := json.Unmarshal(blob, &w.conf); err != nil {
 		log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
 	} else {
-		// Dial all previously known servers concurrently
-		var pend sync.WaitGroup
+		// Dial all previously known servers
 		for server, pubkey := range w.conf.Servers {
-			pend.Add(1)
-
-			go func(server string, pubkey []byte) {
-				defer pend.Done()
-
-				log.Info("Dialing previously configured server", "server", server)
-				client, err := dial(server, pubkey)
-				if err != nil {
-					log.Error("Previous server unreachable", "server", server, "err", err)
-				}
-				w.lock.Lock()
-				w.servers[server] = client
-				w.lock.Unlock()
-			}(server, pubkey)
+			log.Info("Dialing previously configured server", "server", server)
+			client, err := dial(server, pubkey)
+			if err != nil {
+				log.Error("Previous server unreachable", "server", server, "err", err)
+			}
+			w.lock.Lock()
+			w.servers[server] = client
+			w.lock.Unlock()
 		}
-		pend.Wait()
 		w.networkStats()
 	}
 	// Basics done, loop ad infinitum about what to do
@@ -18,7 +18,9 @@
 package main
 
 import (
+	"bufio"
 	"bytes"
+	"container/list"
 	"encoding/hex"
 	"flag"
 	"fmt"
@@ -26,18 +28,20 @@ import (
 	"os"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
 var (
 	hexMode     = flag.String("hex", "", "dump given hex data")
+	reverseMode = flag.Bool("reverse", false, "convert ASCII to rlp")
 	noASCII     = flag.Bool("noascii", false, "don't print ASCII strings readably")
 	single      = flag.Bool("single", false, "print only the first element, discard the rest")
 )
 
 func init() {
 	flag.Usage = func() {
-		fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>] [filename]")
+		fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>] [-reverse] [filename]")
 		flag.PrintDefaults()
 		fmt.Fprintln(os.Stderr, `
 Dumps RLP data from the given file in readable form.
@@ -73,23 +77,40 @@ func main() {
 		flag.Usage()
 		os.Exit(2)
 	}
-	s := rlp.NewStream(r, 0)
-	for {
-		if err := dump(s, 0); err != nil {
-			if err != io.EOF {
-				die(err)
-			}
-			break
-		}
-		fmt.Println()
-		if *single {
-			break
+	out := os.Stdout
+	if *reverseMode {
+		data, err := textToRlp(r)
+		if err != nil {
+			die(err)
+		}
+		fmt.Printf("0x%x\n", data)
+		return
+	} else {
+		err := rlpToText(r, out)
+		if err != nil {
+			die(err)
 		}
 	}
 }
 
-func dump(s *rlp.Stream, depth int) error {
+func rlpToText(r io.Reader, out io.Writer) error {
+	s := rlp.NewStream(r, 0)
+	for {
+		if err := dump(s, 0, out); err != nil {
+			if err != io.EOF {
+				return err
+			}
+			break
+		}
+		fmt.Fprintln(out)
+		if *single {
+			break
+		}
+	}
+	return nil
+}
+
+func dump(s *rlp.Stream, depth int, out io.Writer) error {
 	kind, size, err := s.Kind()
 	if err != nil {
 		return err
@@ -101,28 +122,28 @@ func dump(s *rlp.Stream, depth int) error {
 			return err
 		}
 		if len(str) == 0 || !*noASCII && isASCII(str) {
-			fmt.Printf("%s%q", ws(depth), str)
+			fmt.Fprintf(out, "%s%q", ws(depth), str)
 		} else {
-			fmt.Printf("%s%x", ws(depth), str)
+			fmt.Fprintf(out, "%s%x", ws(depth), str)
 		}
 	case rlp.List:
 		s.List()
 		defer s.ListEnd()
 		if size == 0 {
-			fmt.Print(ws(depth) + "[]")
+			fmt.Fprintf(out, ws(depth)+"[]")
 		} else {
-			fmt.Println(ws(depth) + "[")
+			fmt.Fprintln(out, ws(depth)+"[")
 			for i := 0; ; i++ {
 				if i > 0 {
-					fmt.Print(",\n")
+					fmt.Fprint(out, ",\n")
 				}
-				if err := dump(s, depth+1); err == rlp.EOL {
+				if err := dump(s, depth+1, out); err == rlp.EOL {
 					break
 				} else if err != nil {
 					return err
 				}
 			}
-			fmt.Print(ws(depth) + "]")
+			fmt.Fprint(out, ws(depth)+"]")
 		}
 	}
 	return nil
@@ -145,3 +166,45 @@ func die(args ...interface{}) {
 	fmt.Fprintln(os.Stderr, args...)
 	os.Exit(1)
 }
+
+// textToRlp converts text into RLP (best effort).
+func textToRlp(r io.Reader) ([]byte, error) {
+	// We're expecting the input to be well-formed, meaning that
+	// - each element is on a separate line
+	// - each line is either an (element OR a list start/end) + comma
+	// - an element is either hex-encoded bytes OR a quoted string
+	var (
+		scanner = bufio.NewScanner(r)
+		obj     []interface{}
+		stack   = list.New()
+	)
+	for scanner.Scan() {
+		t := strings.TrimSpace(scanner.Text())
+		if len(t) == 0 {
+			continue
+		}
+		switch t {
+		case "[": // list start
+			stack.PushFront(obj)
+			obj = make([]interface{}, 0)
+		case "]", "],": // list end
+			parent := stack.Remove(stack.Front()).([]interface{})
+			obj = append(parent, obj)
+		case "[],": // empty list
+			obj = append(obj, make([]interface{}, 0))
+		default: // element
+			data := []byte(t)[:len(t)-1] // cut off comma
+			if data[0] == '"' { // ascii string
+				data = []byte(t)[1 : len(data)-1]
+			} else { // hex data
+				data = common.FromHex(string(data))
+			}
+			obj = append(obj, data)
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+	data, err := rlp.EncodeToBytes(obj[0])
+	return data, err
+}
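The text format accepted by textToRlp mirrors what the dumper emits: one element per line, each line terminated by a comma, with [ and ] delimiting lists. A round-trip sketch using the value pair from TestTextToRlp in the test file below:

	// 0xc780c0c1c0825208 is an empty string, an empty list, a list holding an
	// empty list, and the bytes 0x5208, all wrapped in an outer list.
	text := "[\n\"\",\n[],\n[\n[],\n],\n5208,\n]"
	data, err := textToRlp(strings.NewReader(text))
	if err != nil {
		die(err)
	}
	fmt.Printf("0x%x\n", data) // prints 0xc780c0c1c0825208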
65
cmd/rlpdump/rlpdump_test.go
Normal file
@@ -0,0 +1,65 @@
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+func TestRoundtrip(t *testing.T) {
+	for i, want := range []string{
+		"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
+		"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
+		"0xc780c0c1c0825208",
+	} {
+		var out strings.Builder
+		err := rlpToText(bytes.NewReader(common.FromHex(want)), &out)
+		if err != nil {
+			t.Fatal(err)
+		}
+		text := out.String()
+		rlpBytes, err := textToRlp(strings.NewReader(text))
+		if err != nil {
+			t.Errorf("test %d: error %v", i, err)
+			continue
+		}
+		have := fmt.Sprintf("0x%x", rlpBytes)
+		if have != want {
+			t.Errorf("test %d: have\n%v\nwant:\n%v\n", i, have, want)
+		}
+	}
+}
+
+func TestTextToRlp(t *testing.T) {
+	type tc struct {
+		text string
+		want string
+	}
+	cases := []tc{
+		{
+			text: `[
+"",
+[],
+[
+[],
+],
+5208,
+]`,
+			want: "0xc780c0c1c0825208",
+		},
+	}
+	for i, tc := range cases {
+		have, err := textToRlp(strings.NewReader(tc.text))
+		if err != nil {
+			t.Errorf("test %d: error %v", i, err)
+			continue
+		}
+		if hexutil.Encode(have) != tc.want {
+			t.Errorf("test %d:\nhave %v\nwant %v", i, hexutil.Encode(have), tc.want)
+		}
+	}
+}
212
cmd/utils/cmd.go
@@ -18,7 +18,9 @@
 package utils
 
 import (
+	"bufio"
 	"compress/gzip"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -270,6 +272,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
 }
 
 // ImportPreimages imports a batch of exported hash preimages into the database.
+// It's a part of the deprecated functionality, should be removed in the future.
 func ImportPreimages(db ethdb.Database, fn string) error {
 	log.Info("Importing preimages", "file", fn)
 
@@ -280,7 +283,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
 	}
 	defer fh.Close()
 
-	var reader io.Reader = fh
+	var reader io.Reader = bufio.NewReader(fh)
 	if strings.HasSuffix(fn, ".gz") {
 		if reader, err = gzip.NewReader(reader); err != nil {
 			return err
@@ -288,7 +291,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
 	}
 	stream := rlp.NewStream(reader, 0)
 
-	// Import the preimages in batches to prevent disk trashing
+	// Import the preimages in batches to prevent disk thrashing
 	preimages := make(map[common.Hash][]byte)
 
 	for {
@@ -317,6 +320,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
 
 // ExportPreimages exports all known hash preimages into the specified file,
 // truncating any data already present in the file.
+// It's a part of the deprecated functionality, should be removed in the future.
 func ExportPreimages(db ethdb.Database, fn string) error {
 	log.Info("Exporting preimages", "file", fn)
 
@@ -344,3 +348,207 @@ func ExportPreimages(db ethdb.Database, fn string) error {
 	log.Info("Exported preimages", "file", fn)
 	return nil
 }
+
+// exportHeader is used in the export/import flow. When we do an export,
+// the first element we output is the exportHeader.
+// Whenever a backwards-incompatible change is made, the Version header
+// should be bumped.
+// If the importer sees a higher version, it should reject the import.
+type exportHeader struct {
+	Magic    string // Always set to 'gethdbdump' for disambiguation
+	Version  uint64
+	Kind     string
+	UnixTime uint64
+}
+
+const exportMagic = "gethdbdump"
+const (
+	OpBatchAdd = 0
+	OpBatchDel = 1
+)
+
+// ImportLDBData imports a batch of snapshot data into the database.
+func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
+	log.Info("Importing leveldb data", "file", f)
+
+	// Open the file handle and potentially unwrap the gzip stream
+	fh, err := os.Open(f)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+
+	var reader io.Reader = bufio.NewReader(fh)
+	if strings.HasSuffix(f, ".gz") {
+		if reader, err = gzip.NewReader(reader); err != nil {
+			return err
+		}
+	}
+	stream := rlp.NewStream(reader, 0)
+
+	// Read the header
+	var header exportHeader
+	if err := stream.Decode(&header); err != nil {
+		return fmt.Errorf("could not decode header: %v", err)
+	}
+	if header.Magic != exportMagic {
+		return errors.New("incompatible data, wrong magic")
+	}
+	if header.Version != 0 {
+		return fmt.Errorf("incompatible version %d (only 0 supported)", header.Version)
+	}
+	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
+		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))
+
+	// Import the snapshot in batches to prevent disk thrashing
+	var (
+		count  int64
+		start  = time.Now()
+		logged = time.Now()
+		batch  = db.NewBatch()
+	)
+	for {
+		// Read the next entry
+		var (
+			op       byte
+			key, val []byte
+		)
+		if err := stream.Decode(&op); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		if err := stream.Decode(&key); err != nil {
+			return err
+		}
+		if err := stream.Decode(&val); err != nil {
+			return err
+		}
+		if count < startIndex {
+			count++
+			continue
+		}
+		switch op {
+		case OpBatchDel:
+			batch.Delete(key)
+		case OpBatchAdd:
+			batch.Put(key, val)
+		default:
+			return fmt.Errorf("unknown op %d", op)
+		}
+		if batch.ValueSize() > ethdb.IdealBatchSize {
+			if err := batch.Write(); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+		// Check interruption emitted by ctrl+c
+		if count%1000 == 0 {
+			select {
+			case <-interrupt:
+				if err := batch.Write(); err != nil {
+					return err
+				}
+				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+				return nil
+			default:
+			}
+		}
+		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+		count += 1
+	}
+	// Flush the last batch snapshot data
+	if batch.ValueSize() > 0 {
+		if err := batch.Write(); err != nil {
+			return err
+		}
+	}
+	log.Info("Imported chain data", "file", f, "count", count,
+		"elapsed", common.PrettyDuration(time.Since(start)))
+	return nil
+}
+
+// ChainDataIterator is an interface that wraps all the functions necessary to
+// iterate over the chain data to be exported.
+type ChainDataIterator interface {
+	// Next returns the operation and key-value pair for the next entry to
+	// export. When the end is reached, it will return (0, nil, nil, false).
+	Next() (byte, []byte, []byte, bool)
+
+	// Release releases associated resources. Release should always succeed and can
+	// be called multiple times without causing error.
+	Release()
+}
+
+// ExportChaindata exports the given data type into the file, truncating any
+// data already present in it. If the suffix is 'gz', gzip compression is used.
+func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
+	log.Info("Exporting chain data", "file", fn, "kind", kind)
+	defer iter.Release()
+
+	// Open the file handle and potentially wrap with a gzip stream
+	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+
+	var writer io.Writer = fh
+	if strings.HasSuffix(fn, ".gz") {
+		writer = gzip.NewWriter(writer)
+		defer writer.(*gzip.Writer).Close()
+	}
+	// Write the header
+	if err := rlp.Encode(writer, &exportHeader{
+		Magic:    exportMagic,
+		Version:  0,
+		Kind:     kind,
+		UnixTime: uint64(time.Now().Unix()),
+	}); err != nil {
+		return err
+	}
+	// Extract the data from the source iterator and dump it out to the file
+	var (
+		count  int64
+		start  = time.Now()
+		logged = time.Now()
+	)
+	for {
+		op, key, val, ok := iter.Next()
+		if !ok {
+			break
+		}
+		if err := rlp.Encode(writer, op); err != nil {
+			return err
+		}
+		if err := rlp.Encode(writer, key); err != nil {
+			return err
+		}
+		if err := rlp.Encode(writer, val); err != nil {
+			return err
+		}
+		if count%1000 == 0 {
+			// Check interruption emitted by ctrl+c
+			select {
+			case <-interrupt:
+				log.Info("Chain data exporting interrupted", "file", fn,
+					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+				return nil
+			default:
+			}
+			if time.Since(logged) > 8*time.Second {
+				log.Info("Exporting chain data", "file", fn, "kind", kind,
+					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+				logged = time.Now()
+			}
+		}
+		count++
+	}
+	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
+		"elapsed", common.PrettyDuration(time.Since(start)))
+	return nil
+}
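On disk a dump is therefore a single RLP-encoded exportHeader followed by a flat sequence of op/key/value fields, each RLP-encoded separately. A minimal sketch (not part of the change) of peeking at a gzipped dump's header, reusing only the names defined above:

	fh, err := os.Open("dump.gz")
	if err != nil {
		return err
	}
	defer fh.Close()
	var reader io.Reader = bufio.NewReader(fh)
	if reader, err = gzip.NewReader(reader); err != nil {
		return err
	}
	// Field order matters: RLP encodes struct fields positionally.
	var header exportHeader
	if err := rlp.NewStream(reader, 0).Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	fmt.Println(header.Magic, header.Kind, header.Version)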
198
cmd/utils/export_test.go
Normal file
@@ -0,0 +1,198 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+// TestExport does basic sanity checks on the export/import functionality
+func TestExport(t *testing.T) {
+	f := fmt.Sprintf("%v/tempdump", os.TempDir())
+	defer func() {
+		os.Remove(f)
+	}()
+	testExport(t, f)
+}
+
+func TestExportGzip(t *testing.T) {
+	f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
+	defer func() {
+		os.Remove(f)
+	}()
+	testExport(t, f)
+}
+
+type testIterator struct {
+	index int
+}
+
+func newTestIterator() *testIterator {
+	return &testIterator{index: -1}
+}
+
+func (iter *testIterator) Next() (byte, []byte, []byte, bool) {
+	if iter.index >= 999 {
+		return 0, nil, nil, false
+	}
+	iter.index += 1
+	if iter.index == 42 {
+		iter.index += 1
+	}
+	return OpBatchAdd, []byte(fmt.Sprintf("key-%04d", iter.index)),
+		[]byte(fmt.Sprintf("value %d", iter.index)), true
+}
+
+func (iter *testIterator) Release() {}
+
+func testExport(t *testing.T, f string) {
+	err := ExportChaindata(f, "testdata", newTestIterator(), make(chan struct{}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	db := rawdb.NewMemoryDatabase()
+	err = ImportLDBData(db, f, 5, make(chan struct{}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// verify
+	for i := 0; i < 1000; i++ {
+		v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
+		if (i < 5 || i == 42) && err == nil {
+			t.Fatalf("expected no element at idx %d, got '%v'", i, string(v))
+		}
+		if !(i < 5 || i == 42) {
+			if err != nil {
+				t.Fatalf("expected element idx %d: %v", i, err)
+			}
+			if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
+				t.Fatalf("have %v, want %v", have, want)
+			}
+		}
+	}
+	v, err := db.Get([]byte(fmt.Sprintf("key-%04d", 1000)))
+	if err == nil {
+		t.Fatalf("expected no element at idx %d, got '%v'", 1000, string(v))
+	}
+}
+
+// TestDeletionExport tests if the deletion markers can be exported/imported correctly
+func TestDeletionExport(t *testing.T) {
+	f := fmt.Sprintf("%v/tempdump", os.TempDir())
+	defer func() {
+		os.Remove(f)
+	}()
+	testDeletion(t, f)
+}
+
+// TestDeletionExportGzip tests if the deletion markers can be exported/imported
+// correctly with gz compression.
+func TestDeletionExportGzip(t *testing.T) {
+	f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
+	defer func() {
+		os.Remove(f)
+	}()
+	testDeletion(t, f)
+}
+
+type deletionIterator struct {
+	index int
+}
+
+func newDeletionIterator() *deletionIterator {
+	return &deletionIterator{index: -1}
+}
+
+func (iter *deletionIterator) Next() (byte, []byte, []byte, bool) {
+	if iter.index >= 999 {
+		return 0, nil, nil, false
+	}
+	iter.index += 1
+	if iter.index == 42 {
+		iter.index += 1
+	}
+	return OpBatchDel, []byte(fmt.Sprintf("key-%04d", iter.index)), nil, true
+}
+
+func (iter *deletionIterator) Release() {}
+
+func testDeletion(t *testing.T, f string) {
+	err := ExportChaindata(f, "testdata", newDeletionIterator(), make(chan struct{}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	db := rawdb.NewMemoryDatabase()
+	for i := 0; i < 1000; i++ {
+		db.Put([]byte(fmt.Sprintf("key-%04d", i)), []byte(fmt.Sprintf("value %d", i)))
+	}
+	err = ImportLDBData(db, f, 5, make(chan struct{}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i := 0; i < 1000; i++ {
+		v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
+		if i < 5 || i == 42 {
+			if err != nil {
+				t.Fatalf("expected element at idx %d, got '%v'", i, err)
+			}
+			if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
+				t.Fatalf("have %v, want %v", have, want)
+			}
+		}
+		if !(i < 5 || i == 42) {
+			if err == nil {
+				t.Fatalf("expected no element idx %d: %v", i, string(v))
+			}
+		}
+	}
+}
+
+// TestImportFutureFormat tests that we reject unsupported future versions.
+func TestImportFutureFormat(t *testing.T) {
+	f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
+	defer func() {
+		os.Remove(f)
+	}()
+	fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer fh.Close()
+	if err := rlp.Encode(fh, &exportHeader{
+		Magic:    exportMagic,
+		Version:  500,
+		Kind:     "testdata",
+		UnixTime: uint64(time.Now().Unix()),
+	}); err != nil {
+		t.Fatal(err)
+	}
+	db2 := rawdb.NewMemoryDatabase()
+	err = ImportLDBData(db2, f, 0, make(chan struct{}))
+	if err == nil {
+		t.Fatal("Expected error, got none")
+	}
+	if !strings.HasPrefix(err.Error(), "incompatible version") {
+		t.Fatalf("wrong error: %v", err)
+	}
+}
@@ -45,6 +45,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/catalyst"
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -155,6 +156,10 @@ var (
 		Name:  "ropsten",
 		Usage: "Ropsten network: pre-configured proof-of-work test network",
 	}
+	SepoliaFlag = cli.BoolFlag{
+		Name:  "sepolia",
+		Usage: "Sepolia network: pre-configured proof-of-work test network",
+	}
 	DeveloperFlag = cli.BoolFlag{
 		Name:  "dev",
 		Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -163,6 +168,11 @@ var (
 		Name:  "dev.period",
 		Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
 	}
+	DeveloperGasLimitFlag = cli.Uint64Flag{
+		Name:  "dev.gaslimit",
+		Usage: "Initial block gas limit",
+		Value: 11500000,
+	}
 	IdentityFlag = cli.StringFlag{
 		Name:  "identity",
 		Usage: "Custom node name",
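With the new flag, an ephemeral dev chain can be started with a larger initial gas limit than the 11,500,000 default, e.g. (assuming the usual geth invocation) geth --dev --dev.gaslimit 30000000; as the SetEthConfig hunk further down suggests, the value is consulted when a fresh developer genesis block is created.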
@@ -205,7 +215,7 @@ var (
 	defaultSyncMode = ethconfig.Defaults.SyncMode
 	SyncModeFlag    = TextMarshalerFlag{
 		Name:  "syncmode",
-		Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`,
+		Usage: `Blockchain sync mode ("snap", "full" or "light")`,
 		Value: &defaultSyncMode,
 	}
 	GCModeFlag = cli.StringFlag{
@@ -235,9 +245,13 @@ var (
 		Usage: "Megabytes of memory allocated to bloom-filter for pruning",
 		Value: 2048,
 	}
-	OverrideLondonFlag = cli.Uint64Flag{
-		Name:  "override.london",
-		Usage: "Manually specify London fork-block, overriding the bundled setting",
+	OverrideArrowGlacierFlag = cli.Uint64Flag{
+		Name:  "override.arrowglacier",
+		Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting",
+	}
+	OverrideTerminalTotalDifficulty = cli.Uint64Flag{
+		Name:  "override.terminaltotaldifficulty",
+		Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting",
 	}
 	// Light server and client settings
 	LightServeFlag = cli.IntFlag{
@@ -686,7 +700,7 @@ var (
 	}
 	GpoMaxGasPriceFlag = cli.Int64Flag{
 		Name:  "gpo.maxprice",
-		Usage: "Maximum gas price will be recommended by gpo",
+		Usage: "Maximum transaction priority fee (or gasprice before London fork) to be recommended by gpo",
 		Value: ethconfig.Defaults.GPO.MaxPrice.Int64(),
 	}
 	GpoIgnoreGasPriceFlag = cli.Int64Flag{
@@ -798,6 +812,9 @@ func MakeDataDir(ctx *cli.Context) string {
 		if ctx.GlobalBool(GoerliFlag.Name) {
 			return filepath.Join(path, "goerli")
 		}
+		if ctx.GlobalBool(SepoliaFlag.Name) {
+			return filepath.Join(path, "sepolia")
+		}
 		return path
 	}
 	Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@@ -846,6 +863,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
 		urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
 	case ctx.GlobalBool(RopstenFlag.Name):
 		urls = params.RopstenBootnodes
+	case ctx.GlobalBool(SepoliaFlag.Name):
+		urls = params.SepoliaBootnodes
 	case ctx.GlobalBool(RinkebyFlag.Name):
 		urls = params.RinkebyBootnodes
 	case ctx.GlobalBool(GoerliFlag.Name):
@@ -1182,7 +1201,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
 		cfg.NetRestrict = list
 	}
 
-	if ctx.GlobalBool(DeveloperFlag.Name) || ctx.GlobalBool(CatalystFlag.Name) {
+	if ctx.GlobalBool(DeveloperFlag.Name) {
 		// --dev mode can't use p2p networking.
 		cfg.MaxPeers = 0
 		cfg.ListenAddr = ""
@@ -1269,6 +1288,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
 	case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir():
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
+	case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
+		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
 	}
 }
 
@@ -1454,7 +1475,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
 // SetEthConfig applies eth-related command line flags to the config.
 func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	// Avoid conflicting network flags
-	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag)
+	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
 	CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
 	CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
 	if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@@ -1598,6 +1619,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		}
 		cfg.Genesis = core.DefaultRopstenGenesisBlock()
 		SetDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash)
+	case ctx.GlobalBool(SepoliaFlag.Name):
+		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
+			cfg.NetworkId = 11155111
+		}
+		cfg.Genesis = core.DefaultSepoliaGenesisBlock()
+		SetDNSDiscoveryDefaults(cfg, params.SepoliaGenesisHash)
 	case ctx.GlobalBool(RinkebyFlag.Name):
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
 			cfg.NetworkId = 4
@@ -1644,7 +1671,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		log.Info("Using developer account", "address", developer.Address)
 
 		// Create a new developer genesis block or reuse existing one
-		cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
+		cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
 		if ctx.GlobalIsSet(DataDirFlag.Name) {
 			// Check if we have an already initialized chain and fall back to
 			// that if so. Otherwise we need to generate a new genesis spec.
@@ -1683,13 +1710,18 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {

 // RegisterEthService adds an Ethereum client to the stack.
 // The second return value is the full node instance, which may be nil if the
 // node is running as a light client.
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
+func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
 	if cfg.SyncMode == downloader.LightSync {
 		backend, err := les.New(stack, cfg)
 		if err != nil {
 			Fatalf("Failed to register the Ethereum service: %v", err)
 		}
 		stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
+		if isCatalyst {
+			if err := catalyst.RegisterLight(stack, backend); err != nil {
+				Fatalf("Failed to register the catalyst service: %v", err)
+			}
+		}
 		return backend.ApiBackend, nil
 	}
 	backend, err := eth.New(stack, cfg)

@@ -1702,6 +1734,11 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
 			Fatalf("Failed to create the LES server: %v", err)
 		}
 	}
+	if isCatalyst {
+		if err := catalyst.Register(stack, backend); err != nil {
+			Fatalf("Failed to register the catalyst service: %v", err)
+		}
+	}
 	stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
 	return backend.APIBackend, backend
 }
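A minimal sketch of a caller threading the new parameter through. The geth command wiring is not part of this excerpt, so the surrounding setup here is an assumption:

package main

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	stack, _ := node.New(&node.DefaultConfig)
	cfg := ethconfig.Defaults
	// Passing true additionally registers the catalyst service, the
	// consensus-layer facing API used for the merge transition.
	backend, _ := utils.RegisterEthService(stack, &cfg, true)
	_ = backend
}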
@@ -1826,6 +1863,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
 		genesis = core.DefaultGenesisBlock()
 	case ctx.GlobalBool(RopstenFlag.Name):
 		genesis = core.DefaultRopstenGenesisBlock()
+	case ctx.GlobalBool(SepoliaFlag.Name):
+		genesis = core.DefaultSepoliaGenesisBlock()
 	case ctx.GlobalBool(RinkebyFlag.Name):
 		genesis = core.DefaultRinkebyGenesisBlock()
 	case ctx.GlobalBool(GoerliFlag.Name):
@@ -176,13 +176,14 @@ func MustDecodeBig(input string) *big.Int {
 }

 // EncodeBig encodes bigint as a hex string with 0x prefix.
-// The sign of the integer is ignored.
 func EncodeBig(bigint *big.Int) string {
-	nbits := bigint.BitLen()
-	if nbits == 0 {
+	if sign := bigint.Sign(); sign == 0 {
 		return "0x0"
+	} else if sign > 0 {
+		return "0x" + bigint.Text(16)
+	} else {
+		return "-0x" + bigint.Text(16)[1:]
 	}
-	return fmt.Sprintf("%#x", bigint)
 }

 func has0xPrefix(input string) bool {
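The dropped comment was wrong (fmt's %#x does print the sign for a *big.Int), so the output is unchanged; the rewrite just bypasses the fmt machinery, which is what the new benchmark below measures. For example:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	fmt.Println(hexutil.EncodeBig(big.NewInt(0)))    // 0x0
	fmt.Println(hexutil.EncodeBig(big.NewInt(255)))  // 0xff
	fmt.Println(hexutil.EncodeBig(big.NewInt(-255))) // -0xff
}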
@@ -201,3 +201,15 @@ func TestDecodeUint64(t *testing.T) {
 		}
 	}
 }
+
+func BenchmarkEncodeBig(b *testing.B) {
+	for _, bench := range encodeBigTests {
+		b.Run(bench.want, func(b *testing.B) {
+			b.ReportAllocs()
+			bigint := bench.input.(*big.Int)
+			for i := 0; i < b.N; i++ {
+				EncodeBig(bigint)
+			}
+		})
+	}
+}
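The benchmark can be run in isolation with the standard tooling (assuming, as the hunks above suggest, that this lives in common/hexutil):

    go test -run=NONE -bench=BenchmarkEncodeBig ./common/hexutil/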
consensus/beacon/consensus.go (new file, 376 lines)
@@ -0,0 +1,376 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package beacon

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/trie"
)

// Proof-of-stake protocol constants.
var (
	beaconDifficulty = common.Big0          // The default block difficulty in the beacon consensus
	beaconNonce      = types.EncodeNonce(0) // The default block nonce in the beacon consensus
)

// Various error messages to mark blocks invalid. These should be private to
// prevent engine-specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
	errTooManyUncles    = errors.New("too many uncles")
	errInvalidMixDigest = errors.New("invalid mix digest")
	errInvalidNonce     = errors.New("invalid nonce")
	errInvalidUncleHash = errors.New("invalid uncle hash")
)

// Beacon is a consensus engine that combines the eth1 consensus and the
// proof-of-stake algorithm. An internal flag decides whether to apply the legacy
// consensus rules or the new ones. The transition rule is described in the
// eth1/2 merge spec: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md
//
// The beacon here is a half-functional consensus engine with partial functions,
// used only for the necessary consensus checks. The legacy consensus engine can
// be any engine that implements the consensus interface (except the beacon itself).
type Beacon struct {
	ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique
}

// New creates a consensus engine with the given embedded eth1 engine.
func New(ethone consensus.Engine) *Beacon {
	if _, ok := ethone.(*Beacon); ok {
		panic("nested consensus engine")
	}
	return &Beacon{ethone: ethone}
}

// Author implements consensus.Engine, returning the verified author of the block.
func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
	if !beacon.IsPoSHeader(header) {
		return beacon.ethone.Author(header)
	}
	return header.Coinbase, nil
}

// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine.
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
	reached, _ := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
	if !reached {
		return beacon.ethone.VerifyHeader(chain, header, seal)
	}
	// Short circuit if the parent is not known
	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
	if parent == nil {
		return consensus.ErrUnknownAncestor
	}
	// Sanity checks passed, do a proper verification
	return beacon.verifyHeader(chain, header, parent)
}

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
// VerifyHeaders expects the headers to be ordered and continuous.
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
	if !beacon.IsPoSHeader(headers[len(headers)-1]) {
		return beacon.ethone.VerifyHeaders(chain, headers, seals)
	}
	var (
		preHeaders  []*types.Header
		postHeaders []*types.Header
		preSeals    []bool
	)
	for index, header := range headers {
		if beacon.IsPoSHeader(header) {
			preHeaders = headers[:index]
			postHeaders = headers[index:]
			preSeals = seals[:index]
			break
		}
	}
	// All the headers have passed the transition point, use new rules.
	if len(preHeaders) == 0 {
		return beacon.verifyHeaders(chain, headers, nil)
	}
	// The transition point exists in the middle, separate the headers
	// into two batches and apply different verification rules for them.
	var (
		abort   = make(chan struct{})
		results = make(chan error, len(headers))
	)
	go func() {
		var (
			old, new, out      = 0, len(preHeaders), 0
			errors             = make([]error, len(headers))
			done               = make([]bool, len(headers))
			oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders, preSeals)
			newDone, newResult = beacon.verifyHeaders(chain, postHeaders, preHeaders[len(preHeaders)-1])
		)
		for {
			for ; done[out]; out++ {
				results <- errors[out]
				if out == len(headers)-1 {
					return
				}
			}
			select {
			case err := <-oldResult:
				errors[old], done[old] = err, true
				old++
			case err := <-newResult:
				errors[new], done[new] = err, true
				new++
			case <-abort:
				close(oldDone)
				close(newDone)
				return
			}
		}
	}()
	return abort, results
}

// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of the Ethereum consensus engine.
func (beacon *Beacon) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
	if !beacon.IsPoSHeader(block.Header()) {
		return beacon.ethone.VerifyUncles(chain, block)
	}
	// Verify that there is no uncle block. It's explicitly disabled in the beacon
	if len(block.Uncles()) > 0 {
		return errTooManyUncles
	}
	return nil
}

// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine. The differences between the beacon and classic are:
// (a) The following fields are expected to be constants:
//     - difficulty is expected to be 0
//     - nonce is expected to be 0
//     - unclehash is expected to be Hash(emptyHeader)
// (b) the timestamp is not verified anymore
// (c) the extradata is limited to 32 bytes
func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header) error {
	// Ensure that the header's extra-data section is of a reasonable size
	if len(header.Extra) > 32 {
		return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
	}
	// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
	if header.MixDigest != (common.Hash{}) {
		return errInvalidMixDigest
	}
	if header.Nonce != beaconNonce {
		return errInvalidNonce
	}
	if header.UncleHash != types.EmptyUncleHash {
		return errInvalidUncleHash
	}
	// Verify the block's difficulty to ensure it's the default constant
	if beaconDifficulty.Cmp(header.Difficulty) != 0 {
		return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, beaconDifficulty)
	}
	// Verify that the gas limit is <= 2^63-1
	cap := uint64(0x7fffffffffffffff)
	if header.GasLimit > cap {
		return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
	}
	// Verify that the gasUsed is <= gasLimit
	if header.GasUsed > header.GasLimit {
		return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
	}
	// Verify that the block number is parent's +1
	if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(common.Big1) != 0 {
		return consensus.ErrInvalidNumber
	}
	// Verify the header's EIP-1559 attributes.
	return misc.VerifyEip1559Header(chain.Config(), parent, header)
}

// verifyHeaders is similar to verifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications. An additional parent
// header will be passed if the relevant header is not in the database yet.
func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, ancestor *types.Header) (chan<- struct{}, <-chan error) {
	var (
		abort   = make(chan struct{})
		results = make(chan error, len(headers))
	)
	go func() {
		for i, header := range headers {
			var parent *types.Header
			if i == 0 {
				if ancestor != nil {
					parent = ancestor
				} else {
					parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
				}
			} else if headers[i-1].Hash() == headers[i].ParentHash {
				parent = headers[i-1]
			}
			if parent == nil {
				select {
				case <-abort:
					return
				case results <- consensus.ErrUnknownAncestor:
				}
				continue
			}
			err := beacon.verifyHeader(chain, header, parent)
			select {
			case <-abort:
				return
			case results <- err:
			}
		}
	}()
	return abort, results
}

// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline.
func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
	// Transition isn't triggered yet, use the legacy rules for preparation.
	reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
	if err != nil {
		return err
	}
	if !reached {
		return beacon.ethone.Prepare(chain, header)
	}
	header.Difficulty = beaconDifficulty
	return nil
}

// Finalize implements consensus.Engine, setting the final state on the header
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
	// Finalize, unlike Prepare, can be used in both block generation and
	// verification, so the consensus rules are determined by the header type.
	if !beacon.IsPoSHeader(header) {
		beacon.ethone.Finalize(chain, header, state, txs, uncles)
		return
	}
	// The block reward is no longer handled here. It's done by the
	// external consensus engine.
	header.Root = state.IntermediateRoot(true)
}

// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
	// FinalizeAndAssemble, unlike Prepare, can be used in both block generation
	// and verification, so the consensus rules are determined by the header type.
	if !beacon.IsPoSHeader(header) {
		return beacon.ethone.FinalizeAndAssemble(chain, header, state, txs, uncles, receipts)
	}
	// Finalize and assemble the block
	beacon.Finalize(chain, header, state, txs, uncles)
	return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil
}

// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorithm.
func (beacon *Beacon) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
	if !beacon.IsPoSHeader(block.Header()) {
		return beacon.ethone.Seal(chain, block, results, stop)
	}
	// The seal verification is done by the external consensus engine, so
	// return directly without pushing any block back. In other words, the
	// beacon won't deliver anything on the `results` channel, which could
	// otherwise block the receiver logic forever.
	return nil
}

// SealHash returns the hash of a block prior to it being sealed.
func (beacon *Beacon) SealHash(header *types.Header) common.Hash {
	return beacon.ethone.SealHash(header)
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
	// Transition isn't triggered yet, use the legacy rules for calculation
	if reached, _ := IsTTDReached(chain, parent.Hash(), parent.Number.Uint64()); !reached {
		return beacon.ethone.CalcDifficulty(chain, time, parent)
	}
	return beaconDifficulty
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (beacon *Beacon) APIs(chain consensus.ChainHeaderReader) []rpc.API {
	return beacon.ethone.APIs(chain)
}

// Close shuts down the consensus engine.
func (beacon *Beacon) Close() error {
	return beacon.ethone.Close()
}

// IsPoSHeader reports whether the header belongs to the PoS stage based on its
// special fields. This function is not suitable for parts of the API like
// Prepare or CalcDifficulty because the header difficulty is not set yet.
func (beacon *Beacon) IsPoSHeader(header *types.Header) bool {
	if header.Difficulty == nil {
		panic("IsPoSHeader called with invalid difficulty")
	}
	return header.Difficulty.Cmp(beaconDifficulty) == 0
}

// InnerEngine returns the embedded eth1 consensus engine.
func (beacon *Beacon) InnerEngine() consensus.Engine {
	return beacon.ethone
}

// SetThreads updates the mining threads. The call is delegated
// to the eth1 engine if it's threaded.
func (beacon *Beacon) SetThreads(threads int) {
	type threaded interface {
		SetThreads(threads int)
	}
	if th, ok := beacon.ethone.(threaded); ok {
		th.SetThreads(threads)
	}
}

// IsTTDReached checks if the TerminalTotalDifficulty has been surpassed on the
// `parentHash` block. It depends on the parentHash already being stored in the
// database. If the parentHash is not stored in the database, an UnknownAncestor
// error is returned.
func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, number uint64) (bool, error) {
	if chain.Config().TerminalTotalDifficulty == nil {
		return false, nil
	}
	td := chain.GetTd(parentHash, number)
	if td == nil {
		return false, consensus.ErrUnknownAncestor
	}
	return td.Cmp(chain.Config().TerminalTotalDifficulty) >= 0, nil
}
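To illustrate the wrapping pattern this file introduces, a minimal sketch using only APIs from the file above (NewFaker is the pre-existing ethash test engine):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Wrap the legacy eth1 engine; pre-TTD calls are delegated to it,
	// post-TTD headers are checked against the PoS rules above.
	engine := beacon.New(ethash.NewFaker())
	fmt.Println(engine.InnerEngine() != nil) // true

	// Nesting a Beacon inside a Beacon panics by construction:
	// beacon.New(engine) // would panic("nested consensus engine")
}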
@@ -196,7 +196,11 @@ func (sb *blockNumberOrHashOrRLP) UnmarshalJSON(data []byte) error {
 	if err := json.Unmarshal(data, &input); err != nil {
 		return err
 	}
-	sb.RLP = hexutil.MustDecode(input)
+	blob, err := hexutil.Decode(input)
+	if err != nil {
+		return err
+	}
+	sb.RLP = blob
 	return nil
 }
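The point of the change: hexutil.MustDecode panics on malformed input, which is unacceptable in an RPC unmarshalling path, while hexutil.Decode surfaces the problem as an ordinary JSON error. For example:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	_, err := hexutil.Decode("0xzz")
	fmt.Println(err != nil) // true: malformed input is reported as an error,
	// whereas hexutil.MustDecode("0xzz") would have panicked mid-RPC.
}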
@@ -214,6 +218,9 @@ func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address,
 		} else if number, ok := blockNrOrHash.Number(); ok {
 			header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
 		}
+		if header == nil {
+			return common.Address{}, fmt.Errorf("missing block %v", blockNrOrHash.String())
+		}
 		return api.clique.Author(header)
 	}
 	block := new(types.Block)
@@ -600,8 +600,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
 	}
 	// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
 	if c.config.Period == 0 && len(block.Transactions()) == 0 {
-		log.Info("Sealing paused, waiting for transactions")
-		return nil
+		return errors.New("sealing paused while waiting for transactions")
 	}
 	// Don't hold the signer fields for the entire sealing procedure
 	c.lock.RLock()

@@ -621,8 +620,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
 		if recent == signer {
 			// Signer is among recents, only wait if the current block doesn't shift it out
 			if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
-				log.Info("Signed recently, must wait for others")
-				return nil
+				return errors.New("signed recently, must wait for others")
 			}
 		}
 	}
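With these two hunks, Seal's callers can distinguish "nothing to seal" from success instead of getting a silent nil. A caller-side sketch; the miner's actual handling is not in this excerpt, so this is only an assumed shape:

// Hypothetical caller of the updated Seal:
if err := engine.Seal(chain, block, results, stop); err != nil {
	log.Warn("Block sealing failed", "err", err) // previously indistinguishable from success
}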
@@ -44,6 +44,9 @@ type ChainHeaderReader interface {

 	// GetHeaderByHash retrieves a block header from the database by its hash.
 	GetHeaderByHash(hash common.Hash) *types.Header
+
+	// GetTd retrieves the total difficulty from the database by hash and number.
+	GetTd(hash common.Hash, number uint64) *big.Int
 }

 // ChainReader defines a small collection of methods needed to access the local
@@ -34,6 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/utils"
 	"golang.org/x/crypto/sha3"
 )

@@ -45,6 +46,11 @@ var (
 	maxUncles                     = 2         // Maximum number of uncles allowed in a single block
 	allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
+
+	// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
+	// It offsets the bomb a total of 10.7M blocks.
+	// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
+	calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))

 	// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
 	// It offsets the bomb a total of 9.7M blocks.
 	// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554

@@ -330,6 +336,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
 func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
 	next := new(big.Int).Add(parent.Number, big1)
 	switch {
+	case config.IsArrowGlacier(next):
+		return calcDifficultyEip4345(time, parent)
 	case config.IsLondon(next):
 		return calcDifficultyEip3554(time, parent)
 	case config.IsMuirGlacier(next):

@@ -653,10 +661,14 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header
 		r.Sub(r, header.Number)
 		r.Mul(r, blockReward)
 		r.Div(r, big8)
+		uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes())
+		state.Witness().TouchAddress(uncleCoinbase, state.GetBalance(uncle.Coinbase).Bytes())
 		state.AddBalance(uncle.Coinbase, r)

 		r.Div(blockReward, big32)
 		reward.Add(reward, r)
 	}
+	coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes())
+	state.Witness().TouchAddress(coinbase, state.GetBalance(header.Coinbase).Bytes())
 	state.AddBalance(header.Coinbase, reward)
 }
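For reference, the bomb-delay mechanics behind the one-line EIP-4345 addition: makeDifficultyCalculator (pre-existing, not shown in this diff) builds a calculator whose exponential component runs on a block number pushed back by the given offset. Roughly, as a sketch:

// fakeBlockNumber = max(0, header.Number - 10_700_000); the bomb term is then
// roughly 2^(fakeBlockNumber/100_000 - 2), i.e. delayed by 10.7M blocks.
fakeBlockNumber := new(big.Int).Sub(header.Number, big.NewInt(10_700_000))
if fakeBlockNumber.Sign() < 0 {
	fakeBlockNumber.SetUint64(0)
}

The ordering of the switch matters: IsArrowGlacier must be tested before IsLondon, since both predicates hold for post-Arrow-Glacier blocks.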
@@ -136,13 +136,16 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
+	if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
+		dump.Close()
+		os.Remove(temp)
 		return nil, nil, nil, err
 	}
 	// Memory map the file for writing and fill it with the generator
 	mem, buffer, err := memoryMapFile(dump, true)
 	if err != nil {
 		dump.Close()
+		os.Remove(temp)
 		return nil, nil, nil, err
 	}
 	copy(buffer, dumpMagic)

@@ -358,7 +361,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 		if err != nil {
 			logger.Error("Failed to generate mapped ethash dataset", "err", err)

-			d.dataset = make([]uint32, dsize/2)
+			d.dataset = make([]uint32, dsize/4)
 			generateDataset(d.dataset, d.epoch, cache)
 		}
 		// Iterate over all previous instances and delete old ones
consensus/ethash/mmap_help_linux.go (new file, 35 lines)
@@ -0,0 +1,35 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
// (standard LGPL-3.0 license header, as in consensus/beacon/consensus.go above)

//go:build linux
// +build linux

package ethash

import (
	"os"

	"golang.org/x/sys/unix"
)

// ensureSize expands the file to the given size. This is to prevent runtime
// errors later on, if the underlying file expands beyond the disk capacity,
// even though it ostensibly is already expanded, but due to being sparse
// does not actually occupy the full declared size on disk.
func ensureSize(f *os.File, size int64) error {
	// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
	return unix.Fallocate(int(f.Fd()), 0, 0, size)
}

consensus/ethash/mmap_help_other.go (new file, 36 lines)
@@ -0,0 +1,36 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
// (standard LGPL-3.0 license header, as in consensus/beacon/consensus.go above)

//go:build !linux
// +build !linux

package ethash

import (
	"os"
)

// ensureSize expands the file to the given size. This is to prevent runtime
// errors later on, if the underlying file expands beyond the disk capacity,
// even though it ostensibly is already expanded, but due to being sparse
// does not actually occupy the full declared size on disk.
func ensureSize(f *os.File, size int64) error {
	// On systems which do not support fallocate, we merely truncate it.
	// More robust alternatives would be to
	//  - use posix_fallocate, or
	//  - explicitly fill the file with zeroes.
	return f.Truncate(size)
}
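The difference the two implementations paper over: Truncate creates a sparse file (declared size, no allocated blocks), while fallocate reserves the blocks up front, turning a later SIGBUS on a full disk into an immediate error. A standalone illustration of the sparse case:

package main

import (
	"fmt"
	"os"
)

func main() {
	f, _ := os.CreateTemp("", "sparse")
	defer os.Remove(f.Name())

	// Sparse: the file reports 1 GiB but occupies almost no disk blocks,
	// which is exactly what ensureSize on Linux avoids via fallocate.
	f.Truncate(1 << 30)
	st, _ := f.Stat()
	fmt.Println(st.Size()) // 1073741824, yet `du` would show ~0
}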
consensus/merger.go (new file, 110 lines)
@@ -0,0 +1,110 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
// (standard LGPL-3.0 license header, as in consensus/beacon/consensus.go above)

package consensus

import (
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

// transitionStatus describes the status of the eth1/2 transition. This switch
// between modes is a one-way action which is triggered by the corresponding
// consensus-layer message.
type transitionStatus struct {
	LeftPoW    bool // The flag is set when the first NewHead message is received
	EnteredPoS bool // The flag is set when the first FinalisedBlock message is received
}

// Merger is an internal helper structure used to track the eth1/2 transition
// status. It's a common structure that can be used by both full nodes and
// light clients.
type Merger struct {
	db     ethdb.KeyValueStore
	status transitionStatus
	mu     sync.RWMutex
}

// NewMerger creates a new Merger which stores its transition status in the provided db.
func NewMerger(db ethdb.KeyValueStore) *Merger {
	var status transitionStatus
	blob := rawdb.ReadTransitionStatus(db)
	if len(blob) != 0 {
		if err := rlp.DecodeBytes(blob, &status); err != nil {
			log.Crit("Failed to decode the transition status", "err", err)
		}
	}
	return &Merger{
		db:     db,
		status: status,
	}
}

// ReachTTD is called whenever the first NewHead message is received
// from the consensus layer.
func (m *Merger) ReachTTD() {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.status.LeftPoW {
		return
	}
	m.status = transitionStatus{LeftPoW: true}
	blob, err := rlp.EncodeToBytes(m.status)
	if err != nil {
		panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
	}
	rawdb.WriteTransitionStatus(m.db, blob)
	log.Info("Left PoW stage")
}

// FinalizePoS is called whenever the first FinalisedBlock message is received
// from the consensus layer.
func (m *Merger) FinalizePoS() {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.status.EnteredPoS {
		return
	}
	m.status = transitionStatus{LeftPoW: true, EnteredPoS: true}
	blob, err := rlp.EncodeToBytes(m.status)
	if err != nil {
		panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
	}
	rawdb.WriteTransitionStatus(m.db, blob)
	log.Info("Entered PoS stage")
}

// TDDReached reports whether the chain has left the PoW stage.
func (m *Merger) TDDReached() bool {
	m.mu.RLock()
	defer m.mu.RUnlock()

	return m.status.LeftPoW
}

// PoSFinalized reports whether the chain has entered the PoS stage.
func (m *Merger) PoSFinalized() bool {
	m.mu.RLock()
	defer m.mu.RUnlock()

	return m.status.EnteredPoS
}
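A minimal sketch of the intended call sequence, using an in-memory database exactly as the merging test later in this diff does:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	m := consensus.NewMerger(rawdb.NewMemoryDatabase())
	fmt.Println(m.TDDReached(), m.PoSFinalized()) // false false

	m.ReachTTD() // first NewHead from the consensus layer
	fmt.Println(m.TDDReached(), m.PoSFinalized()) // true false

	m.FinalizePoS() // first finalised block; one-way, persisted in the db
	fmt.Println(m.TDDReached(), m.PoSFinalized()) // true true
}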
@@ -99,7 +99,7 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
 		t.Fatalf("failed to create node: %v", err)
 	}
 	ethConf := &ethconfig.Config{
-		Genesis: core.DeveloperGenesisBlock(15, common.Address{}),
+		Genesis: core.DeveloperGenesisBlock(15, 11_500_000, common.Address{}),
 		Miner: miner.Config{
 			Etherbase: common.HexToAddress(testAddress),
 		},
@@ -75,7 +75,7 @@ var (
 	// This is the content of the genesis block used by the benchmarks.
 	benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 	benchRootAddr   = crypto.PubkeyToAddress(benchRootKey.PublicKey)
-	benchRootFunds  = math.BigPow(2, 100)
+	benchRootFunds  = math.BigPow(2, 200)
 )

 // genValueTx returns a block generator that includes a single

@@ -86,7 +86,19 @@ func genValueTx(nbytes int) func(int, *BlockGen) {
 		toaddr := common.Address{}
 		data := make([]byte, nbytes)
 		gas, _ := IntrinsicGas(data, nil, false, false, false)
-		tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey)
+		signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
+		gasPrice := big.NewInt(0)
+		if gen.header.BaseFee != nil {
+			gasPrice = gen.header.BaseFee
+		}
+		tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{
+			Nonce:    gen.TxNonce(benchRootAddr),
+			To:       &toaddr,
+			Value:    big.NewInt(1),
+			Gas:      gas,
+			Data:     data,
+			GasPrice: gasPrice,
+		})
 		gen.AddTx(tx)
 	}
 }
@@ -110,24 +122,38 @@ func init() {
 // and fills the blocks with many small transactions.
 func genTxRing(naccounts int) func(int, *BlockGen) {
 	from := 0
+	availableFunds := new(big.Int).Set(benchRootFunds)
 	return func(i int, gen *BlockGen) {
 		block := gen.PrevBlock(i - 1)
 		gas := block.GasLimit()
+		gasPrice := big.NewInt(0)
+		if gen.header.BaseFee != nil {
+			gasPrice = gen.header.BaseFee
+		}
+		signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
 		for {
 			gas -= params.TxGas
 			if gas < params.TxGas {
 				break
 			}
 			to := (from + 1) % naccounts
-			tx := types.NewTransaction(
-				gen.TxNonce(ringAddrs[from]),
-				ringAddrs[to],
-				benchRootFunds,
-				params.TxGas,
-				nil,
-				nil,
-			)
-			tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from])
+			burn := new(big.Int).SetUint64(params.TxGas)
+			burn.Mul(burn, gen.header.BaseFee)
+			availableFunds.Sub(availableFunds, burn)
+			if availableFunds.Cmp(big.NewInt(1)) < 0 {
+				panic("not enough funds")
+			}
+			tx, err := types.SignNewTx(ringKeys[from], signer,
+				&types.LegacyTx{
+					Nonce:    gen.TxNonce(ringAddrs[from]),
+					To:       &ringAddrs[to],
+					Value:    availableFunds,
+					Gas:      params.TxGas,
+					GasPrice: gasPrice,
+				})
+			if err != nil {
+				panic(err)
+			}
 			gen.AddTx(tx)
 			from = to
 		}
 	}
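The recurring pattern in both generators: a legacy transaction priced at exactly the block base fee, so it stays valid after London while burning a predictable amount. In isolation (key, signer, nonce, to and header are placeholders standing in for the benchmark's state):

// Sketch: price a legacy tx at the base fee (zero on pre-London chains).
gasPrice := big.NewInt(0)
if header.BaseFee != nil {
	gasPrice = header.BaseFee
}
tx, err := types.SignNewTx(key, signer, &types.LegacyTx{
	Nonce:    nonce,
	To:       &to,
	Value:    big.NewInt(1),
	Gas:      params.TxGas,
	GasPrice: gasPrice,
})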
@@ -245,6 +271,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
 			block := types.NewBlockWithHeader(header)
 			rawdb.WriteBody(db, hash, n, block.Body())
 			rawdb.WriteReceipts(db, hash, n, nil)
+			rawdb.WriteHeadBlockHash(db, hash)
 		}
 	}
 }

@@ -278,6 +305,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
 	}
 	makeChainForBench(db, full, count)
 	db.Close()
+	cacheConfig := *defaultCacheConfig
+	cacheConfig.TrieDirtyDisabled = true

 	b.ReportAllocs()
 	b.ResetTimer()

@@ -287,7 +316,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
 		if err != nil {
 			b.Fatalf("error opening database at %v: %v", dir, err)
 		}
-		chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+		chain, err := NewBlockChain(db, &cacheConfig, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
 		if err != nil {
 			b.Fatalf("error creating chain: %v", err)
 		}
@@ -17,14 +17,21 @@
 package core

 import (
+	"encoding/json"
+	"math/big"
 	"runtime"
 	"testing"
 	"time"

+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/beacon"
+	"github.com/ethereum/go-ethereum/consensus/clique"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
 )

@@ -76,6 +83,172 @@ func TestHeaderVerification(t *testing.T) {
 	}
 }

+func TestHeaderVerificationForMergingClique(t *testing.T) { testHeaderVerificationForMerging(t, true) }
+func TestHeaderVerificationForMergingEthash(t *testing.T) { testHeaderVerificationForMerging(t, false) }
+
+// Tests the verification for eth1/2 merging, including pre-merge and post-merge
+func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
+	var (
+		testdb      = rawdb.NewMemoryDatabase()
+		preBlocks   []*types.Block
+		postBlocks  []*types.Block
+		runEngine   consensus.Engine
+		chainConfig *params.ChainConfig
+		merger      = consensus.NewMerger(rawdb.NewMemoryDatabase())
+	)
+	if isClique {
+		var (
+			key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+			addr   = crypto.PubkeyToAddress(key.PublicKey)
+			engine = clique.New(params.AllCliqueProtocolChanges.Clique, testdb)
+		)
+		genspec := &Genesis{
+			ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength),
+			Alloc: map[common.Address]GenesisAccount{
+				addr: {Balance: big.NewInt(1)},
+			},
+			BaseFee: big.NewInt(params.InitialBaseFee),
+		}
+		copy(genspec.ExtraData[32:], addr[:])
+		genesis := genspec.MustCommit(testdb)
+
+		genEngine := beacon.New(engine)
+		preBlocks, _ = GenerateChain(params.AllCliqueProtocolChanges, genesis, genEngine, testdb, 8, nil)
+		td := 0
+		for i, block := range preBlocks {
+			header := block.Header()
+			if i > 0 {
+				header.ParentHash = preBlocks[i-1].Hash()
+			}
+			header.Extra = make([]byte, 32+crypto.SignatureLength)
+			header.Difficulty = big.NewInt(2)
+
+			sig, _ := crypto.Sign(genEngine.SealHash(header).Bytes(), key)
+			copy(header.Extra[len(header.Extra)-crypto.SignatureLength:], sig)
+			preBlocks[i] = block.WithSeal(header)
+			// calculate td
+			td += int(block.Difficulty().Uint64())
+		}
+		config := *params.AllCliqueProtocolChanges
+		config.TerminalTotalDifficulty = big.NewInt(int64(td))
+		postBlocks, _ = GenerateChain(&config, preBlocks[len(preBlocks)-1], genEngine, testdb, 8, nil)
+		chainConfig = &config
+		runEngine = beacon.New(engine)
+	} else {
+		gspec := &Genesis{Config: params.TestChainConfig}
+		genesis := gspec.MustCommit(testdb)
+		genEngine := beacon.New(ethash.NewFaker())
+
+		preBlocks, _ = GenerateChain(params.TestChainConfig, genesis, genEngine, testdb, 8, nil)
+		td := 0
+		for _, block := range preBlocks {
+			// calculate td
+			td += int(block.Difficulty().Uint64())
+		}
+		config := *params.TestChainConfig
+		config.TerminalTotalDifficulty = big.NewInt(int64(td))
+		postBlocks, _ = GenerateChain(params.TestChainConfig, preBlocks[len(preBlocks)-1], genEngine, testdb, 8, nil)
+
+		chainConfig = &config
+		runEngine = beacon.New(ethash.NewFaker())
+	}
+
+	preHeaders := make([]*types.Header, len(preBlocks))
+	for i, block := range preBlocks {
+		preHeaders[i] = block.Header()
+
+		blob, _ := json.Marshal(block.Header())
+		t.Logf("Log header before the merging %d: %v", block.NumberU64(), string(blob))
+	}
+	postHeaders := make([]*types.Header, len(postBlocks))
+	for i, block := range postBlocks {
+		postHeaders[i] = block.Header()
+
+		blob, _ := json.Marshal(block.Header())
+		t.Logf("Log header after the merging %d: %v", block.NumberU64(), string(blob))
+	}
+	// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
+	chain, _ := NewBlockChain(testdb, nil, chainConfig, runEngine, vm.Config{}, nil, nil)
+	defer chain.Stop()
+
+	// Verify the blocks before the merging
+	for i := 0; i < len(preBlocks); i++ {
+		_, results := runEngine.VerifyHeaders(chain, []*types.Header{preHeaders[i]}, []bool{true})
+		// Wait for the verification result
+		select {
+		case result := <-results:
+			if result != nil {
+				t.Errorf("test %d: verification failed %v", i, result)
+			}
+		case <-time.After(time.Second):
+			t.Fatalf("test %d: verification timeout", i)
+		}
+		// Make sure no more data is returned
+		select {
+		case result := <-results:
+			t.Fatalf("test %d: unexpected result returned: %v", i, result)
+		case <-time.After(25 * time.Millisecond):
+		}
+		chain.InsertChain(preBlocks[i : i+1])
+	}
+
+	// Make the transition
+	merger.ReachTTD()
+	merger.FinalizePoS()
+
+	// Verify the blocks after the merging
+	for i := 0; i < len(postBlocks); i++ {
+		_, results := runEngine.VerifyHeaders(chain, []*types.Header{postHeaders[i]}, []bool{true})
+		// Wait for the verification result
+		select {
+		case result := <-results:
+			if result != nil {
+				t.Errorf("test %d: verification failed %v", i, result)
+			}
+		case <-time.After(time.Second):
+			t.Fatalf("test %d: verification timeout", i)
+		}
+		// Make sure no more data is returned
+		select {
+		case result := <-results:
+			t.Fatalf("test %d: unexpected result returned: %v", i, result)
+		case <-time.After(25 * time.Millisecond):
+		}
+		chain.InsertBlockWithoutSetHead(postBlocks[i])
+	}
+
+	// Verify the blocks with pre-merge blocks and post-merge blocks
+	var (
+		headers []*types.Header
+		seals   []bool
+	)
+	for _, block := range preBlocks {
+		headers = append(headers, block.Header())
+		seals = append(seals, true)
+	}
+	for _, block := range postBlocks {
+		headers = append(headers, block.Header())
+		seals = append(seals, true)
+	}
+	_, results := runEngine.VerifyHeaders(chain, headers, seals)
+	for i := 0; i < len(headers); i++ {
+		select {
+		case result := <-results:
+			if result != nil {
+				t.Errorf("test %d: verification failed %v", i, result)
+			}
+		case <-time.After(time.Second):
+			t.Fatalf("test %d: verification timeout", i)
+		}
+	}
+	// Make sure no more data is returned
+	select {
+	case result := <-results:
+		t.Fatalf("unexpected result returned: %v", result)
+	case <-time.After(25 * time.Millisecond):
+	}
+}
+
 // Tests that concurrent header verification works, for both good and bad blocks.
 func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) }
 func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) }
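The test leans on the channel contract of VerifyHeaders: exactly one result per header, in order, then silence. The consumption idiom, extracted as a fragment (engine, chain, headers and seals are placeholders):

_, results := engine.VerifyHeaders(chain, headers, seals)
for i := range headers {
	select {
	case err := <-results:
		if err != nil {
			// header i failed verification
		}
	case <-time.After(time.Second):
		// verification stalled; treat as failure
	}
}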
(File diff suppressed because it is too large.)
@@ -150,6 +150,14 @@ func (it *insertIterator) previous() *types.Header {
 	return it.chain[it.index-1].Header()
 }

+// current returns the current header that is being processed, or nil.
+func (it *insertIterator) current() *types.Header {
+	if it.index == -1 || it.index >= len(it.chain) {
+		return nil
+	}
+	return it.chain[it.index].Header()
+}
+
 // first returns the first block in the iterator.
 func (it *insertIterator) first() *types.Block {
 	return it.chain[0]
|
387
core/blockchain_reader.go
Normal file
387
core/blockchain_reader.go
Normal file
@@ -0,0 +1,387 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/state/snapshot"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
)

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
    return bc.hc.CurrentHeader()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
    return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
    return bc.currentFastBlock.Load().(*types.Block)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
    return bc.hc.HasHeader(hash, number)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
    return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
    return bc.hc.GetHeaderByHash(hash)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
    return bc.hc.GetHeaderByNumber(number)
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
    // Short circuit if the body's already in the cache, retrieve otherwise
    if cached, ok := bc.bodyCache.Get(hash); ok {
        body := cached.(*types.Body)
        return body
    }
    number := bc.hc.GetBlockNumber(hash)
    if number == nil {
        return nil
    }
    body := rawdb.ReadBody(bc.db, hash, *number)
    if body == nil {
        return nil
    }
    // Cache the found body for next time and return
    bc.bodyCache.Add(hash, body)
    return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
    // Short circuit if the body's already in the cache, retrieve otherwise
    if cached, ok := bc.bodyRLPCache.Get(hash); ok {
        return cached.(rlp.RawValue)
    }
    number := bc.hc.GetBlockNumber(hash)
    if number == nil {
        return nil
    }
    body := rawdb.ReadBodyRLP(bc.db, hash, *number)
    if len(body) == 0 {
        return nil
    }
    // Cache the found body for next time and return
    bc.bodyRLPCache.Add(hash, body)
    return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
    if bc.blockCache.Contains(hash) {
        return true
    }
    return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
    if !bc.HasBlock(hash, number) {
        return false
    }
    if bc.receiptsCache.Contains(hash) {
        return true
    }
    return rawdb.HasReceipts(bc.db, hash, number)
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
    // Short circuit if the block's already in the cache, retrieve otherwise
    if block, ok := bc.blockCache.Get(hash); ok {
        return block.(*types.Block)
    }
    block := rawdb.ReadBlock(bc.db, hash, number)
    if block == nil {
        return nil
    }
    // Cache the found block for next time and return
    bc.blockCache.Add(block.Hash(), block)
    return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
    number := bc.hc.GetBlockNumber(hash)
    if number == nil {
        return nil
    }
    return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
    hash := rawdb.ReadCanonicalHash(bc.db, number)
    if hash == (common.Hash{}) {
        return nil
    }
    return bc.GetBlock(hash, number)
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
    number := bc.hc.GetBlockNumber(hash)
    if number == nil {
        return nil
    }
    for i := 0; i < n; i++ {
        block := bc.GetBlock(hash, *number)
        if block == nil {
            break
        }
        blocks = append(blocks, block)
        hash = block.ParentHash()
        *number--
    }
    return
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
    if receipts, ok := bc.receiptsCache.Get(hash); ok {
        return receipts.(types.Receipts)
    }
    number := rawdb.ReadHeaderNumber(bc.db, hash)
    if number == nil {
        return nil
    }
    receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
    if receipts == nil {
        return nil
    }
    bc.receiptsCache.Add(hash, receipts)
    return receipts
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
    uncles := []*types.Header{}
    for i := 0; block != nil && i < length; i++ {
        uncles = append(uncles, block.Uncles()...)
        block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
    }
    return uncles
}

// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
    return bc.hc.GetCanonicalHash(number)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
    return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// GetTransactionLookup retrieves the lookup associate with the given transaction
// hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
    // Short circuit if the txlookup already in the cache, retrieve otherwise
    if lookup, exist := bc.txLookupCache.Get(hash); exist {
        return lookup.(*rawdb.LegacyTxLookupEntry)
    }
    tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
    if tx == nil {
        return nil
    }
    lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
    bc.txLookupCache.Add(hash, lookup)
    return lookup
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
    return bc.hc.GetTd(hash, number)
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
    _, err := bc.stateCache.OpenTrie(hash)
    return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
    // Check first that the block itself is known
    block := bc.GetBlock(hash, number)
    if block == nil {
        return false
    }
    return bc.HasState(block.Root())
}

// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
    return bc.stateCache.TrieDB().Node(hash)
}

// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
    return bc.stateCache.ContractCode(common.Hash{}, hash)
}

// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
    type codeReader interface {
        ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
    }
    return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
    return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
    return state.New(root, bc.stateCache, bc.snaps)
}

// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
    return bc.snaps
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
    return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
    return bc.processor
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
    return bc.stateCache
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
    return bc.CurrentBlock().GasLimit()
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
    return bc.genesisBlock
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
    return &bc.vmConfig
}

// SetTxLookupLimit is responsible for updating the txlookup limit to the
// original one stored in db if the new mismatches with the old one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
    bc.txLookupLimit = limit
}

// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
    return bc.txLookupLimit
}

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
    return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
    return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
    return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
    return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
    return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
    return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
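Together these methods form the read-only surface of BlockChain. A minimal sketch of a caller driving them, assuming a *core.BlockChain constructed elsewhere (for example via core.NewBlockChain, as in the tests below); error handling is elided:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core"
)

func watchHead(chain *core.BlockChain) {
    // Plain reads, served from the internal caches where possible.
    head := chain.CurrentHeader()
    fmt.Println("head number:", head.Number, "hash:", head.Hash())

    if block := chain.GetBlockByNumber(head.Number.Uint64()); block != nil {
        fmt.Println("head body txs:", len(block.Transactions()))
    }

    // Event-driven reads: subscribe to new canonical heads.
    heads := make(chan core.ChainHeadEvent, 16)
    sub := chain.SubscribeChainHeadEvent(heads)
    defer sub.Unsubscribe()

    for ev := range heads {
        fmt.Println("new head:", ev.Block.NumberU64())
    }
}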
@@ -79,10 +79,10 @@ func testShortRepair(t *testing.T, snapshots bool) {
// already committed, after which the process crashed. In this case we expect the full
// chain to be rolled back to the committed block, but the chain data itself left in
// the database for replaying.
-func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) }
+func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) }
-func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) }
+func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) }

-func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortSnapSyncedRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    //
@@ -119,10 +119,10 @@ func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
// not yet committed, but the process crashed. In this case we expect the chain to
// detect that it was fast syncing and not delete anything, since we can just pick
// up directly where we left off.
-func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) }
+func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) }
-func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) }
+func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) }

-func testShortFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortSnapSyncingRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    //
@@ -203,14 +203,14 @@ func testShortOldForkedRepair(t *testing.T, snapshots bool) {
// crashed. In this test scenario the side chain is below the committed block. In
// this case we expect the canonical chain to be rolled back to the committed block,
// but the chain data itself left in the database for replaying.
-func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+func TestShortOldForkedSnapSyncedRepair(t *testing.T) {
-    testShortOldForkedFastSyncedRepair(t, false)
+    testShortOldForkedSnapSyncedRepair(t, false)
}
-func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
-    testShortOldForkedFastSyncedRepair(t, true)
+    testShortOldForkedSnapSyncedRepair(t, true)
}

-func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    // └->S1->S2->S3
@@ -250,14 +250,14 @@ func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
// test scenario the side chain is below the committed block. In this case we expect
// the chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
-func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+func TestShortOldForkedSnapSyncingRepair(t *testing.T) {
-    testShortOldForkedFastSyncingRepair(t, false)
+    testShortOldForkedSnapSyncingRepair(t, false)
}
-func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
-    testShortOldForkedFastSyncingRepair(t, true)
+    testShortOldForkedSnapSyncingRepair(t, true)
}

-func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    // └->S1->S2->S3
@@ -340,14 +340,14 @@ func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
// crashed. In this test scenario the side chain reaches above the committed block.
// In this case we expect the canonical chain to be rolled back to the committed
// block, but the chain data itself left in the database for replaying.
-func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) {
-    testShortNewlyForkedFastSyncedRepair(t, false)
+    testShortNewlyForkedSnapSyncedRepair(t, false)
}
-func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
-    testShortNewlyForkedFastSyncedRepair(t, true)
+    testShortNewlyForkedSnapSyncedRepair(t, true)
}

-func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    // └->S1->S2->S3->S4->S5->S6
@@ -387,14 +387,14 @@ func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
// this test scenario the side chain reaches above the committed block. In this
// case we expect the chain to detect that it was fast syncing and not delete
// anything, since we can just pick up directly where we left off.
-func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) {
-    testShortNewlyForkedFastSyncingRepair(t, false)
+    testShortNewlyForkedSnapSyncingRepair(t, false)
}
-func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
-    testShortNewlyForkedFastSyncingRepair(t, true)
+    testShortNewlyForkedSnapSyncingRepair(t, true)
}

-func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    // └->S1->S2->S3->S4->S5->S6
@@ -475,14 +475,14 @@ func testShortReorgedRepair(t *testing.T, snapshots bool) {
// the fast sync pivot point was already committed to disk and then the process
// crashed. In this case we expect the canonical chain to be rolled back to the
// committed block, but the chain data itself left in the database for replaying.
-func TestShortReorgedFastSyncedRepair(t *testing.T) {
+func TestShortReorgedSnapSyncedRepair(t *testing.T) {
-    testShortReorgedFastSyncedRepair(t, false)
+    testShortReorgedSnapSyncedRepair(t, false)
}
-func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) {
+func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) {
-    testShortReorgedFastSyncedRepair(t, true)
+    testShortReorgedSnapSyncedRepair(t, true)
}

-func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -521,14 +521,14 @@ func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
// the fast sync pivot point was not yet committed, but the process crashed. In
// this case we expect the chain to detect that it was fast syncing and not delete
// anything, since we can just pick up directly where we left off.
-func TestShortReorgedFastSyncingRepair(t *testing.T) {
+func TestShortReorgedSnapSyncingRepair(t *testing.T) {
-    testShortReorgedFastSyncingRepair(t, false)
+    testShortReorgedSnapSyncingRepair(t, false)
}
-func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) {
+func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) {
-    testShortReorgedFastSyncingRepair(t, true)
+    testShortReorgedSnapSyncingRepair(t, true)
}

-func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -656,14 +656,14 @@ func testLongDeepRepair(t *testing.T, snapshots bool) {
// sync pivot point - newer than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwads kept as fast sync data.
-func TestLongFastSyncedShallowRepair(t *testing.T) {
+func TestLongSnapSyncedShallowRepair(t *testing.T) {
-    testLongFastSyncedShallowRepair(t, false)
+    testLongSnapSyncedShallowRepair(t, false)
}
-func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
-    testLongFastSyncedShallowRepair(t, true)
+    testLongSnapSyncedShallowRepair(t, true)
}

-func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    //
@@ -705,10 +705,10 @@ func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// sync pivot point - older than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwads deleted.
-func TestLongFastSyncedDeepRepair(t *testing.T) { testLongFastSyncedDeepRepair(t, false) }
+func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) }
-func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
+func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) }

-func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    //
@@ -750,14 +750,14 @@ func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// process crashed. In this case we expect the chain to detect that it was fast
// syncing and not delete anything, since we can just pick up directly where we
// left off.
-func TestLongFastSyncingShallowRepair(t *testing.T) {
+func TestLongSnapSyncingShallowRepair(t *testing.T) {
-    testLongFastSyncingShallowRepair(t, false)
+    testLongSnapSyncingShallowRepair(t, false)
}
-func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
-    testLongFastSyncingShallowRepair(t, true)
+    testLongSnapSyncingShallowRepair(t, true)
}

-func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    //
@@ -800,10 +800,10 @@ func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// process crashed. In this case we expect the chain to detect that it was fast
// syncing and not delete anything, since we can just pick up directly where we
// left off.
-func TestLongFastSyncingDeepRepair(t *testing.T) { testLongFastSyncingDeepRepair(t, false) }
+func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) }
-func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
+func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) }

-func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    //
@@ -946,14 +946,14 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
// the side chain is below the committed block. In this case we expect the chain
// to be rolled back to the committed block, with everything afterwads kept as
// fast sync data; the side chain completely nuked by the freezer.
-func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) {
-    testLongOldForkedFastSyncedShallowRepair(t, false)
+    testLongOldForkedSnapSyncedShallowRepair(t, false)
}
-func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
-    testLongOldForkedFastSyncedShallowRepair(t, true)
+    testLongOldForkedSnapSyncedShallowRepair(t, true)
}

-func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    // └->S1->S2->S3
@@ -998,14 +998,14 @@ func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// the side chain is below the committed block. In this case we expect the canonical
// chain to be rolled back to the committed block, with everything afterwads deleted;
// the side chain completely nuked by the freezer.
-func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) {
-    testLongOldForkedFastSyncedDeepRepair(t, false)
+    testLongOldForkedSnapSyncedDeepRepair(t, false)
}
-func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
-    testLongOldForkedFastSyncedDeepRepair(t, true)
+    testLongOldForkedSnapSyncedDeepRepair(t, true)
}

-func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    // └->S1->S2->S3
@@ -1049,14 +1049,14 @@ func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// chain is below the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
-func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) {
-    testLongOldForkedFastSyncingShallowRepair(t, false)
+    testLongOldForkedSnapSyncingShallowRepair(t, false)
}
-func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
-    testLongOldForkedFastSyncingShallowRepair(t, true)
+    testLongOldForkedSnapSyncingShallowRepair(t, true)
}

-func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    // └->S1->S2->S3
@@ -1101,14 +1101,14 @@ func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// chain is below the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
-func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) {
-    testLongOldForkedFastSyncingDeepRepair(t, false)
+    testLongOldForkedSnapSyncingDeepRepair(t, false)
}
-func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
-    testLongOldForkedFastSyncingDeepRepair(t, true)
+    testLongOldForkedSnapSyncingDeepRepair(t, true)
}

-func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    // └->S1->S2->S3
@@ -1252,14 +1252,14 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
// the side chain is above the committed block. In this case we expect the chain
// to be rolled back to the committed block, with everything afterwads kept as fast
// sync data; the side chain completely nuked by the freezer.
-func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) {
-    testLongNewerForkedFastSyncedShallowRepair(t, false)
+    testLongNewerForkedSnapSyncedShallowRepair(t, false)
}
-func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
-    testLongNewerForkedFastSyncedShallowRepair(t, true)
+    testLongNewerForkedSnapSyncedShallowRepair(t, true)
}

-func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1304,14 +1304,14 @@ func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// the side chain is above the committed block. In this case we expect the canonical
// chain to be rolled back to the committed block, with everything afterwads deleted;
// the side chain completely nuked by the freezer.
-func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) {
-    testLongNewerForkedFastSyncedDeepRepair(t, false)
+    testLongNewerForkedSnapSyncedDeepRepair(t, false)
}
-func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
-    testLongNewerForkedFastSyncedDeepRepair(t, true)
+    testLongNewerForkedSnapSyncedDeepRepair(t, true)
}

-func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1355,14 +1355,14 @@ func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// chain is above the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
-func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) {
-    testLongNewerForkedFastSyncingShallowRepair(t, false)
+    testLongNewerForkedSnapSyncingShallowRepair(t, false)
}
-func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
-    testLongNewerForkedFastSyncingShallowRepair(t, true)
+    testLongNewerForkedSnapSyncingShallowRepair(t, true)
}

-func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1407,14 +1407,14 @@ func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// chain is above the committed block. In this case we expect the chain to detect
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
-func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) {
-    testLongNewerForkedFastSyncingDeepRepair(t, false)
+    testLongNewerForkedSnapSyncingDeepRepair(t, false)
}
-func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
-    testLongNewerForkedFastSyncingDeepRepair(t, true)
+    testLongNewerForkedSnapSyncingDeepRepair(t, true)
}

-func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1552,14 +1552,14 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
// expect the chain to be rolled back to the committed block, with everything
// afterwads kept as fast sync data. The side chain completely nuked by the
// freezer.
-func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
+func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) {
-    testLongReorgedFastSyncedShallowRepair(t, false)
+    testLongReorgedSnapSyncedShallowRepair(t, false)
}
-func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
-    testLongReorgedFastSyncedShallowRepair(t, true)
+    testLongReorgedSnapSyncedShallowRepair(t, true)
}

-func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1603,14 +1603,14 @@ func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// was already committed to disk and then the process crashed. In this case we
// expect the canonical chains to be rolled back to the committed block, with
// everything afterwads deleted. The side chain completely nuked by the freezer.
-func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
+func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) {
-    testLongReorgedFastSyncedDeepRepair(t, false)
+    testLongReorgedSnapSyncedDeepRepair(t, false)
}
-func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
-    testLongReorgedFastSyncedDeepRepair(t, true)
+    testLongReorgedSnapSyncedDeepRepair(t, true)
}

-func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1653,14 +1653,14 @@ func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// was not yet committed, but the process crashed. In this case we expect the
// chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
-func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) {
-    testLongReorgedFastSyncingShallowRepair(t, false)
+    testLongReorgedSnapSyncingShallowRepair(t, false)
}
-func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
-    testLongReorgedFastSyncingShallowRepair(t, true)
+    testLongReorgedSnapSyncingShallowRepair(t, true)
}

-func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1704,14 +1704,14 @@ func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// was not yet committed, but the process crashed. In this case we expect the
// chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
-func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
+func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) {
-    testLongReorgedFastSyncingDeepRepair(t, false)
+    testLongReorgedSnapSyncingDeepRepair(t, false)
}
-func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
-    testLongReorgedFastSyncingDeepRepair(t, true)
+    testLongReorgedSnapSyncingDeepRepair(t, true)
}

-func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
    // Chain:
    // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
    // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1829,7 +1829,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
    // Pull the plug on the database, simulating a hard crash
    db.Close()

-    // Start a new blockchain back up and see where the repait leads us
+    // Start a new blockchain back up and see where the repair leads us
    db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
    if err != nil {
        t.Fatalf("Failed to reopen persistent database: %v", err)
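Every repair test shares this choreography: close the database mid-flight to fake a hard crash, then rebuild the chain over the same datadir and let it repair itself. A hypothetical helper distilled from the hunk above (the name crashAndRestart and its parameter list are ours for illustration, not part of the test file; it would live inside package core, with imports taken from the surrounding file):

func crashAndRestart(t *testing.T, db ethdb.Database, datadir string, engine consensus.Engine) (*BlockChain, ethdb.Database) {
    // Pull the plug: close without flushing, simulating a hard crash.
    db.Close()

    // Cold restart over the same datadir; the chain must repair itself.
    db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
    if err != nil {
        t.Fatalf("Failed to reopen persistent database: %v", err)
    }
    chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
    if err != nil {
        t.Fatalf("Failed to recreate chain: %v", err)
    }
    return chain, db
}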
@@ -1863,3 +1863,124 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
|
|||||||
t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
|
t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestIssue23496 tests the scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893
// Credits to @zzyalbert for finding the issue.
//
// Local chain owns these blocks:
// G  B1  B2  B3  B4
// B1: state committed
// B2: snapshot disk layer
// B3: state committed
// B4: head block
//
// Crash happens without fully persisting snapshot and in-memory states,
// so the chain rewinds itself to B1 (skipping B3 in order to recover the snapshot).
// In this case the snapshot layer of B3 is not created because the state
// already exists on disk.
func TestIssue23496(t *testing.T) {
	// The test case is hard to follow; uncomment the next line to visualize the input
	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	// Create a temporary persistent database
	datadir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("Failed to create temporary datadir: %v", err)
	}
	os.RemoveAll(datadir)

	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to create persistent database: %v", err)
	}
	defer db.Close() // Might double close, should be fine

	// Initialize a fresh chain
	var (
		genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
		engine  = ethash.NewFullFaker()
		config  = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
			SnapshotLimit:  256,
			SnapshotWait:   true,
		}
	)
	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to create chain: %v", err)
	}
	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{0x02})
		b.SetDifficulty(big.NewInt(1000000))
	})

	// Insert block B1 and commit the state into disk
	if _, err := chain.InsertChain(blocks[:1]); err != nil {
		t.Fatalf("Failed to import canonical chain start: %v", err)
	}
	chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)

	// Insert block B2 and commit the snapshot into disk
	if _, err := chain.InsertChain(blocks[1:2]); err != nil {
		t.Fatalf("Failed to import canonical chain start: %v", err)
	}
	if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
		t.Fatalf("Failed to flatten snapshots: %v", err)
	}

	// Insert block B3 and commit the state into disk
	if _, err := chain.InsertChain(blocks[2:3]); err != nil {
		t.Fatalf("Failed to import canonical chain start: %v", err)
	}
	chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)

	// Insert the remaining blocks
	if _, err := chain.InsertChain(blocks[3:]); err != nil {
		t.Fatalf("Failed to import canonical chain tail: %v", err)
	}

	// Pull the plug on the database, simulating a hard crash
	db.Close()

	// Start a new blockchain back up and see where the repair leads us
	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to reopen persistent database: %v", err)
	}
	defer db.Close()

	chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer chain.Stop()

	if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
		t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
	}
	if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
	}
	if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) {
		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1))
	}

	// Reinsert B2-B4
	if _, err := chain.InsertChain(blocks[1:]); err != nil {
		t.Fatalf("Failed to import canonical chain tail: %v", err)
	}
	if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
		t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
	}
	if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
	}
	if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) {
		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
	}
	if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
		t.Error("Failed to regenerate the snapshot of known state")
	}
}
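Distilled from the test above, the crash/recovery pattern that all of these repair tests share is: insert blocks, hard-close the database without flushing, reopen it, and let NewBlockChain repair the head markers. A minimal sketch of that skeleton, reusing the identifiers from the test (the helper name crashAndRecover is invented for illustration, it assumes a genesis already committed under datadir, and it only compiles inside the core package):

// crashAndRecover is a hypothetical helper distilled from TestIssue23496:
// it inserts blocks, simulates a hard crash by closing the database, then
// reopens everything and lets NewBlockChain run its head repair.
func crashAndRecover(t *testing.T, datadir string, config *CacheConfig, engine consensus.Engine, blocks []*types.Block) *BlockChain {
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to create persistent database: %v", err)
	}
	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to create chain: %v", err)
	}
	if _, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("Failed to import chain: %v", err)
	}
	db.Close() // pull the plug: in-memory tries and snapshot layers are lost

	// Reopening triggers head repair: the full block rewinds to the deepest
	// block whose state survived on disk, as asserted in the test above.
	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to reopen persistent database: %v", err)
	}
	chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	return chain
}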
@@ -194,10 +194,10 @@ func testShortSetHead(t *testing.T, snapshots bool) {
 // Everything above the sethead point should be deleted. In between the committed
 // block and the requested head the data can remain as "fast sync" data to avoid
 // redownloading it.
-func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) }
-func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }
+func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) }
+func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) }
 
-func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //
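The "sethead point" these hunks keep referring to is simply the block number handed to the chain's rewind entry point. A hedged sketch of the call the parameterized helpers in this file boil down to (block number 6 is an arbitrary example, and the exact assertions vary per scenario):

	// Rewind the chain to block 6: everything above it should be deleted,
	// while data between the committed block and block 6 may survive as
	// "fast sync" data, per the comments above.
	if err := chain.SetHead(6); err != nil {
		t.Fatalf("Failed to rewind chain: %v", err)
	}
	if head := chain.CurrentBlock(); head.NumberU64() > 6 {
		t.Errorf("Head block not rewound: have %d, want at most %d", head.NumberU64(), 6)
	}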
@@ -236,10 +236,10 @@ func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
 // detect that it was fast syncing and delete everything from the new head, since
 // we can just pick up fast syncing from there. The head full block should be set
 // to the genesis.
-func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) }
-func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }
+func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) }
+func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) }
 
-func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //
@@ -326,14 +326,14 @@ func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
 // block. Everything above the sethead point should be deleted. In between the
 // committed block and the requested head the data can remain as "fast sync" data
 // to avoid redownloading it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
-	testShortOldForkedFastSyncedSetHead(t, false)
+func TestShortOldForkedSnapSyncedSetHead(t *testing.T) {
+	testShortOldForkedSnapSyncedSetHead(t, false)
 }
-func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
-	testShortOldForkedFastSyncedSetHead(t, true)
+func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortOldForkedSnapSyncedSetHead(t, true)
 }
 
-func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //   └->S1->S2->S3
@@ -375,14 +375,14 @@ func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 // the chain to detect that it was fast syncing and delete everything from the new
 // head, since we can just pick up fast syncing from there. The head full block
 // should be set to the genesis.
-func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
-	testShortOldForkedFastSyncingSetHead(t, false)
+func TestShortOldForkedSnapSyncingSetHead(t *testing.T) {
+	testShortOldForkedSnapSyncingSetHead(t, false)
 }
-func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
-	testShortOldForkedFastSyncingSetHead(t, true)
+func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortOldForkedSnapSyncingSetHead(t, true)
 }
 
-func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //   └->S1->S2->S3
@@ -478,14 +478,14 @@ func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
-	testShortNewlyForkedFastSyncedSetHead(t, false)
+func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) {
+	testShortNewlyForkedSnapSyncedSetHead(t, false)
 }
-func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
-	testShortNewlyForkedFastSyncedSetHead(t, true)
+func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortNewlyForkedSnapSyncedSetHead(t, true)
 }
 
-func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -531,14 +531,14 @@ func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
-	testShortNewlyForkedFastSyncingSetHead(t, false)
+func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) {
+	testShortNewlyForkedSnapSyncingSetHead(t, false)
 }
-func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
-	testShortNewlyForkedFastSyncingSetHead(t, true)
+func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortNewlyForkedSnapSyncingSetHead(t, true)
 }
 
-func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -634,14 +634,14 @@ func testShortReorgedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedFastSyncedSetHead(t *testing.T) {
-	testShortReorgedFastSyncedSetHead(t, false)
+func TestShortReorgedSnapSyncedSetHead(t *testing.T) {
+	testShortReorgedSnapSyncedSetHead(t, false)
 }
-func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) {
-	testShortReorgedFastSyncedSetHead(t, true)
+func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortReorgedSnapSyncedSetHead(t, true)
 }
 
-func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -686,14 +686,14 @@ func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedFastSyncingSetHead(t *testing.T) {
-	testShortReorgedFastSyncingSetHead(t, false)
+func TestShortReorgedSnapSyncingSetHead(t *testing.T) {
+	testShortReorgedSnapSyncingSetHead(t, false)
 }
-func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) {
-	testShortReorgedFastSyncingSetHead(t, true)
+func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortReorgedSnapSyncingSetHead(t, true)
 }
 
-func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -829,14 +829,14 @@ func testLongDeepSetHead(t *testing.T, snapshots bool) {
 // back to the committed block. Everything above the sethead point should be
 // deleted. In between the committed block and the requested head the data can
 // remain as "fast sync" data to avoid redownloading it.
-func TestLongFastSyncedShallowSetHead(t *testing.T) {
-	testLongFastSyncedShallowSetHead(t, false)
+func TestLongSnapSyncedShallowSetHead(t *testing.T) {
+	testLongSnapSyncedShallowSetHead(t, false)
 }
-func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongFastSyncedShallowSetHead(t, true)
+func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongSnapSyncedShallowSetHead(t, true)
 }
 
-func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //
@@ -880,10 +880,10 @@ func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // which sethead was called. In this case we expect the full chain to be rolled
 // back to the committed block. Since the ancient limit was underflown, everything
 // needs to be deleted onwards to avoid creating a gap.
-func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) }
-func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }
+func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) }
+func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) }
 
-func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //
@@ -926,14 +926,14 @@ func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // sethead was called. In this case we expect the chain to detect that it was fast
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
-func TestLongFastSyncingShallowSetHead(t *testing.T) {
-	testLongFastSyncingShallowSetHead(t, false)
+func TestLongSnapSyncingShallowSetHead(t *testing.T) {
+	testLongSnapSyncingShallowSetHead(t, false)
 }
-func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongFastSyncingShallowSetHead(t, true)
+func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongSnapSyncingShallowSetHead(t, true)
 }
 
-func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //
@@ -977,14 +977,14 @@ func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // sethead was called. In this case we expect the chain to detect that it was fast
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
-func TestLongFastSyncingDeepSetHead(t *testing.T) {
-	testLongFastSyncingDeepSetHead(t, false)
+func TestLongSnapSyncingDeepSetHead(t *testing.T) {
+	testLongSnapSyncingDeepSetHead(t, false)
 }
-func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongFastSyncingDeepSetHead(t, true)
+func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongSnapSyncingDeepSetHead(t, true)
 }
 
-func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //
@@ -1132,14 +1132,14 @@ func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
 // sethead point should be deleted. In between the committed block and the
 // requested head the data can remain as "fast sync" data to avoid redownloading
 // it. The side chain is nuked by the freezer.
-func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
-	testLongOldForkedFastSyncedShallowSetHead(t, false)
+func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) {
+	testLongOldForkedSnapSyncedShallowSetHead(t, false)
 }
-func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncedShallowSetHead(t, true)
+func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedSnapSyncedShallowSetHead(t, true)
 }
 
-func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //   └->S1->S2->S3
@@ -1186,14 +1186,14 @@ func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // full chain to be rolled back to the committed block. Since the ancient limit was
 // underflown, everything needs to be deleted onwards to avoid creating a gap. The
 // side chain is nuked by the freezer.
-func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
-	testLongOldForkedFastSyncedDeepSetHead(t, false)
+func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) {
+	testLongOldForkedSnapSyncedDeepSetHead(t, false)
 }
-func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncedDeepSetHead(t, true)
+func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedSnapSyncedDeepSetHead(t, true)
 }
 
-func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //   └->S1->S2->S3
@@ -1239,14 +1239,14 @@ func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // that it was fast syncing and delete everything from the new head, since we can
 // just pick up fast syncing from there. The side chain is completely nuked by the
 // freezer.
-func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
-	testLongOldForkedFastSyncingShallowSetHead(t, false)
+func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) {
+	testLongOldForkedSnapSyncingShallowSetHead(t, false)
 }
-func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncingShallowSetHead(t, true)
+func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedSnapSyncingShallowSetHead(t, true)
 }
 
-func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //   └->S1->S2->S3
@@ -1293,14 +1293,14 @@ func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // that it was fast syncing and delete everything from the new head, since we can
 // just pick up fast syncing from there. The side chain is completely nuked by the
 // freezer.
-func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
-	testLongOldForkedFastSyncingDeepSetHead(t, false)
+func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) {
+	testLongOldForkedSnapSyncingDeepSetHead(t, false)
 }
-func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncingDeepSetHead(t, true)
+func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedSnapSyncingDeepSetHead(t, true)
 }
 
-func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //   └->S1->S2->S3
@@ -1446,15 +1446,15 @@ func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was already committed to disk and then sethead was called. In this test scenario
 // the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
+// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead.
-func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncedShallowSetHead(t, false)
+func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) {
+	testLongNewerForkedSnapSyncedShallowSetHead(t, false)
 }
-func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncedShallowSetHead(t, true)
+func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedSnapSyncedShallowSetHead(t, true)
 }
 
-func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1498,15 +1498,15 @@ func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then sethead was called. In this test scenario
 // the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
+// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead.
-func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncedDeepSetHead(t, false)
+func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) {
+	testLongNewerForkedSnapSyncedDeepSetHead(t, false)
 }
-func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncedDeepSetHead(t, true)
+func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedSnapSyncedDeepSetHead(t, true)
 }
 
-func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1549,15 +1549,15 @@ func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was not yet committed, but sethead was called. In this test scenario the side
 // chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongFastSyncinghallowSetHead.
+// sidechain since it's dangling, reverting to TestLongSnapSyncinghallowSetHead.
-func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncingShallowSetHead(t, false)
+func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) {
+	testLongNewerForkedSnapSyncingShallowSetHead(t, false)
 }
-func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncingShallowSetHead(t, true)
+func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedSnapSyncingShallowSetHead(t, true)
 }
 
-func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1601,15 +1601,15 @@ func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool)
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was not yet committed, but sethead was called. In this test scenario the side
 // chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead.
-func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncingDeepSetHead(t, false)
+func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) {
+	testLongNewerForkedSnapSyncingDeepSetHead(t, false)
 }
-func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncingDeepSetHead(t, true)
+func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedSnapSyncingDeepSetHead(t, true)
 }
 
-func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1745,15 +1745,15 @@ func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was already committed to disk and then sethead was called. In this case the
 // freezer will delete the sidechain since it's dangling, reverting to
-// TestLongFastSyncedShallowSetHead.
+// TestLongSnapSyncedShallowSetHead.
-func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
-	testLongReorgedFastSyncedShallowSetHead(t, false)
+func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) {
+	testLongReorgedSnapSyncedShallowSetHead(t, false)
 }
-func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncedShallowSetHead(t, true)
+func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedSnapSyncedShallowSetHead(t, true)
 }
 
-func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1797,15 +1797,15 @@ func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then sethead was called. In this case the
 // freezer will delete the sidechain since it's dangling, reverting to
-// TestLongFastSyncedDeepSetHead.
+// TestLongSnapSyncedDeepSetHead.
-func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
-	testLongReorgedFastSyncedDeepSetHead(t, false)
+func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) {
+	testLongReorgedSnapSyncedDeepSetHead(t, false)
 }
-func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncedDeepSetHead(t, true)
+func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedSnapSyncedDeepSetHead(t, true)
 }
 
-func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1850,14 +1850,14 @@ func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // chain to detect that it was fast syncing and delete everything from the new
 // head, since we can just pick up fast syncing from there. The side chain is
 // completely nuked by the freezer.
-func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
-	testLongReorgedFastSyncingShallowSetHead(t, false)
+func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) {
+	testLongReorgedSnapSyncingShallowSetHead(t, false)
 }
-func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncingShallowSetHead(t, true)
+func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedSnapSyncingShallowSetHead(t, true)
 }
 
-func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1903,14 +1903,14 @@ func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // chain to detect that it was fast syncing and delete everything from the new
 // head, since we can just pick up fast syncing from there. The side chain is
 // completely nuked by the freezer.
-func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
-	testLongReorgedFastSyncingDeepSetHead(t, false)
+func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) {
+	testLongReorgedSnapSyncingDeepSetHead(t, false)
 }
-func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncingDeepSetHead(t, true)
+func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedSnapSyncingDeepSetHead(t, true)
 }
 
-func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 //   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
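Every hunk above applies the same mechanical rename, FastSynced/FastSyncing to SnapSynced/SnapSyncing, across this file's fixed convention: one parameterized helper per scenario, wrapped by a plain test and a WithSnapshots test. Schematically (Scenario here is a placeholder, not a name from the diff):

	// Placeholder illustration of the wrapper-pair convention used throughout
	// the sethead tests; "Scenario" stands in for e.g. LongReorgedSnapSyncingDeep.
	func TestScenarioSetHead(t *testing.T)              { testScenarioSetHead(t, false) }
	func TestScenarioSetHeadWithSnapshots(t *testing.T) { testScenarioSetHead(t, true) }

	func testScenarioSetHead(t *testing.T, snapshots bool) {
		// build the chain layout from the comment, rewind with SetHead,
		// then assert the resulting header/fast-block/full-block heads
		_ = snapshots // enables the snapshot layer in the real helpers
	}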
Some files were not shown because too many files have changed in this diff.