Compare commits
141 commits: v1.9.8 ... ChrisChinc

SHA1:
8b6f5d4f72, 2e1ecc02bd, 6df973df27, 4be8840120, 529b81dadb, 6a62fe399b, dae3aee5ff,
05ccbb5edd, 4f55e24c02, 91b228966e, 1b9c5b393b, 57d4898e29, c2117982b8, 1c4c486a85,
ac72787768, 26284ec3cc, fef8c985bc, 36a1e0b67d, 37531b1884, 855690523a, 38d1b0cba2,
eddcecc160, 9938d954c8, 90caa2cabb, 5f2002bbcc, a9614c3c91, 46c4b699c8, 1821328162,
8045504abf, c22fdec3c7, 049e17116e, dcffb7777f, 8694d14e65, 172f7778fe, 34bb132b10,
675f4e75b8, 4a231cd951, 976a0f5558, a1313b5b1e, 711ed74e09, 058a4ac5f1, 33791dbeb5,
5a9c96454e, 4cc89a5a32, 15d09038a6, 3c776c7199, 24cab2d535, 594e038e75, a903912b96,
44c365c3e2, 7b68975a00, 60deeb103e, 0b284f6c6c, 8a5c81349e, 33c56ebc67, 31baf3a9af,
ad2fc7c6a6, 58cf5686ea, b4aa4a6965, b88b4632c2, 1f1cefc036, 4c8fcd93da, fcc84c38dd,
6d200efe72, 92956e2930, 9b09c0fc83, 770316dc20, 0af96d2556, d5acc5ed9e, fcafa0baa5,
1ee754b056, b3b8d36995, 9b32f592dc, 3e97b04a3d, f20c8d495a, 8704e8a8fc, 94e8418939,
feda78e052, 8592a57553, b2de0bd87b, e9e69d6e29, a90cc66f3c, 8bd37a1d91, b5c4ea56b8,
fc392395fb, b211742e5f, 0218d7001d, 4d663d57d6, 8a63f7f504, c49a4165d0, a1bc0e3cb6,
a013f02df2, 50be790869, 9e0f934e2b, 4f7b7f84ae, c6285e6437, 35f95aef6f, 7a509b4732,
433937fb42, 2eeb8dd271, b7cf41e4b3, 3bb6815fc1, a67fe48b43, 93b1171316, 6ae9dc15cc,
49cf000df7, c4b7fdd27e, 275cd4988d, f51cf573b5, 191364c350, d90d1db609, b8bc9b3d8e,
f383eaa102, cecc7230c0, 4b40b5377b, 370cb95b7f, 017449971e, bc01593afb, c9dce0bfd7,
e78f631dfc, 6b6882f08b, c2d65d34d5, 13ccf6016e, 7ce7c3967c, fc7e0fe6c7, 5cc6e7a71e,
d556d39a2c, 54d332e1db, e0bf5f0ccb, 1ff3d7c2d4, 08611cfd75, 9a529d64d1, a91b704b01,
c9f28ca8e5, 58e33d9e5a, 7800ba978d, 717f8a4e8f, 7b189d6f1f, c4844e9ee2, 23c8c74131,
0676320169
.golangci.yml

@@ -6,6 +6,8 @@ run:
   # default is true. Enables skipping of directories:
   # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
   skip-dirs-use-default: true
+  skip-files:
+    - core/genesis_alloc.go

 linters:
   disable-all: true
@@ -43,3 +45,6 @@ issues:
     - path: core/vm/instructions_test.go
       linters:
         - goconst
+    - path: cmd/faucet/
+      linters:
+        - deadcode
.travis.yml

@@ -57,6 +57,7 @@ jobs:

     - stage: build
       os: osx
+      osx_image: xcode11.3
       go: 1.13.x
       script:
         - echo "Increase the maximum number of open file descriptors on macOS"
@@ -92,7 +93,7 @@ jobs:
         - python-paramiko
       script:
         - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
-        - go run build/ci.go debsrc -goversion 1.13.4 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
+        - go run build/ci.go debsrc -goversion 1.13.8 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"

     # This builder does the Linux Azure uploads
     - stage: build
@@ -182,7 +183,7 @@ jobs:
       git:
         submodules: false # avoid cloning ethereum/tests
       before_install:
-        - curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz
+        - curl https://dl.google.com/go/go1.13.8.linux-amd64.tar.gz | tar -xz
         - export PATH=`pwd`/go/bin:$PATH
         - export GOROOT=`pwd`/go
         - export GOPATH=$HOME/go
Makefile

@@ -10,33 +10,34 @@

 GOBIN = ./build/bin
 GO ?= latest
+GORUN = env GO111MODULE=on go run

 geth:
-	build/env.sh go run build/ci.go install ./cmd/geth
+	$(GORUN) build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."

 all:
-	build/env.sh go run build/ci.go install
+	$(GORUN) build/ci.go install

 android:
-	build/env.sh go run build/ci.go aar --local
+	$(GORUN) build/ci.go aar --local
 	@echo "Done building."
 	@echo "Import \"$(GOBIN)/geth.aar\" to use the library."

 ios:
-	build/env.sh go run build/ci.go xcode --local
+	$(GORUN) build/ci.go xcode --local
 	@echo "Done building."
 	@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."

 test: all
-	build/env.sh go run build/ci.go test
+	$(GORUN) build/ci.go test

 lint: ## Run linters.
-	build/env.sh go run build/ci.go lint
+	$(GORUN) build/ci.go lint

 clean:
-	go clean -cache
+	env GO111MODULE=on go clean -cache
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*

 # The devtools target installs tools required for 'go generate'.
@@ -63,12 +64,12 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 get
 	@ls -ld $(GOBIN)/geth-linux-*

 geth-linux-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
 	@echo "Linux 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep 386

 geth-linux-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
 	@echo "Linux amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep amd64

@@ -77,42 +78,42 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm

 geth-linux-arm-5:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
 	@echo "Linux ARMv5 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5

 geth-linux-arm-6:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
 	@echo "Linux ARMv6 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6

 geth-linux-arm-7:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
 	@echo "Linux ARMv7 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7

 geth-linux-arm64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
 	@echo "Linux ARM64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm64

 geth-linux-mips:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips

 geth-linux-mipsle:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPSle cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mipsle

 geth-linux-mips64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips64

 geth-linux-mips64le:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
 	@echo "Linux MIPS64le cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le

@@ -121,12 +122,12 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64
 	@ls -ld $(GOBIN)/geth-darwin-*

 geth-darwin-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
 	@echo "Darwin 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep 386

 geth-darwin-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
 	@echo "Darwin amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64

@@ -135,11 +136,11 @@ geth-windows: geth-windows-386 geth-windows-amd64
 	@ls -ld $(GOBIN)/geth-windows-*

 geth-windows-386:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
 	@echo "Windows 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep 386

 geth-windows-amd64:
-	build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
+	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
 	@echo "Windows amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep amd64
README.md

@@ -39,7 +39,7 @@ directory.

 | **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
 | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
 | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
-| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
+| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
 | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
 | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
 | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
@@ -294,7 +294,7 @@ also need to configure a miner to process transactions and create new blocks for
 Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
 requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
 setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
-and the [Genoil miner](https://github.com/Genoil/cpp-ethereum) repository.
+and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.

 In a private network setting, however a single CPU miner instance is more than enough for
 practical purposes as it can produce a stable stream of blocks at the correct intervals
accounts/abi/abi.go

@@ -108,12 +108,13 @@ func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte)
 // UnmarshalJSON implements json.Unmarshaler interface
 func (abi *ABI) UnmarshalJSON(data []byte) error {
 	var fields []struct {
-		Type      string
-		Name      string
-		Constant  bool
-		Anonymous bool
-		Inputs    []Argument
-		Outputs   []Argument
+		Type            string
+		Name            string
+		Constant        bool
+		StateMutability string
+		Anonymous       bool
+		Inputs          []Argument
+		Outputs         []Argument
 	}
 	if err := json.Unmarshal(data, &fields); err != nil {
 		return err
@@ -134,10 +135,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
 			name = fmt.Sprintf("%s%d", field.Name, idx)
 			_, ok = abi.Methods[name]
 		}
+		isConst := field.Constant || field.StateMutability == "pure" || field.StateMutability == "view"
 		abi.Methods[name] = Method{
 			Name:    name,
 			RawName: field.Name,
-			Const:   field.Constant,
+			Const:   isConst,
 			Inputs:  field.Inputs,
 			Outputs: field.Outputs,
 		}
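The net effect of the abi.go hunk: a method now counts as read-only either via the legacy `constant` flag or via the Solidity >= 0.5 `stateMutability` field. A minimal standalone sketch of that mapping (the `isConst` logic is taken from the diff; the `main` wrapper is illustrative only):

package main

import "fmt"

// isConst mirrors the rule introduced above: treat a method as constant
// (callable without a transaction) if the legacy "constant" flag is set,
// or if its stateMutability is "pure" or "view".
func isConst(constant bool, stateMutability string) bool {
	return constant || stateMutability == "pure" || stateMutability == "view"
}

func main() {
	fmt.Println(isConst(false, "view"))    // true  (Solidity >= 0.5 ABI JSON)
	fmt.Println(isConst(true, ""))         // true  (legacy "constant": true)
	fmt.Println(isConst(false, "payable")) // false (requires a transaction)
}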
accounts/abi/bind/backends/simulated.go

@@ -46,12 +46,17 @@ import (
 var _ bind.ContractBackend = (*SimulatedBackend)(nil)

 var (
-	errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
-	errGasEstimationFailed    = errors.New("gas required exceeds allowance or always failing transaction")
+	errBlockNumberUnsupported  = errors.New("simulatedBackend cannot access blocks other than the latest block")
+	errBlockDoesNotExist       = errors.New("block does not exist in blockchain")
+	errTransactionDoesNotExist = errors.New("transaction does not exist")
+	errGasEstimationFailed     = errors.New("gas required exceeds allowance or always failing transaction")
 )

 // SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
 // the background. Its main purpose is to allow easily testing contract bindings.
+// Simulated backend implements the following interfaces:
+// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
+// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
 type SimulatedBackend struct {
 	database   ethdb.Database   // In memory database to store our testing data
 	blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
@@ -76,7 +81,7 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
 		database:   database,
 		blockchain: blockchain,
 		config:     genesis.Config,
-		events:     filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
+		events:     filters.NewEventSystem(&filterBackend{database, blockchain}, false),
 	}
 	backend.rollback()
 	return backend
@@ -122,15 +127,28 @@ func (b *SimulatedBackend) rollback() {
 	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
 }

+// stateByBlockNumber retrieves a state by a given blocknumber.
+func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
+	if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
+		return b.blockchain.State()
+	}
+	block, err := b.BlockByNumber(ctx, blockNumber)
+	if err != nil {
+		return nil, err
+	}
+	return b.blockchain.StateAt(block.Hash())
+}
+
 // CodeAt returns the code associated with a certain account in the blockchain.
 func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()

-	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
-		return nil, errBlockNumberUnsupported
+	statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+	if err != nil {
+		return nil, err
 	}
-	statedb, _ := b.blockchain.State()

 	return statedb.GetCode(contract), nil
 }
@@ -139,10 +157,11 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
 	b.mu.Lock()
 	defer b.mu.Unlock()

-	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
-		return nil, errBlockNumberUnsupported
+	statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+	if err != nil {
+		return nil, err
 	}
-	statedb, _ := b.blockchain.State()

 	return statedb.GetBalance(contract), nil
 }
@@ -151,10 +170,11 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
 	b.mu.Lock()
 	defer b.mu.Unlock()

-	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
-		return 0, errBlockNumberUnsupported
+	statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+	if err != nil {
+		return 0, err
 	}
-	statedb, _ := b.blockchain.State()

 	return statedb.GetNonce(contract), nil
 }
@@ -163,16 +183,20 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
 	b.mu.Lock()
 	defer b.mu.Unlock()

-	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
-		return nil, errBlockNumberUnsupported
+	statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+	if err != nil {
+		return nil, err
 	}
-	statedb, _ := b.blockchain.State()

 	val := statedb.GetState(contract, key)
 	return val[:], nil
 }

 // TransactionReceipt returns the receipt of a transaction.
 func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()

 	receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
 	return receipt, nil
 }
@@ -196,6 +220,115 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.
 	return nil, false, ethereum.NotFound
 }

+// BlockByHash retrieves a block based on the block hash
+func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if hash == b.pendingBlock.Hash() {
+		return b.pendingBlock, nil
+	}
+
+	block := b.blockchain.GetBlockByHash(hash)
+	if block != nil {
+		return block, nil
+	}
+
+	return nil, errBlockDoesNotExist
+}
+
+// BlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
+		return b.blockchain.CurrentBlock(), nil
+	}
+
+	block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
+	if block == nil {
+		return nil, errBlockDoesNotExist
+	}
+
+	return block, nil
+}
+
+// HeaderByHash returns a block header from the current canonical chain.
+func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if hash == b.pendingBlock.Hash() {
+		return b.pendingBlock.Header(), nil
+	}
+
+	header := b.blockchain.GetHeaderByHash(hash)
+	if header == nil {
+		return nil, errBlockDoesNotExist
+	}
+
+	return header, nil
+}
+
+// HeaderByNumber returns a block header from the current canonical chain. If number is
+// nil, the latest known header is returned.
+func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 {
+		return b.blockchain.CurrentHeader(), nil
+	}
+
+	return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
+}
+
+// TransactionCount returns the number of transactions in a given block
+func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if blockHash == b.pendingBlock.Hash() {
+		return uint(b.pendingBlock.Transactions().Len()), nil
+	}
+
+	block := b.blockchain.GetBlockByHash(blockHash)
+	if block == nil {
+		return uint(0), errBlockDoesNotExist
+	}
+
+	return uint(block.Transactions().Len()), nil
+}
+
+// TransactionInBlock returns the transaction for a specific block at a specific index
+func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if blockHash == b.pendingBlock.Hash() {
+		transactions := b.pendingBlock.Transactions()
+		if uint(len(transactions)) < index+1 {
+			return nil, errTransactionDoesNotExist
+		}
+
+		return transactions[index], nil
+	}
+
+	block := b.blockchain.GetBlockByHash(blockHash)
+	if block == nil {
+		return nil, errBlockDoesNotExist
+	}
+
+	transactions := block.Transactions()
+	if uint(len(transactions)) < index+1 {
+		return nil, errTransactionDoesNotExist
+	}
+
+	return transactions[index], nil
+}
+
 // PendingCodeAt returns the code associated with an account in the pending state.
 func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
 	b.mu.Lock()
@@ -419,10 +552,38 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere
 	}), nil
 }

+// SubscribeNewHead returns an event subscription for a new header
+func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
+	// subscribe to a new head
+	sink := make(chan *types.Header)
+	sub := b.events.SubscribeNewHeads(sink)
+
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case head := <-sink:
+				select {
+				case ch <- head:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
 // AdjustTime adds a time shift to the simulated clock.
 func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()

 	blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
 		for _, tx := range b.pendingBlock.Transactions() {
 			block.AddTx(tx)
@@ -502,22 +663,34 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
 }

 func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+	return nullSubscription()
+}
+
+func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
+	return fb.bc.SubscribeChainEvent(ch)
+}
+
+func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
+	return fb.bc.SubscribeRemovedLogsEvent(ch)
+}
+
+func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
+	return fb.bc.SubscribeLogsEvent(ch)
+}
+
+func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+	return nullSubscription()
+}
+
+func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
+
+func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
+	panic("not supported")
+}
+
+func nullSubscription() event.Subscription {
 	return event.NewSubscription(func(quit <-chan struct{}) error {
 		<-quit
 		return nil
 	})
 }
-func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
-	return fb.bc.SubscribeChainEvent(ch)
-}
-func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
-	return fb.bc.SubscribeRemovedLogsEvent(ch)
-}
-func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
-	return fb.bc.SubscribeLogsEvent(ch)
-}
-
-func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
-func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
-	panic("not supported")
-}
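The simulated.go changes above give `SimulatedBackend` block, header, and transaction accessors plus historical-state lookup. A rough usage sketch, modeled on the tests further down (the flow and values are illustrative, not taken from the diff):

package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// An empty-genesis simulated chain with a 10M block gas limit.
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, 10000000)
	defer sim.Close()
	ctx := context.Background()

	sim.Commit() // mine block 1

	// Round-trip the new accessors: number -> block -> hash -> block again.
	block, err := sim.BlockByNumber(ctx, big.NewInt(1))
	if err != nil {
		panic(err)
	}
	same, _ := sim.BlockByHash(ctx, block.Hash())
	count, _ := sim.TransactionCount(ctx, block.Hash())
	fmt.Println(same.NumberU64(), count) // 1 0 (the mined block is empty)
}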
@@ -14,20 +14,24 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package backends_test
|
||||
package backends
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
func TestSimulatedBackend(t *testing.T) {
|
||||
@@ -37,7 +41,7 @@ func TestSimulatedBackend(t *testing.T) {
|
||||
genAlloc := make(core.GenesisAlloc)
|
||||
genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)}
|
||||
|
||||
sim := backends.NewSimulatedBackend(genAlloc, gasLimit)
|
||||
sim := NewSimulatedBackend(genAlloc, gasLimit)
|
||||
defer sim.Close()
|
||||
|
||||
// should return an error if the tx is not found
|
||||
@@ -79,5 +83,760 @@ func TestSimulatedBackend(t *testing.T) {
|
||||
if isPending {
|
||||
t.Fatal("transaction should not have pending status")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
|
||||
// the following is based on this contract:
|
||||
// contract T {
|
||||
// event received(address sender, uint amount, bytes memo);
|
||||
// event receivedAddr(address sender);
|
||||
//
|
||||
// function receive(bytes calldata memo) external payable returns (string memory res) {
|
||||
// emit received(msg.sender, msg.value, memo);
|
||||
// emit receivedAddr(msg.sender);
|
||||
// return "hello world";
|
||||
// }
|
||||
// }
|
||||
const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]`
|
||||
const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
|
||||
const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
|
||||
|
||||
// expected return value contains "hello world"
|
||||
var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
func TestNewSimulatedBackend(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
expectedBal := big.NewInt(10000000000)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: expectedBal},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
|
||||
if sim.config != params.AllEthashProtocolChanges {
|
||||
t.Errorf("expected sim config to equal params.AllEthashProtocolChanges, got %v", sim.config)
|
||||
}
|
||||
|
||||
if sim.blockchain.Config() != params.AllEthashProtocolChanges {
|
||||
t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config)
|
||||
}
|
||||
|
||||
statedb, _ := sim.blockchain.State()
|
||||
bal := statedb.GetBalance(testAddr)
|
||||
if bal.Cmp(expectedBal) != 0 {
|
||||
t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_AdjustTime(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
|
||||
prevTime := sim.pendingBlock.Time()
|
||||
err := sim.AdjustTime(time.Second)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
newTime := sim.pendingBlock.Time()
|
||||
|
||||
if newTime-prevTime != uint64(time.Second.Seconds()) {
|
||||
t.Errorf("adjusted time not equal to a second. prev: %v, new: %v", prevTime, newTime)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_BalanceAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
expectedBal := big.NewInt(10000000000)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: expectedBal},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
bal, err := sim.BalanceAt(bgCtx, testAddr, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if bal.Cmp(expectedBal) != 0 {
|
||||
t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_BlockByHash(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
blockByHash, err := sim.BlockByHash(bgCtx, block.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
|
||||
if block.Hash() != blockByHash.Hash() {
|
||||
t.Errorf("did not get expected block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_BlockByNumber(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
if block.NumberU64() != 0 {
|
||||
t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64())
|
||||
}
|
||||
|
||||
// create one block
|
||||
sim.Commit()
|
||||
|
||||
block, err = sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
if block.NumberU64() != 1 {
|
||||
t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64())
|
||||
}
|
||||
|
||||
blockByNumber, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get block by number: %v", err)
|
||||
}
|
||||
if blockByNumber.Hash() != block.Hash() {
|
||||
t.Errorf("did not get the same block with height of 1 as before")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_NonceAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
nonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(0))
|
||||
if err != nil {
|
||||
t.Errorf("could not get nonce for test addr: %v", err)
|
||||
}
|
||||
|
||||
if nonce != uint64(0) {
|
||||
t.Errorf("received incorrect nonce. expected 0, got %v", nonce)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
newNonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get nonce for test addr: %v", err)
|
||||
}
|
||||
|
||||
if newNonce != nonce+uint64(1) {
|
||||
t.Errorf("received incorrect nonce. expected 1, got %v", nonce)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_SendTransaction(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get block at height 1: %v", err)
|
||||
}
|
||||
|
||||
if signedTx.Hash() != block.Transactions()[0].Hash() {
|
||||
t.Errorf("did not commit sent transaction. expected hash %v got hash %v", block.Transactions()[0].Hash(), signedTx.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionByHash(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
// ensure tx is committed pending
|
||||
receivedTx, pending, err := sim.TransactionByHash(bgCtx, signedTx.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err)
|
||||
}
|
||||
if !pending {
|
||||
t.Errorf("expected transaction to be in pending state")
|
||||
}
|
||||
if receivedTx.Hash() != signedTx.Hash() {
|
||||
t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash())
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
// ensure tx is not and committed pending
|
||||
receivedTx, pending, err = sim.TransactionByHash(bgCtx, signedTx.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err)
|
||||
}
|
||||
if pending {
|
||||
t.Errorf("expected transaction to not be in pending state")
|
||||
}
|
||||
if receivedTx.Hash() != signedTx.Hash() {
|
||||
t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_EstimateGas(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
gas, err := sim.EstimateGas(bgCtx, ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &testAddr,
|
||||
Value: big.NewInt(1000),
|
||||
Data: []byte{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("could not estimate gas: %v", err)
|
||||
}
|
||||
|
||||
if gas != params.TxGas {
|
||||
t.Errorf("expected 21000 gas cost for a transaction got %v", gas)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_HeaderByHash(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
header, err := sim.HeaderByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
headerByHash, err := sim.HeaderByHash(bgCtx, header.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get recent block: %v", err)
|
||||
}
|
||||
|
||||
if header.Hash() != headerByHash.Hash() {
|
||||
t.Errorf("did not get expected block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_HeaderByNumber(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
latestBlockHeader, err := sim.HeaderByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for tip of chain: %v", err)
|
||||
}
|
||||
if latestBlockHeader == nil {
|
||||
t.Errorf("received a nil block header")
|
||||
}
|
||||
if latestBlockHeader.Number.Uint64() != uint64(0) {
|
||||
t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64())
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
latestBlockHeader, err = sim.HeaderByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for blockheight of 1: %v", err)
|
||||
}
|
||||
|
||||
blockHeader, err := sim.HeaderByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for blockheight of 1: %v", err)
|
||||
}
|
||||
|
||||
if blockHeader.Hash() != latestBlockHeader.Hash() {
|
||||
t.Errorf("block header and latest block header are not the same")
|
||||
}
|
||||
if blockHeader.Number.Int64() != int64(1) {
|
||||
t.Errorf("did not get blockheader for block 1. instead got block %v", blockHeader.Number.Int64())
|
||||
}
|
||||
|
||||
block, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
|
||||
if err != nil {
|
||||
t.Errorf("could not get block for blockheight of 1: %v", err)
|
||||
}
|
||||
|
||||
if block.Hash() != blockHeader.Hash() {
|
||||
t.Errorf("block hash and block header hash do not match. expected %v, got %v", block.Hash(), blockHeader.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionCount(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
currentBlock, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil || currentBlock == nil {
|
||||
t.Error("could not get current block")
|
||||
}
|
||||
|
||||
count, err := sim.TransactionCount(bgCtx, currentBlock.Hash())
|
||||
if err != nil {
|
||||
t.Error("could not get current block's transaction count")
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
lastBlock, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for tip of chain: %v", err)
|
||||
}
|
||||
|
||||
count, err = sim.TransactionCount(bgCtx, lastBlock.Hash())
|
||||
if err != nil {
|
||||
t.Error("could not get current block's transaction count")
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Errorf("expected transaction count of %v does not match actual count of %v", 1, count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionInBlock(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
transaction, err := sim.TransactionInBlock(bgCtx, sim.pendingBlock.Hash(), uint(0))
|
||||
if err == nil && err != errTransactionDoesNotExist {
|
||||
t.Errorf("expected a transaction does not exist error to be received but received %v", err)
|
||||
}
|
||||
if transaction != nil {
|
||||
t.Errorf("expected transaction to be nil but received %v", transaction)
|
||||
}
|
||||
|
||||
// expect pending nonce to be 0 since account has not been used
|
||||
pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(0) {
|
||||
t.Errorf("expected pending nonce of 0 got %v", pendingNonce)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
sim.Commit()
|
||||
|
||||
lastBlock, err := sim.BlockByNumber(bgCtx, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get header for tip of chain: %v", err)
|
||||
}
|
||||
|
||||
transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(1))
|
||||
if err == nil && err != errTransactionDoesNotExist {
|
||||
t.Errorf("expected a transaction does not exist error to be received but received %v", err)
|
||||
}
|
||||
if transaction != nil {
|
||||
t.Errorf("expected transaction to be nil but received %v", transaction)
|
||||
}
|
||||
|
||||
transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(0))
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction in the lastest block with hash %v: %v", lastBlock.Hash().String(), err)
|
||||
}
|
||||
|
||||
if signedTx.Hash().String() != transaction.Hash().String() {
|
||||
t.Errorf("received transaction that did not match the sent transaction. expected hash %v, got hash %v", signedTx.Hash().String(), transaction.Hash().String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_PendingNonceAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// expect pending nonce to be 0 since account has not been used
|
||||
pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(0) {
|
||||
t.Errorf("expected pending nonce of 0 got %v", pendingNonce)
|
||||
}
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
|
||||
// expect pending nonce to be 1 since account has submitted one transaction
|
||||
pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(1) {
|
||||
t.Errorf("expected pending nonce of 1 got %v", pendingNonce)
|
||||
}
|
||||
|
||||
// make a new transaction with a nonce of 1
|
||||
tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err = types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not send tx: %v", err)
|
||||
}
|
||||
|
||||
// expect pending nonce to be 2 since account now has two transactions
|
||||
pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr)
|
||||
if err != nil {
|
||||
t.Errorf("did not get the pending nonce: %v", err)
|
||||
}
|
||||
|
||||
if pendingNonce != uint64(2) {
|
||||
t.Errorf("expected pending nonce of 2 got %v", pendingNonce)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_TransactionReceipt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
|
||||
signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
||||
if err != nil {
|
||||
t.Errorf("could not sign tx: %v", err)
|
||||
}
|
||||
|
||||
// send tx to simulated backend
|
||||
err = sim.SendTransaction(bgCtx, signedTx)
|
||||
if err != nil {
|
||||
t.Errorf("could not add tx to pending block: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
receipt, err := sim.TransactionReceipt(bgCtx, signedTx.Hash())
|
||||
if err != nil {
|
||||
t.Errorf("could not get transaction receipt: %v", err)
|
||||
}
|
||||
|
||||
if receipt.ContractAddress != testAddr && receipt.TxHash != signedTx.Hash() {
|
||||
t.Errorf("received receipt is not correct: %v", receipt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_SuggestGasPrice(t *testing.T) {
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{},
|
||||
10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
gasPrice, err := sim.SuggestGasPrice(bgCtx)
|
||||
if err != nil {
|
||||
t.Errorf("could not get gas price: %v", err)
|
||||
}
|
||||
if gasPrice.Uint64() != uint64(1) {
|
||||
t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimulatedBackend_PendingCodeAt(t *testing.T) {
|
||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
sim := NewSimulatedBackend(
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
},
|
||||
10000000,
|
||||
)
|
||||
defer sim.Close()
|
||||
bgCtx := context.Background()
|
||||
code, err := sim.CodeAt(bgCtx, testAddr, nil)
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
if len(code) != 0 {
|
||||
t.Errorf("got code for account that does not have contract code")
|
||||
}
|
||||
|
||||
parsed, err := abi.JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
auth := bind.NewKeyedTransactor(testKey)
|
||||
contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
|
||||
if err != nil {
|
||||
t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
|
||||
}
|
||||
|
||||
code, err = sim.PendingCodeAt(bgCtx, contractAddr)
|
||||
if err != nil {
|
||||
t.Errorf("could not get code at test addr: %v", err)
|
||||
}
|
||||
if len(code) == 0 {
|
||||
t.Errorf("did not get code for account that has contract code")
|
||||
}
|
||||
// ensure code received equals code deployed
|
||||
if !bytes.Equal(code, common.FromHex(deployedCode)) {
|
||||
t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
|
||||
}
|
||||
}

func TestSimulatedBackend_CodeAt(t *testing.T) {
	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
	sim := NewSimulatedBackend(
		core.GenesisAlloc{
			testAddr: {Balance: big.NewInt(10000000000)},
		},
		10000000,
	)
	defer sim.Close()
	bgCtx := context.Background()
	code, err := sim.CodeAt(bgCtx, testAddr, nil)
	if err != nil {
		t.Errorf("could not get code at test addr: %v", err)
	}
	if len(code) != 0 {
		t.Errorf("got code for account that does not have contract code")
	}

	parsed, err := abi.JSON(strings.NewReader(abiJSON))
	if err != nil {
		t.Errorf("could not parse contract ABI: %v", err)
	}
	auth := bind.NewKeyedTransactor(testKey)
	contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
	if err != nil {
		t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
	}

	sim.Commit()
	code, err = sim.CodeAt(bgCtx, contractAddr, nil)
	if err != nil {
		t.Errorf("could not get code at contract addr: %v", err)
	}
	if len(code) == 0 {
		t.Errorf("did not get code for account that has contract code")
	}
	// ensure code received equals code deployed
	if !bytes.Equal(code, common.FromHex(deployedCode)) {
		t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
	}
}

// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestSimulatedBackend_PendingAndCallContract(t *testing.T) {
	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
	sim := NewSimulatedBackend(
		core.GenesisAlloc{
			testAddr: {Balance: big.NewInt(10000000000)},
		},
		10000000,
	)
	defer sim.Close()
	bgCtx := context.Background()

	parsed, err := abi.JSON(strings.NewReader(abiJSON))
	if err != nil {
		t.Errorf("could not parse contract ABI: %v", err)
	}
	contractAuth := bind.NewKeyedTransactor(testKey)
	addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim)
	if err != nil {
		t.Errorf("could not deploy contract: %v", err)
	}

	input, err := parsed.Pack("receive", []byte("X"))
	if err != nil {
		t.Errorf("could not pack receive function on contract: %v", err)
	}

	// make sure you can call the contract in pending state
	res, err := sim.PendingCallContract(bgCtx, ethereum.CallMsg{
		From: testAddr,
		To:   &addr,
		Data: input,
	})
	if err != nil {
		t.Errorf("could not call receive method on contract: %v", err)
	}
	if len(res) == 0 {
		t.Errorf("result of contract call was empty: %v", res)
	}

	// while comparing against the byte array is more exact, also compare against the human readable string for readability
	if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
		t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
	}

	sim.Commit()

	// make sure you can call the contract
	res, err = sim.CallContract(bgCtx, ethereum.CallMsg{
		From: testAddr,
		To:   &addr,
		Data: input,
	}, nil)
	if err != nil {
		t.Errorf("could not call receive method on contract: %v", err)
	}
	if len(res) == 0 {
		t.Errorf("result of contract call was empty: %v", res)
	}

	if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
		t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
	}
}
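
Editorial note: the lifecycle these tests lean on is that transactions land in a pending block until Commit() seals it, after which the committed-state accessors (CodeAt, CallContract, TransactionReceipt) can see them. A minimal standalone sketch of that pattern, assuming the two-argument NewSimulatedBackend constructor used above (the generated key is illustrative):

```go
package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)

	// Fund the account in the genesis allocation and start the simulator.
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}},
		10000000,
	)
	defer sim.Close()

	// Before Commit, only the Pending* accessors observe queued transactions;
	// Commit seals the pending block so the committed-state views catch up.
	nonce, _ := sim.PendingNonceAt(context.Background(), auth.From)
	_ = nonce
	sim.Commit()
}
```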

@@ -1384,7 +1384,7 @@ var bindTests = []struct {
			if n != 3 {
				t.Fatalf("Invalid bar0 event")
			}
		case <-time.NewTimer(100 * time.Millisecond).C:
		case <-time.NewTimer(3 * time.Second).C:
			t.Fatalf("Wait bar0 event timeout")
		}

@@ -1395,7 +1395,7 @@ var bindTests = []struct {
			if n != 1 {
				t.Fatalf("Invalid bar event")
			}
		case <-time.NewTimer(100 * time.Millisecond).C:
		case <-time.NewTimer(3 * time.Second).C:
			t.Fatalf("Wait bar event timeout")
		}
		close(stopCh)
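
The two timeout bumps above (100 ms to 3 s) trade a little test latency for CI stability; the underlying shape is a select between the event channel and a deadline timer. A small standalone illustration of that shape (the channel and goroutine stand in for the generated event watcher):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	sink := make(chan int, 1)
	go func() { sink <- 3 }() // stand-in for the generated event subscription

	// Same shape as the bind tests: succeed as soon as the event arrives,
	// give up only after a deadline generous enough for slow CI machines.
	select {
	case n := <-sink:
		fmt.Println("got event", n)
	case <-time.NewTimer(3 * time.Second).C:
		fmt.Println("event timeout")
	}
}
```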

@@ -1530,6 +1530,61 @@ var bindTests = []struct {
		nil,
		[]string{"ContractOne", "ContractTwo", "ExternalLib"},
	},
	// Test the existence of the free retrieval calls
	{
		`PureAndView`,
		`pragma solidity >=0.6.0;
		contract PureAndView {
			function PureFunc() public pure returns (uint) {
				return 42;
			}
			function ViewFunc() public view returns (uint) {
				return block.number;
			}
		}
		`,
		[]string{`608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806376b5686a146037578063bb38c66c146053575b600080fd5b603d606f565b6040518082815260200191505060405180910390f35b60596077565b6040518082815260200191505060405180910390f35b600043905090565b6000602a90509056fea2646970667358221220d158c2ab7fdfce366a7998ec79ab84edd43b9815630bbaede2c760ea77f29f7f64736f6c63430006000033`},
		[]string{`[{"inputs": [],"name": "PureFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "ViewFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "view","type": "function"}]`},
		`
		"math/big"

		"github.com/ethereum/go-ethereum/accounts/abi/bind"
		"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
		"github.com/ethereum/go-ethereum/core"
		"github.com/ethereum/go-ethereum/crypto"
		`,
		`
		// Generate a new random account and a funded simulator
		key, _ := crypto.GenerateKey()
		auth := bind.NewKeyedTransactor(key)

		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
		defer sim.Close()

		// Deploy a tester contract and execute a structured call on it
		_, _, pav, err := DeployPureAndView(auth, sim)
		if err != nil {
			t.Fatalf("Failed to deploy PureAndView contract: %v", err)
		}
		sim.Commit()

		// This tests the existence of the free retriever call for view and pure functions
		if num, err := pav.PureFunc(nil); err != nil {
			t.Fatalf("Failed to call anonymous field retriever: %v", err)
		} else if num.Cmp(big.NewInt(42)) != 0 {
			t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 42)
		}
		if num, err := pav.ViewFunc(nil); err != nil {
			t.Fatalf("Failed to call anonymous field retriever: %v", err)
		} else if num.Cmp(big.NewInt(1)) != 0 {
			t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 1)
		}
		`,
		nil,
		nil,
		nil,
		nil,
	},
}

// Tests that packages generated by the binder can be successfully compiled and
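
A note on the nil argument in those retrieval calls: generated bindings take a *bind.CallOpts, and nil means default options. A hedged sketch of the explicit form, continuing from the test body above (pav and auth come from that snippet; Pending and From are fields of bind.CallOpts):

```go
// Explicit call options instead of nil: read from the pending state,
// as a specific caller. Zero-value fields fall back to defaults.
opts := &bind.CallOpts{
	Pending: true,      // execute against the pending block
	From:    auth.From, // msg.sender for the read-only call
}
num, err := pav.PureFunc(opts)
```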

@@ -178,6 +178,13 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er

	case reflectBigInt:
		num := new(big.Int).SetBytes(topics[0][:])
		if arg.Type.T == abi.IntTy {
			if num.Cmp(abi.MaxInt256) > 0 {
				num.Add(abi.MaxUint256, big.NewInt(0).Neg(num))
				num.Add(num, big.NewInt(1))
				num.Neg(num)
			}
		}
		field.Set(reflect.ValueOf(num))

	default:

@@ -212,8 +219,7 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics

	case abi.BoolTy:
		out[arg.Name] = topics[0][common.HashLength-1] == 1
	case abi.IntTy, abi.UintTy:
		num := new(big.Int).SetBytes(topics[0][:])
		out[arg.Name] = num
		out[arg.Name] = abi.ReadInteger(arg.Type.T, arg.Type.Kind, topics[0].Bytes())
	case abi.AddressTy:
		var addr common.Address
		copy(addr[:], topics[0][common.HashLength-common.AddressLength:])

@@ -221,7 +227,11 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics

	case abi.HashTy:
		out[arg.Name] = topics[0]
	case abi.FixedBytesTy:
		out[arg.Name] = topics[0][:]
		array, err := abi.ReadFixedBytes(arg.Type, topics[0].Bytes())
		if err != nil {
			return err
		}
		out[arg.Name] = array
	case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy:
		// Array types (including strings and bytes) have their keccak256 hashes stored in the topic, not a hash
		// whose bytes can be decoded to the actual value, so the best we can do is retrieve that hash
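
The alignment rules exercised here follow the ABI encoding of indexed event arguments: a value type occupies one 32-byte topic word, with addresses right-aligned and fixed-size byte arrays left-aligned. A small standalone sketch of both layouts (the topic value reuses the address that appears in the receipt comment earlier in this diff):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// An indexed address is right-aligned in the 32-byte topic word:
	// 12 zero bytes of padding, then the 20 address bytes.
	topic := common.HexToHash("0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab2")
	var addr common.Address
	copy(addr[:], topic[common.HashLength-common.AddressLength:])
	fmt.Println(addr.Hex()) // prints the 20-byte address recovered from the topic

	// A fixed-size bytes5 value is left-aligned: its 5 bytes come first,
	// followed by 27 zero bytes of right padding.
	var b5 [5]byte
	copy(b5[:], topic[:5])
	fmt.Println(b5)
}
```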

@@ -17,6 +17,7 @@

package bind

import (
	"math/big"
	"reflect"
	"testing"

@@ -55,27 +56,44 @@ func TestMakeTopics(t *testing.T) {
	}
}

func TestParseTopics(t *testing.T) {
	type bytesStruct struct {
		StaticBytes [5]byte
	}
	type args struct {
		createObj func() interface{}
		resultObj func() interface{}
		resultMap func() map[string]interface{}
		fields    abi.Arguments
		topics    []common.Hash
	}

type bytesStruct struct {
	StaticBytes [5]byte
}
type int8Struct struct {
	Int8Value int8
}
type int256Struct struct {
	Int256Value *big.Int
}

type topicTest struct {
	name    string
	args    args
	wantErr bool
}

func setupTopicsTests() []topicTest {
	bytesType, _ := abi.NewType("bytes5", "", nil)
	type args struct {
		createObj func() interface{}
		resultObj func() interface{}
		fields    abi.Arguments
		topics    []common.Hash
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
	int8Type, _ := abi.NewType("int8", "", nil)
	int256Type, _ := abi.NewType("int256", "", nil)

	tests := []topicTest{
		{
			name: "support fixed byte types, right padded to 32 bytes",
			args: args{
				createObj: func() interface{} { return &bytesStruct{} },
				resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}}
				},
				fields: abi.Arguments{abi.Argument{
					Name: "staticBytes",
					Type: bytesType,
@@ -87,7 +105,54 @@ func TestParseTopics(t *testing.T) {
				},
			},
			wantErr: false,
		},
		{
			name: "int8 with negative value",
			args: args{
				createObj: func() interface{} { return &int8Struct{} },
				resultObj: func() interface{} { return &int8Struct{Int8Value: -1} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"int8Value": int8(-1)}
				},
				fields: abi.Arguments{abi.Argument{
					Name:    "int8Value",
					Type:    int8Type,
					Indexed: true,
				}},
				topics: []common.Hash{
					{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: false,
		},
		{
			name: "int256 with negative value",
			args: args{
				createObj: func() interface{} { return &int256Struct{} },
				resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} },
				resultMap: func() map[string]interface{} {
					return map[string]interface{}{"int256Value": big.NewInt(-1)}
				},
				fields: abi.Arguments{abi.Argument{
					Name:    "int256Value",
					Type:    int256Type,
					Indexed: true,
				}},
				topics: []common.Hash{
					{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
						255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
				},
			},
			wantErr: false,
		},
	}

	return tests
}

func TestParseTopics(t *testing.T) {
	tests := setupTopicsTests()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			createObj := tt.args.createObj()
@@ -101,3 +166,20 @@ func TestParseTopics(t *testing.T) {
		})
	}
}

func TestParseTopicsIntoMap(t *testing.T) {
	tests := setupTopicsTests()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			outMap := make(map[string]interface{})
			if err := parseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
				t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
			}
			resultMap := tt.args.resultMap()
			if !reflect.DeepEqual(outMap, resultMap) {
				t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap)
			}
		})
	}
}

@@ -73,7 +73,7 @@ func packNum(value reflect.Value) []byte {

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return U256(big.NewInt(value.Int()))
	case reflect.Ptr:
		return U256(value.Interface().(*big.Int))
		return U256(new(big.Int).Set(value.Interface().(*big.Int)))
	default:
		panic("abi: fatal error")
	}
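
The packNum change above is an aliasing fix: the U256 helper reduces its argument modulo 2^256 in place, so passing the caller's *big.Int directly would silently rewrite the caller's value. A standalone sketch of the hazard (u256 below mimics the in-place reduction; it is an illustrative stand-in, not the package's exported function):

```go
package main

import (
	"fmt"
	"math/big"
)

// u256 mimics the in-place reduction done by the packer:
// it masks its argument modulo 2^256 without copying it first.
func u256(x *big.Int) *big.Int {
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	return x.And(x, mask)
}

func main() {
	n := big.NewInt(-1)
	u256(n)                    // caller's value is rewritten in place
	fmt.Println(n.Sign() >= 0) // true: the -1 is gone

	m := big.NewInt(-1)
	u256(new(big.Int).Set(m)) // the fix: reduce a defensive copy instead
	fmt.Println(m.Int64())    // -1 preserved for the caller
}
```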

@@ -26,16 +26,18 @@ import (
)

var (
	maxUint256 = big.NewInt(0).Add(
	// MaxUint256 is the maximum value that can be represented by a uint256
	MaxUint256 = big.NewInt(0).Add(
		big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
		big.NewInt(-1))
	maxInt256 = big.NewInt(0).Add(
	// MaxInt256 is the maximum value that can be represented by a int256
	MaxInt256 = big.NewInt(0).Add(
		big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
		big.NewInt(-1))
)

// reads the integer based on its kind
func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
// ReadInteger reads the integer based on its kind and returns the appropriate value
func ReadInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
	switch kind {
	case reflect.Uint8:
		return b[len(b)-1]

@@ -62,8 +64,8 @@ func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
		return ret
	}

	if ret.Cmp(maxInt256) > 0 {
		ret.Add(maxUint256, big.NewInt(0).Neg(ret))
	if ret.Cmp(MaxInt256) > 0 {
		ret.Add(MaxUint256, big.NewInt(0).Neg(ret))
		ret.Add(ret, big.NewInt(1))
		ret.Neg(ret)
	}

@@ -102,8 +104,8 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
	return
}

// through reflection, creates a fixed array to be read from
func readFixedBytes(t Type, word []byte) (interface{}, error) {
// ReadFixedBytes uses reflection to create a fixed array to be read from
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
	if t.T != FixedBytesTy {
		return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
	}

@@ -230,7 +232,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
	case StringTy: // variable arrays are written at the end of the return bytes
		return string(output[begin : begin+length]), nil
	case IntTy, UintTy:
		return readInteger(t.T, t.Kind, returnOutput), nil
		return ReadInteger(t.T, t.Kind, returnOutput), nil
	case BoolTy:
		return readBool(returnOutput)
	case AddressTy:

@@ -240,7 +242,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
	case BytesTy:
		return output[begin : begin+length], nil
	case FixedBytesTy:
		return readFixedBytes(t, returnOutput)
		return ReadFixedBytes(t, returnOutput)
	case FunctionTy:
		return readFunctionType(t, returnOutput)
	default:
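
The negative-number branch in ReadInteger is plain two's-complement decoding: a 256-bit word whose unsigned value exceeds MaxInt256 represents value - 2^256. The patch expresses that with an Add/Add/Neg sequence; the equivalent one-step subtraction, as a standalone sketch:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)
	maxInt256 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(1))

	// A topic word of thirty-two 0xff bytes decodes to 2^256-1 unsigned...
	raw := make([]byte, 32)
	for i := range raw {
		raw[i] = 0xff
	}
	num := new(big.Int).SetBytes(raw)

	// ...which is above MaxInt256, so as a signed int256 it wraps to
	// num - 2^256 = -1, matching the int8/int256 test vectors in this diff.
	if num.Cmp(maxInt256) > 0 {
		num.Sub(num, two256)
	}
	fmt.Println(num) // -1
}
```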

@@ -141,6 +141,11 @@ func (am *Manager) Wallets() []Wallet {
	am.lock.RLock()
	defer am.lock.RUnlock()

	return am.walletsNoLock()
}

// walletsNoLock returns all registered wallets. Callers must hold am.lock.
func (am *Manager) walletsNoLock() []Wallet {
	cpy := make([]Wallet, len(am.wallets))
	copy(cpy, am.wallets)
	return cpy

@@ -155,7 +160,7 @@ func (am *Manager) Wallet(url string) (Wallet, error) {
	if err != nil {
		return nil, err
	}
	for _, wallet := range am.Wallets() {
	for _, wallet := range am.walletsNoLock() {
		if wallet.URL() == parsed {
			return wallet, nil
		}
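
The walletsNoLock split avoids re-acquiring am.lock from a method that already holds it: Go's sync.RWMutex is not re-entrant, and a nested RLock can deadlock once a writer queues up between the two acquisitions. A minimal standalone sketch of the convention (illustrative names, not the accounts API):

```go
package main

import (
	"fmt"
	"sync"
)

// Pattern: the exported method takes the lock, the unexported sibling
// assumes the caller holds it. Locked methods call only *NoLock siblings,
// never other locking methods on the same mutex.
type registry struct {
	mu    sync.RWMutex
	items []string
}

func (r *registry) Items() []string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.itemsNoLock()
}

// itemsNoLock must only be called with r.mu held.
func (r *registry) itemsNoLock() []string {
	cpy := make([]string, len(r.items))
	copy(cpy, r.items)
	return cpy
}

func main() {
	r := &registry{items: []string{"a", "b"}}
	fmt.Println(r.Items())
}
```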

@@ -71,7 +71,7 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes

	cardPublic, ok := gen.Unmarshal(keyData)
	if !ok {
		return nil, fmt.Errorf("Could not unmarshal public key from card")
		return nil, fmt.Errorf("could not unmarshal public key from card")
	}

	secret, err := gen.GenerateSharedSecret(private, cardPublic)

@@ -109,7 +109,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
	cardChallenge := response.Data[32:64]

	if !bytes.Equal(expectedCryptogram, cardCryptogram) {
		return fmt.Errorf("Invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
		return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
	}

	md.Reset()

@@ -132,7 +132,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
	if s.PairingKey == nil {
		return fmt.Errorf("Cannot unpair: not paired")
		return fmt.Errorf("cannot unpair: not paired")
	}

	_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})

@@ -148,7 +148,7 @@ func (s *SecureChannelSession) Unpair() error {
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
	if s.iv != nil {
		return fmt.Errorf("Session already opened")
		return fmt.Errorf("session already opened")
	}

	response, err := s.open()

@@ -185,11 +185,11 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
		return err
	}
	if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
		return fmt.Errorf("Got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
		return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
	}

	if len(response.Data) != scSecretLength {
		return fmt.Errorf("Response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
		return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
	}

	return nil

@@ -222,7 +222,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
	if s.iv == nil {
		return nil, fmt.Errorf("Channel not open")
		return nil, fmt.Errorf("channel not open")
	}

	data, err := s.encryptAPDU(data)

@@ -261,14 +261,14 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
		return nil, err
	}
	if !bytes.Equal(s.iv, rmac) {
		return nil, fmt.Errorf("Invalid MAC in response")
		return nil, fmt.Errorf("invalid MAC in response")
	}

	rapdu := &responseAPDU{}
	rapdu.deserialize(plainData)

	if rapdu.Sw1 != sw1Ok {
		return nil, fmt.Errorf("Unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
		return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
	}

	return rapdu, nil

@@ -277,7 +277,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
// encryptAPDU is an internal method that serializes and encrypts an APDU.
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
	if len(data) > maxPayloadSize {
		return nil, fmt.Errorf("Payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
		return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
	}
	data = pad(data, 0x80)

@@ -323,10 +323,10 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
		case terminator:
			return data[:len(data)-i], nil
		default:
			return nil, fmt.Errorf("Expected end of padding, got %d", data[len(data)-i])
			return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
		}
	}
	return nil, fmt.Errorf("Expected end of padding, got 0")
	return nil, fmt.Errorf("expected end of padding, got 0")
}

// updateIV is an internal method that updates the initialization vector after
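
These are mechanical staticcheck ST1005 fixes: Go error strings should start lowercase and carry no trailing punctuation, because they usually appear mid-sentence after a wrapping prefix. A tiny standalone illustration of why the convention reads better:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Error strings compose into larger messages, so they are lowercased
	// and unpunctuated; wrapped, the whole thing reads as one sentence.
	err := errors.New("channel not open")
	fmt.Println(fmt.Errorf("opening wallet: %w", err)) // opening wallet: channel not open
}
```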

@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
	}

	if response.Sw1 != sw1Ok {
		return nil, fmt.Errorf("Unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
		return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
	}

	return response, nil

@@ -252,7 +252,7 @@ func (w *Wallet) release() error {
// with the wallet.
func (w *Wallet) pair(puk []byte) error {
	if w.session.paired() {
		return fmt.Errorf("Wallet already paired")
		return fmt.Errorf("wallet already paired")
	}
	pairing, err := w.session.pair(puk)
	if err != nil {

@@ -773,12 +773,12 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP

	// Look for the path in the URL
	if account.URL.Scheme != w.Hub.scheme {
		return nil, fmt.Errorf("Scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
		return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
	}

	parts := strings.SplitN(account.URL.Path, "/", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("Invalid URL format: %s", account.URL)
		return nil, fmt.Errorf("invalid URL format: %s", account.URL)
	}

	if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {

@@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
// unpair deletes an existing pairing.
func (s *Session) unpair() error {
	if !s.verified {
		return fmt.Errorf("Unpair requires that the PIN be verified")
		return fmt.Errorf("unpair requires that the PIN be verified")
	}
	return s.Channel.Unpair()
}

@@ -850,7 +850,7 @@ func (s *Session) paired() bool {
// authenticate uses an existing pairing to establish a secure channel.
func (s *Session) authenticate(pairing smartcardPairing) error {
	if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
		return fmt.Errorf("Cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
		return fmt.Errorf("cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
	}
	s.Channel.PairingKey = pairing.PairingKey
	s.Channel.PairingIndex = pairing.PairingIndex

@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
}

// derivationPath fetches the wallet's current derivation path from the card.
//lint:ignore U1000 needs to be added to the console interface
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
	response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
	if err != nil {

@@ -993,12 +994,14 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
}

// keyExport contains information on an exported keypair.
//lint:ignore U1000 needs to be added to the console interface
type keyExport struct {
	PublicKey  []byte `asn1:"tag:0"`
	PrivateKey []byte `asn1:"tag:1,optional"`
}

// publicKey returns the public key for the current derivation path.
//lint:ignore U1000 needs to be added to the console interface
func (s *Session) publicKey() ([]byte, error) {
	response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
	if err != nil {

@@ -162,7 +162,8 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
		return common.Address{}, nil, accounts.ErrWalletClosed
	}
	// Ensure the wallet is capable of signing the given transaction
	if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
	if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
		//lint:ignore ST1005 brand name displayed on the console
		return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
	}
	// All infos gathered and metadata checks out, request signing

@@ -23,8 +23,8 @@ environment:

install:
  - git submodule update --init
  - rmdir C:\go /s /q
  - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip
  - 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
  - appveyor DownloadFile https://dl.google.com/go/go1.13.8.windows-%GETH_ARCH%.zip
  - 7z x go1.13.8.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
  - go version
  - gcc --version

@@ -1,19 +1,19 @@
# This file contains sha256 checksums of optional build dependencies.

95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624 go1.13.4.src.tar.gz
b13bf04633d4d8cf53226ebeaace8d4d2fd07ae6fa676d0844a688339debec34 go1.13.8.src.tar.gz

1fcbc9e36f4319eeed02beb8cfd1b3d425ffc2f90ddf09a80f18d5064c51e0cb golangci-lint-1.21.0-linux-386.tar.gz
267b4066e67139a38d29499331a002d6a29ad5be7aafc83db3b1e88f1b027f90 golangci-lint-1.21.0-linux-armv6.tar.gz
a602c1f25f90e46e621019cff0a8cb3f4e1837011f3537f15e730d6a9ebf507b golangci-lint-1.21.0-freebsd-armv7.tar.gz
2c861f8dc56b560474aa27cab0c075991628cc01af3451e27ac82f5d10d5106b golangci-lint-1.21.0-linux-amd64.tar.gz
a1c39e055280e755acaa906e7abfc20b99a5c28be8af541c57fbc44abbb20dde golangci-lint-1.21.0-linux-arm64.tar.gz
a8f8bda8c6a4136acf858091077830b1e83ad5612606cb69d5dced869ce00bd8 golangci-lint-1.21.0-linux-ppc64le.tar.gz
0a8a8c3bc660ccbca668897ab520f7ee9878f16cc8e4dd24fe46236ceec97ba3 golangci-lint-1.21.0-freebsd-armv6.tar.gz
699b07f45e216571f54002bcbd83b511c4801464a422162158e299587b095b18 golangci-lint-1.21.0-freebsd-amd64.tar.gz
980fb4993942154bb5c8129ea3b86de09574fe81b24384ebb58cd7a9d2f04483 golangci-lint-1.21.0-linux-armv7.tar.gz
f15b689088a47f20d5d3c1d945e9ee7c6238f2b84ea468b5f886cf8713dce62e golangci-lint-1.21.0-windows-386.zip
2e40ded7adcf11e59013cb15c24438b15a86526ca241edfcfdf1abd73a5280a8 golangci-lint-1.21.0-windows-amd64.zip
6052c7cfea4d6dc2fc722f6c12792a5ec087420198db495afffbc22052653bf7 golangci-lint-1.21.0-freebsd-386.tar.gz
ca00b8eacf9af14a71b908b4149606c762aa5c0eac781e74ca0abedfdfdf6c8c golangci-lint-1.21.0-linux-s390x.tar.gz
1365455940c342f95718159d89d66ad2eef19f0846c3e87023e915a3527b929f golangci-lint-1.21.0-darwin-386.tar.gz
2b2713ec5007e67883aa501eebb81f22abfab0cf0909134ba90f60a066db3760 golangci-lint-1.21.0-darwin-amd64.tar.gz
478994633b0f5121a7a8d4f368078093e21014fdc7fb2c0ceeae63668c13c5b6 golangci-lint-1.22.2-freebsd-amd64.tar.gz
fcf80824c21567eb0871055711bf9bdca91cf9a081122e2a45f1d11fed754600 golangci-lint-1.22.2-darwin-amd64.tar.gz
cda85c72fc128b2ea0ae05baea7b91172c63aea34064829f65285f1dd536f1e0 golangci-lint-1.22.2-windows-386.zip
94f04899f620aadc9c1524e5482e415efdbd993fa2b2918c4fec2798f030ac1c golangci-lint-1.22.2-linux-armv7.tar.gz
0e72a87d71edde00b6e37e84a99841833ad55fee83e20d21130a7a622b2860bb golangci-lint-1.22.2-freebsd-386.tar.gz
86def2f31fe8fd7c05674104ed2a4bef3e44b7132b93c6ad2f52f198b3d01801 golangci-lint-1.22.2-linux-s390x.tar.gz
b0df4546d36be94e8107733ba290b98dd9b7e41a42d3fb202e87fc7e4ee800c3 golangci-lint-1.22.2-freebsd-armv6.tar.gz
3d45958dcf6a8d195086d2fced1a21db42a90815dfd156d180efa62dbdda6724 golangci-lint-1.22.2-darwin-386.tar.gz
7ee29f35c74fab017a454237990c74d984ce3855960f2c10509238992bb781f9 golangci-lint-1.22.2-linux-arm64.tar.gz
52086ac52a502b68578e58e35d3964f127c16d7a90b9ffcb399a004d055ded51 golangci-lint-1.22.2-linux-386.tar.gz
c2e4df1fab2ae53762f9baac6041503eeeaa968ce38ea41779f7cb526751c667 golangci-lint-1.22.2-windows-amd64.zip
109d38cdc89f271392f5a138d6782657157f9f496fd4801956efa2d0428e0cbe golangci-lint-1.22.2-linux-amd64.tar.gz
f08aae4868d4828c8f07deb0dcd941a1da695b97e58d15e9f3d1d07dcc7a0c84 golangci-lint-1.22.2-linux-armv6.tar.gz
37af03d9c144d527cb15c46a07e6a22d3f62b5491e34ad6f3bfe6bb0b0b597d4 golangci-lint-1.22.2-linux-ppc64le.tar.gz
251a1081d53944f1d5f86216d752837b23079f90605c9d1cc628da1ffcd2e749 golangci-lint-1.22.2-freebsd-armv7.tar.gz

build/ci.go
@@ -145,6 +145,7 @@ var (
	"bionic": "golang-go",
	"disco":  "golang-go",
	"eoan":   "golang-go",
	"focal":  "golang-go",
}

debGoBootPaths = map[string]string{

@@ -214,9 +215,9 @@ func doInstall(cmdline []string) {
	var minor int
	fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)

	if minor < 9 {
	if minor < 11 {
		log.Println("You have Go version", runtime.Version())
		log.Println("go-ethereum requires at least Go version 1.9 and cannot")
		log.Println("go-ethereum requires at least Go version 1.11 and cannot")
		log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
		os.Exit(1)
	}

@@ -237,13 +238,6 @@ func doInstall(cmdline []string) {
		build.MustRun(goinstall)
		return
	}
	// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds
	if *arch == "arm" {
		os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm"))
		for _, path := range filepath.SplitList(build.GOPATH()) {
			os.RemoveAll(filepath.Join(path, "pkg", runtime.GOOS+"_arm"))
		}
	}

	// Seems we are cross compiling, work around forbidden GOBIN
	goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)

@@ -294,7 +288,6 @@ func goTool(subcmd string, args ...string) *exec.Cmd {

func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
	cmd := build.GoTool(subcmd, args...)
	cmd.Env = []string{"GOPATH=" + build.GOPATH()}
	if arch == "" || arch == runtime.GOARCH {
		cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
	} else {

@@ -305,7 +298,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
		cmd.Env = append(cmd.Env, "CC="+cc)
	}
	for _, e := range os.Environ() {
		if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
		if strings.HasPrefix(e, "GOBIN=") {
			continue
		}
		cmd.Env = append(cmd.Env, e)

@@ -363,7 +356,7 @@ func doLint(cmdline []string) {

// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
	const version = "1.21.0"
	const version = "1.22.2"

	csdb := build.MustLoadChecksums("build/checksums.txt")
	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)

@@ -888,7 +881,6 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
	cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
	cmd.Args = append(cmd.Args, args...)
	cmd.Env = []string{
		"GOPATH=" + build.GOPATH(),
		"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
	}
	for _, e := range os.Environ() {

@@ -1078,7 +1070,6 @@ func xgoTool(args []string) *exec.Cmd {
	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
	cmd.Env = os.Environ()
	cmd.Env = append(cmd.Env, []string{
		"GOPATH=" + build.GOPATH(),
		"GOBIN=" + GOBIN,
	}...)
	return cmd
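
One reason the version gate parses a number instead of comparing strings: "go1.10" sorts lexically before "go1.9", so string comparison would misorder releases. A standalone sketch of the same extraction the gate performs:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Extract the minor version as an integer; lexical comparison of
	// version strings would wrongly rank "go1.10" below "go1.9".
	version := "go1.13.8"
	var minor int
	fmt.Sscanf(strings.TrimPrefix(version, "go1."), "%d", &minor)
	fmt.Println(minor >= 11) // true: new enough for this build script
}
```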
@@ -6,6 +6,7 @@

# Launchpad rejects Go's access to $HOME, use custom folders
export GOCACHE=/tmp/go-build
export GOPATH=/tmp/gopath
export GOROOT_BOOTSTRAP={{.GoBootPath}}

override_dh_auto_clean:

@@ -19,10 +20,11 @@ override_dh_auto_build:

	# We can't download external go modules within Launchpad, so we're shipping the
	# entire dependency source cache with go-ethereum.
	(mkdir -p build/_workspace/pkg/mod && mv .mod/* build/_workspace/pkg/mod)
	mkdir -p $(GOPATH)/pkg
	mv .mod $(GOPATH)/pkg/mod

	# A fresh Go was built, all dependency downloads faked, hope build works now
	build/env.sh ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
	../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}

override_dh_auto_test:

build/env.sh (deleted)
@@ -1,30 +0,0 @@
#!/bin/sh

set -e

if [ ! -f "build/env.sh" ]; then
	echo "$0 must be run from the root of the repository."
	exit 2
fi

# Create fake Go workspace if it doesn't exist yet.
workspace="$PWD/build/_workspace"
root="$PWD"
ethdir="$workspace/src/github.com/ethereum"
if [ ! -L "$ethdir/go-ethereum" ]; then
	mkdir -p "$ethdir"
	cd "$ethdir"
	ln -s ../../../../../. go-ethereum
	cd "$root"
fi

# Set up the environment to use the workspace.
GOPATH="$workspace"
export GOPATH

# Run the command inside the workspace.
cd "$ethdir/go-ethereum"
PWD="$ethdir/go-ethereum"

# Launch the arguments with the configured environment.
exec "$@"

cmd/abidump/main.go (new file)
@@ -0,0 +1,74 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/hex"
	"flag"
	"fmt"
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/signer/core"
	"github.com/ethereum/go-ethereum/signer/fourbyte"
)

func init() {
	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "<hexdata>")
		flag.PrintDefaults()
		fmt.Fprintln(os.Stderr, `
Parses the given ABI data and tries to interpret it from the fourbyte database.`)
	}
}

func parse(data []byte) {
	db, err := fourbyte.New()
	if err != nil {
		die(err)
	}
	messages := core.ValidationMessages{}
	db.ValidateCallData(nil, data, &messages)
	for _, m := range messages.Messages {
		fmt.Printf("%v: %v\n", m.Typ, m.Message)
	}
}

// Example
// ./abidump a9059cbb000000000000000000000000ea0e2dc7d65a50e77fc7e84bff3fd2a9e781ff5c0000000000000000000000000000000000000000000000015af1d78b58c40000
func main() {
	flag.Parse()

	switch {
	case flag.NArg() == 1:
		hexdata := flag.Arg(0)
		data, err := hex.DecodeString(strings.TrimPrefix(hexdata, "0x"))
		if err != nil {
			die(err)
		}
		parse(data)
	default:
		fmt.Fprintln(os.Stderr, "Error: one argument needed")
		flag.Usage()
		os.Exit(2)
	}
}

func die(args ...interface{}) {
	fmt.Fprintln(os.Stderr, args...)
	os.Exit(1)
}
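
The example calldata in that comment begins with the selector a9059cbb, the first four bytes of keccak256("transfer(address,uint256)"), i.e. the ERC-20 transfer signature the fourbyte database resolves. A quick standalone sketch of how such a selector is derived:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The 4-byte selector is the leading 4 bytes of the keccak256 hash
	// of the canonical method signature.
	sig := []byte("transfer(address,uint256)")
	selector := crypto.Keccak256(sig)[:4]
	fmt.Printf("%x\n", selector) // a9059cbb
}
```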

@@ -21,9 +21,11 @@ import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common/compiler"

@@ -32,19 +34,6 @@ import (
	"gopkg.in/urfave/cli.v1"
)

const (
	commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
	{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
	{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)

var (
	// Git SHA1 commit hash of the release (set via linker flags)
	gitCommit = ""

@@ -128,7 +117,7 @@ func init() {
		aliasFlag,
	}
	app.Action = utils.MigrateFlags(abigen)
	cli.CommandHelpTemplate = commandHelperTemplate
	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func abigen(c *cli.Context) error {

@@ -206,10 +195,22 @@ func abigen(c *cli.Context) error {
			utils.Fatalf("Failed to build Solidity contract: %v", err)
		}
	case c.GlobalIsSet(vyFlag.Name):
		contracts, err = compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
		output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
		if err != nil {
			utils.Fatalf("Failed to build Vyper contract: %v", err)
		}
		contracts = make(map[string]*compiler.Contract)
		for n, contract := range output {
			name := n
			// Sanitize the combined json names to match the
			// format expected by solidity.
			if !strings.Contains(n, ":") {
				// Remove extra path components
				name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy"))
			}
			contracts[name] = contract
		}

	case c.GlobalIsSet(jsonFlag.Name):
		jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name))
		if err != nil {

@@ -28,19 +28,6 @@ import (
	"gopkg.in/urfave/cli.v1"
)

const (
	commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
	{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
	{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)

var (
	// Git SHA1 commit hash of the release (set via linker flags)
	gitCommit = ""

@@ -61,7 +48,7 @@ func init() {
		oracleFlag,
		nodeURLFlag,
	}
	cli.CommandHelpTemplate = commandHelperTemplate
	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

// Commonly used command line flags.

@@ -223,6 +223,7 @@ func init() {
	}
	app.Action = signer
	app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand}
	cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func main() {

@@ -1,6 +1,6 @@
## Initializing Clef

First thing's first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password:
First things first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password:

```text
$ clef init

@@ -130,9 +130,9 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
	if !exists {
		// Entry is unknown, push a new one to Cloudflare.
		log.Info(fmt.Sprintf("Creating %s = %q", path, val))
		ttl := 1
		ttl := rootTTL
		if path != name {
			ttl = 2147483647 // Max TTL permitted by Cloudflare
			ttl = treeNodeTTL // Max TTL permitted by Cloudflare
		}
		_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl})
	} else if old.Content != val {

cmd/devp2p/dns_route53.go (new file)
@@ -0,0 +1,303 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"errors"
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
	"gopkg.in/urfave/cli.v1"
)

// Route53 limits change sets to this size. DNS changes need to be split
// up into multiple batches to work around the limit.
const route53ChangeLimit = 30000

var (
	route53AccessKeyFlag = cli.StringFlag{
		Name:   "access-key-id",
		Usage:  "AWS Access Key ID",
		EnvVar: "AWS_ACCESS_KEY_ID",
	}
	route53AccessSecretFlag = cli.StringFlag{
		Name:   "access-key-secret",
		Usage:  "AWS Access Key Secret",
		EnvVar: "AWS_SECRET_ACCESS_KEY",
	}
	route53ZoneIDFlag = cli.StringFlag{
		Name:  "zone-id",
		Usage: "Route53 Zone ID",
	}
)

type route53Client struct {
	api    *route53.Route53
	zoneID string
}

type recordSet struct {
	values []string
	ttl    int64
}

// newRoute53Client sets up a Route53 API client from command line flags.
func newRoute53Client(ctx *cli.Context) *route53Client {
	akey := ctx.String(route53AccessKeyFlag.Name)
	asec := ctx.String(route53AccessSecretFlag.Name)
	if akey == "" || asec == "" {
		exit(fmt.Errorf("need Route53 Access Key ID and secret to proceed"))
	}
	config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")}
	session, err := session.NewSession(config)
	if err != nil {
		exit(fmt.Errorf("can't create AWS session: %v", err))
	}
	return &route53Client{
		api:    route53.New(session),
		zoneID: ctx.String(route53ZoneIDFlag.Name),
	}
}

// deploy uploads the given tree to Route53.
func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
	if err := c.checkZone(name); err != nil {
		return err
	}

	// Compute DNS changes.
	existing, err := c.collectRecords(name)
	if err != nil {
		return err
	}
	log.Info(fmt.Sprintf("Found %d TXT records", len(existing)))

	records := t.ToTXT(name)
	changes := c.computeChanges(name, records, existing)
	if len(changes) == 0 {
		log.Info("No DNS changes needed")
		return nil
	}

	// Submit change batches.
	batches := splitChanges(changes, route53ChangeLimit)
	for i, changes := range batches {
		log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes)))
		batch := new(route53.ChangeBatch)
		batch.SetChanges(changes)
		batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq()))
		req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch}
		resp, err := c.api.ChangeResourceRecordSets(req)
		if err != nil {
			return err
		}

		log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id))
		wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id}
		if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil {
			return err
		}
	}
	return nil
}

// checkZone verifies zone information for the given domain.
func (c *route53Client) checkZone(name string) (err error) {
	if c.zoneID == "" {
		c.zoneID, err = c.findZoneID(name)
	}
	return err
}

// findZoneID searches for the Zone ID containing the given domain.
func (c *route53Client) findZoneID(name string) (string, error) {
	log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name))
	var req route53.ListHostedZonesByNameInput
	for {
		resp, err := c.api.ListHostedZonesByName(&req)
		if err != nil {
			return "", err
		}
		for _, zone := range resp.HostedZones {
			if isSubdomain(name, *zone.Name) {
				return *zone.Id, nil
			}
		}
		if !*resp.IsTruncated {
			break
		}
		req.DNSName = resp.NextDNSName
		req.HostedZoneId = resp.NextHostedZoneId
	}
	return "", errors.New("can't find zone ID for " + name)
}

// computeChanges creates DNS changes for the given record.
func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change {
	// Convert all names to lowercase.
	lrecords := make(map[string]string, len(records))
	for name, r := range records {
		lrecords[strings.ToLower(name)] = r
	}
	records = lrecords

	var changes []*route53.Change
	for path, val := range records {
		ttl := int64(rootTTL)
		if path != name {
			ttl = int64(treeNodeTTL)
		}

		prevRecords, exists := existing[path]
		prevValue := strings.Join(prevRecords.values, "")
		if !exists {
			// Entry is unknown, push a new one
			log.Info(fmt.Sprintf("Creating %s = %q", path, val))
			changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val)))
		} else if prevValue != val {
			// Entry already exists, only change its content.
			log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val))
			changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val)))
		} else {
			log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
		}
	}

	// Iterate over the old records and delete anything stale.
	for path, set := range existing {
		if _, ok := records[path]; ok {
			continue
		}
		// Stale entry, nuke it.
		log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, "")))
		changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
	}

	sortChanges(changes)
	return changes
}

// sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order.
func sortChanges(changes []*route53.Change) {
	score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
	sort.Slice(changes, func(i, j int) bool {
		if *changes[i].Action == *changes[j].Action {
			return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name
		}
		return score[*changes[i].Action] < score[*changes[j].Action]
	})
}

// splitChanges splits up DNS changes such that each change batch
// is smaller than the given RDATA limit.
func splitChanges(changes []*route53.Change, limit int) [][]*route53.Change {
	var batches [][]*route53.Change
	var batchSize int
	for _, ch := range changes {
		// Start new batch if this change pushes the current one over the limit.
		size := changeSize(ch)
		if len(batches) == 0 || batchSize+size > limit {
			batches = append(batches, nil)
			batchSize = 0
		}
		batches[len(batches)-1] = append(batches[len(batches)-1], ch)
		batchSize += size
	}
	return batches
}

// changeSize returns the RDATA size of a DNS change.
func changeSize(ch *route53.Change) int {
	size := 0
	for _, rr := range ch.ResourceRecordSet.ResourceRecords {
		if rr.Value != nil {
			size += len(*rr.Value)
		}
	}
	return size
}

// collectRecords collects all TXT records below the given name.
func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) {
	log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID))
	var req route53.ListResourceRecordSetsInput
	req.SetHostedZoneId(c.zoneID)
	existing := make(map[string]recordSet)
	err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool {
		for _, set := range resp.ResourceRecordSets {
			if !isSubdomain(*set.Name, name) || *set.Type != "TXT" {
				continue
			}
			s := recordSet{ttl: *set.TTL}
			for _, rec := range set.ResourceRecords {
				s.values = append(s.values, *rec.Value)
			}
			name := strings.TrimSuffix(*set.Name, ".")
			existing[name] = s
		}
		return true
	})
	return existing, err
}

// newTXTChange creates a change to a TXT record.
func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change {
	var c route53.Change
	var r route53.ResourceRecordSet
	var rrs []*route53.ResourceRecord
	for _, val := range values {
		rr := new(route53.ResourceRecord)
		rr.SetValue(val)
		rrs = append(rrs, rr)
	}
	r.SetType("TXT")
	r.SetName(name)
	r.SetTTL(ttl)
	r.SetResourceRecords(rrs)
	c.SetAction(action)
	c.SetResourceRecordSet(&r)
	return &c
}

// isSubdomain returns true if name is a subdomain of domain.
func isSubdomain(name, domain string) bool {
	domain = strings.TrimSuffix(domain, ".")
	name = strings.TrimSuffix(name, ".")
	return strings.HasSuffix("."+name, "."+domain)
}

// splitTXT splits value into a list of quoted 255-character strings.
func splitTXT(value string) string {
	var result strings.Builder
	for len(value) > 0 {
		rlen := len(value)
		if rlen > 253 {
			rlen = 253
		}
		result.WriteString(strconv.Quote(value[:rlen]))
		value = value[rlen:]
	}
	return result.String()
}

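One detail worth calling out: DNS TXT records carry data as character-strings of at most 255 bytes, and splitTXT cuts at 253, presumably to leave headroom for the quoting that strconv.Quote adds around each chunk (a reading of the code, not a documented constant). A quick standalone illustration of its behavior:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitTXT mirrors the helper from the patch: it chops a long value into
// consecutive quoted chunks of at most 253 raw bytes each.
func splitTXT(value string) string {
	var result strings.Builder
	for len(value) > 0 {
		rlen := len(value)
		if rlen > 253 {
			rlen = 253
		}
		result.WriteString(strconv.Quote(value[:rlen]))
		value = value[rlen:]
	}
	return result.String()
}

func main() {
	long := strings.Repeat("x", 300)
	out := splitTXT(long)
	fmt.Println(strings.Count(out, `"`) / 2) // 2 chunks: 253 + 47 bytes
}
```
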
cmd/devp2p/dns_route53_test.go (new file)
@@ -0,0 +1,154 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "reflect"
    "testing"

    "github.com/aws/aws-sdk-go/service/route53"
)

// This test checks that computeChanges/splitChanges create DNS changes in
// leaf-added -> root-changed -> leaf-deleted order.
func TestRoute53ChangeSort(t *testing.T) {
    testTree0 := map[string]recordSet{
        "2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
            `"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
        }},
        "fdxn3sn67na5dka4j2gok7bvqi.n": {ttl: treeNodeTTL, values: []string{`"enrtree-branch:"`}},
        "n": {ttl: rootTTL, values: []string{`"enrtree-root:v1 e=2KFJOGVXDQTXXUGBH7GS7NAAAI l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=0 sig=v_-J_q_9ICQg5ztExFvLQhDBGMb0lZPJLhe3ts9LAcgqhOhtT3YFJsl8BWNDSwGtamUdR-9xl88_w-X42SVpjwE"`}},
    }

    testTree1 := map[string]string{
        "n":                            "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA",
        "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org",
        "JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24",
        "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA",
        "H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI",
        "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o",
    }

    wantChanges := []*route53.Change{
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("CREATE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("mhtdo6tmubria2xwg5ludack24.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("UPSERT"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`),
                }},
                TTL:  ip(rootTTL),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("DELETE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"),
                ResourceRecords: []*route53.ResourceRecord{
                    {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)},
                },
                TTL:  ip(3333),
                Type: sp("TXT"),
            },
        },
        {
            Action: sp("DELETE"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"),
                ResourceRecords: []*route53.ResourceRecord{{
                    Value: sp(`"enrtree-branch:"`),
                }},
                TTL:  ip(treeNodeTTL),
                Type: sp("TXT"),
            },
        },
    }

    var client route53Client
    changes := client.computeChanges("n", testTree1, testTree0)
    if !reflect.DeepEqual(changes, wantChanges) {
        t.Fatalf("wrong changes (got %d, want %d)", len(changes), len(wantChanges))
    }

    wantSplit := [][]*route53.Change{
        wantChanges[:4],
        wantChanges[4:8],
    }
    split := splitChanges(changes, 600)
    if !reflect.DeepEqual(split, wantSplit) {
        t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit))
    }
}

func sp(s string) *string { return &s }
func ip(i int64) *int64   { return &i }
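The sp and ip helpers exist because the AWS SDK for Go models every optional field as a pointer. The SDK ships equivalent helpers in its aws package, so the same code could plausibly be written with those instead (a sketch, not part of this change):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // aws.String and aws.Int64 return pointers to copies of their arguments,
    // mirroring the test-local sp/ip helpers above.
    recordType := aws.String("TXT")
    ttl := aws.Int64(3333)
    fmt.Println(*recordType, *ttl)
}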
@@ -42,6 +42,7 @@ var (
        dnsSignCommand,
        dnsTXTCommand,
        dnsCloudflareCommand,
        dnsRoute53Command,
    },
}
dnsSyncCommand = cli.Command{
@@ -66,11 +67,18 @@ var (
}
dnsCloudflareCommand = cli.Command{
    Name:      "to-cloudflare",
    Usage:     "Deploy DNS TXT records to cloudflare",
    Usage:     "Deploy DNS TXT records to CloudFlare",
    ArgsUsage: "<tree-directory>",
    Action:    dnsToCloudflare,
    Flags:     []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag},
}
dnsRoute53Command = cli.Command{
    Name:      "to-route53",
    Usage:     "Deploy DNS TXT records to Amazon Route53",
    ArgsUsage: "<tree-directory>",
    Action:    dnsToRoute53,
    Flags:     []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag},
}
)

var (
@@ -88,6 +96,11 @@ var (
}
)

const (
    rootTTL     = 1
    treeNodeTTL = 2147483647
)

// dnsSync performs dnsSyncCommand.
func dnsSync(ctx *cli.Context) error {
    var (
@@ -194,6 +207,19 @@ func dnsToCloudflare(ctx *cli.Context) error {
    return client.deploy(domain, t)
}

// dnsToRoute53 performs dnsRoute53Command.
func dnsToRoute53(ctx *cli.Context) error {
    if ctx.NArg() < 1 {
        return fmt.Errorf("need tree definition directory as argument")
    }
    domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
    if err != nil {
        return err
    }
    client := newRoute53Client(ctx)
    return client.deploy(domain, t)
}

// loadSigningKey loads a private key in Ethereum keystore format.
func loadSigningKey(keyfile string) *ecdsa.PrivateKey {
    keyjson, err := ioutil.ReadFile(keyfile)
@@ -214,8 +240,7 @@ func dnsClient(ctx *cli.Context) *dnsdisc.Client {
    if commandHasFlag(ctx, dnsTimeoutFlag) {
        cfg.Timeout = ctx.Duration(dnsTimeoutFlag.Name)
    }
    c, _ := dnsdisc.NewClient(cfg) // cannot fail because no URLs given
    return c
    return dnsdisc.NewClient(cfg)
}

// There are two file formats for DNS node trees on disk:
@@ -45,7 +45,7 @@ func init() {
    // Set up the CLI app.
    app.Flags = append(app.Flags, debug.Flags...)
    app.Before = func(ctx *cli.Context) error {
        return debug.Setup(ctx, "")
        return debug.Setup(ctx)
    }
    app.After = func(ctx *cli.Context) error {
        debug.Exit()
@@ -52,6 +52,10 @@ If you want to encrypt an existing private key, it can be specified by setting
        Name:  "privatekey",
        Usage: "file containing a raw private key to encrypt",
    },
    cli.BoolFlag{
        Name:  "lightkdf",
        Usage: "use less secure scrypt parameters",
    },
},
Action: func(ctx *cli.Context) error {
    // Check if keyfile path given and make sure it doesn't already exist.
@@ -91,7 +95,11 @@ If you want to encrypt an existing private key, it can be specified by setting

    // Encrypt key with passphrase.
    passphrase := promptPassphrase(true)
    keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
    scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP
    if ctx.Bool("lightkdf") {
        scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP
    }
    keyjson, err := keystore.EncryptKey(key, passphrase, scryptN, scryptP)
    if err != nil {
        utils.Fatalf("Error encrypting key: %v", err)
    }
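The light scrypt parameters trade key-derivation hardness for speed, which is why the test change below passes --lightkdf. A minimal sketch of the same choice using go-ethereum's keystore package directly (the directory path and passphrase are made up):

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
    // LightScryptN/LightScryptP derive keys much faster than the standard
    // parameters, at the cost of weaker brute-force resistance.
    ks := keystore.NewKeyStore("/tmp/keys", keystore.LightScryptN, keystore.LightScryptP)
    acct, err := ks.NewAccount("correct horse battery staple")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created account:", acct.Address.Hex())
}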
@@ -43,6 +43,7 @@ func init() {
        commandSignMessage,
        commandVerifyMessage,
    }
    cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

// Commonly used command line flags.
@@ -34,7 +34,7 @@ func TestMessageSignVerify(t *testing.T) {
    message := "test message"

    // Create the key.
    generate := runEthkey(t, "generate", keyfile)
    generate := runEthkey(t, "generate", "--lightkdf", keyfile)
    generate.Expect(`
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}}
@@ -34,17 +34,22 @@ var disasmCommand = cli.Command{
}

func disasmCmd(ctx *cli.Context) error {
    if len(ctx.Args().First()) == 0 {
        return errors.New("filename required")
    var in string
    switch {
    case len(ctx.Args().First()) > 0:
        fn := ctx.Args().First()
        input, err := ioutil.ReadFile(fn)
        if err != nil {
            return err
        }
        in = string(input)
    case ctx.GlobalIsSet(InputFlag.Name):
        in = ctx.GlobalString(InputFlag.Name)
    default:
        return errors.New("Missing filename or --input value")
    }

    fn := ctx.Args().First()
    in, err := ioutil.ReadFile(fn)
    if err != nil {
        return err
    }

    code := strings.TrimSpace(string(in))
    code := strings.TrimSpace(in)
    fmt.Printf("%v\n", code)
    return asm.PrintDisassembled(code)
}
@@ -87,6 +87,10 @@ var (
        Name:  "verbosity",
        Usage: "sets the verbosity level",
    }
    BenchFlag = cli.BoolFlag{
        Name:  "bench",
        Usage: "benchmark the execution",
    }
    CreateFlag = cli.BoolFlag{
        Name:  "create",
        Usage: "indicates the action should be create rather than call",
@@ -124,6 +128,7 @@ var (

func init() {
    app.Flags = []cli.Flag{
        BenchFlag,
        CreateFlag,
        DebugFlag,
        VerbosityFlag,
@@ -152,6 +157,7 @@ func init() {
        runCommand,
        stateTestCommand,
    }
    cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}

func main() {
@@ -25,6 +25,7 @@ import (
    "os"
    goruntime "runtime"
    "runtime/pprof"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
@@ -69,6 +70,33 @@ func readGenesis(genesisPath string) *core.Genesis {
    return genesis
}

func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, uint64, time.Duration, error) {
    var (
        output   []byte
        gasLeft  uint64
        execTime time.Duration
        err      error
    )
    if bench {
        result := testing.Benchmark(func(b *testing.B) {
            for i := 0; i < b.N; i++ {
                output, gasLeft, err = execFunc()
            }
        })
        // Get the average execution time from the benchmarking result.
        // There are other useful stats here that could be reported.
        execTime = time.Duration(result.NsPerOp())
    } else {
        startTime := time.Now()
        output, gasLeft, err = execFunc()
        execTime = time.Since(startTime)
    }
    return output, gasLeft, execTime, err
}
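timedExec leans on the standard library's testing.Benchmark, which also works outside of test binaries: it calls the function repeatedly, scaling b.N until the measurement is stable, then reports aggregate statistics. A self-contained sketch of the same pattern:

package main

import (
    "crypto/sha256"
    "fmt"
    "testing"
    "time"
)

func main() {
    payload := make([]byte, 1024)
    // testing.Benchmark picks b.N automatically and returns aggregate stats.
    result := testing.Benchmark(func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            sha256.Sum256(payload)
        }
    })
    fmt.Println("avg per op:", time.Duration(result.NsPerOp()))
}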
func runCmd(ctx *cli.Context) error {
    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
    glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
@@ -116,11 +144,7 @@ func runCmd(ctx *cli.Context) error {
        receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
    }

    var (
        code []byte
        ret  []byte
        err  error
    )
    var code []byte
    codeFileFlag := ctx.GlobalString(CodeFileFlag.Name)
    codeFlag := ctx.GlobalString(CodeFlag.Name)

@@ -203,10 +227,10 @@ func runCmd(ctx *cli.Context) error {
    } else {
        runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
    }
    tstart := time.Now()
    var leftOverGas uint64

    var hexInput []byte
    if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" {
        var err error
        if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil {
            fmt.Printf("could not load input from file: %v\n", err)
            os.Exit(1)
@@ -215,16 +239,24 @@ func runCmd(ctx *cli.Context) error {
        hexInput = []byte(ctx.GlobalString(InputFlag.Name))
    }
    input := common.FromHex(string(bytes.TrimSpace(hexInput)))

    var execFunc func() ([]byte, uint64, error)
    if ctx.GlobalBool(CreateFlag.Name) {
        input = append(code, input...)
        ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
        execFunc = func() ([]byte, uint64, error) {
            output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
            return output, gasLeft, err
        }
    } else {
        if len(code) > 0 {
            statedb.SetCode(receiver, code)
        }
        ret, leftOverGas, err = runtime.Call(receiver, input, &runtimeConfig)
        execFunc = func() ([]byte, uint64, error) {
            return runtime.Call(receiver, input, &runtimeConfig)
        }
    }
    execTime := time.Since(tstart)

    output, leftOverGas, execTime, err := timedExec(ctx.GlobalBool(BenchFlag.Name), execFunc)

    if ctx.GlobalBool(DumpFlag.Name) {
        statedb.Commit(true)
@@ -267,7 +299,7 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
    }
    if tracer == nil {
        fmt.Printf("0x%x\n", ret)
        fmt.Printf("0x%x\n", output)
        if err != nil {
            fmt.Printf(" error: %v\n", err)
        }
@@ -351,6 +351,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {

    if head == nil || balance == nil {
        // Report the faucet offline until initial stats are ready
        //lint:ignore ST1005 This error is to be displayed in the browser
        if err = sendError(conn, errors.New("Faucet offline")); err != nil {
            log.Warn("Failed to send faucet error to client", "err", err)
            return
@@ -359,11 +360,14 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
        }
    }
    // Send over the initial stats and the latest header
    f.lock.RLock()
    reqs := f.reqs
    f.lock.RUnlock()
    if err = send(conn, map[string]interface{}{
        "funds":    new(big.Int).Div(balance, ether),
        "funded":   nonce,
        "peers":    f.stack.Server().PeerCount(),
        "requests": f.reqs,
        "requests": reqs,
    }, 3*time.Second); err != nil {
        log.Warn("Failed to send initial stats to client", "err", err)
        return
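Copying f.reqs into a local variable under the read lock, then releasing the lock before the potentially slow network send, is the standard way to avoid both a data race and holding a lock across I/O. A self-contained sketch of the pattern (the stats type is made up):

package main

import (
    "fmt"
    "sync"
)

type stats struct {
    mu   sync.RWMutex
    reqs []string
}

// snapshot copies the shared slice header under the read lock, so the
// caller can serialize it without racing concurrent writers. Note this
// assumes writers replace the slice rather than mutating elements in
// place; otherwise a deep copy would be needed.
func (s *stats) snapshot() []string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.reqs
}

func main() {
    s := &stats{reqs: []string{"a", "b"}}
    fmt.Println(s.snapshot()) // safe to use after the lock is released
}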
@@ -392,6 +396,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
        continue
    }
    if msg.Tier >= uint(*tiersFlag) {
        //lint:ignore ST1005 This error is to be displayed in the browser
        if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
            log.Warn("Failed to send tier error to client", "err", err)
            return
@@ -429,6 +434,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
    }
    if !result.Success {
        log.Warn("Captcha verification failed", "err", string(result.Errors))
        //lint:ignore ST1005 it's funny and the robot won't mind
        if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
            log.Warn("Failed to send captcha failure to client", "err", err)
            return
@@ -450,6 +456,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
        }
        continue
    case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
        //lint:ignore ST1005 Google is a company name and should be capitalized.
        if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil {
            log.Warn("Failed to send Google+ deprecation to client", "err", err)
            return
@@ -462,6 +469,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
    case *noauthFlag:
        username, avatar, address, err = authNoAuth(msg.URL)
    default:
        //lint:ignore ST1005 This error is to be displayed in the browser
        err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
    }
    if err != nil {
@@ -520,7 +528,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {

    // Send an error if too frequent funding, otherwise a success
    if !fund {
        if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
        if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
            log.Warn("Failed to send funding error to client", "err", err)
            return
        }
@@ -682,6 +690,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
    // Ensure the user specified a meaningful URL, no fancy nonsense
    parts := strings.Split(url, "/")
    if len(parts) < 4 || parts[len(parts)-2] != "status" {
        //lint:ignore ST1005 This error is to be displayed in the browser
        return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
    }
    // Twitter's API isn't really friendly with direct links. Still, we don't
@@ -696,6 +705,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
    // Resolve the username from the final redirect, no intermediate junk
    parts = strings.Split(res.Request.URL.String(), "/")
    if len(parts) < 4 || parts[len(parts)-2] != "status" {
        //lint:ignore ST1005 This error is to be displayed in the browser
        return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
    }
    username := parts[len(parts)-3]
@@ -706,6 +716,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
    }
    address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
    if address == (common.Address{}) {
        //lint:ignore ST1005 This error is to be displayed in the browser
        return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
    }
    var avatar string
@@ -721,6 +732,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
    // Ensure the user specified a meaningful URL, no fancy nonsense
    parts := strings.Split(url, "/")
    if len(parts) < 4 || parts[len(parts)-2] != "posts" {
        //lint:ignore ST1005 This error is to be displayed in the browser
        return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
    }
    username := parts[len(parts)-3]
@@ -740,6 +752,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
    }
    address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
    if address == (common.Address{}) {
        //lint:ignore ST1005 This error is to be displayed in the browser
        return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
    }
    var avatar string
@@ -755,6 +768,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
func authNoAuth(url string) (string, string, common.Address, error) {
    address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
    if address == (common.Address{}) {
        //lint:ignore ST1005 This error is to be displayed in the browser
        return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
    }
    return address.Hex() + "@noauth", "", address, nil
@@ -56,6 +56,18 @@ This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
    }
    dumpGenesisCommand = cli.Command{
        Action:    utils.MigrateFlags(dumpGenesis),
        Name:      "dumpgenesis",
        Usage:     "Dumps genesis block JSON configuration to stdout",
        ArgsUsage: "",
        Flags: []cli.Flag{
            utils.DataDirFlag,
        },
        Category: "BLOCKCHAIN COMMANDS",
        Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
    }
    importCommand = cli.Command{
        Action: utils.MigrateFlags(importChain),
@@ -227,6 +239,17 @@ func initGenesis(ctx *cli.Context) error {
    return nil
}

func dumpGenesis(ctx *cli.Context) error {
    genesis := utils.MakeGenesis(ctx)
    if genesis == nil {
        genesis = core.DefaultGenesisBlock()
    }
    if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
        utils.Fatalf("could not encode genesis")
    }
    return nil
}

func importChain(ctx *cli.Context) error {
    if len(ctx.Args()) < 1 {
        utils.Fatalf("This command requires an argument.")
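dumpGenesis streams the genesis straight to stdout with json.NewEncoder rather than building an intermediate byte slice via json.Marshal. A self-contained sketch of that choice (the genesisStub type is made up for illustration):

package main

import (
    "encoding/json"
    "os"
)

type genesisStub struct {
    ChainID  uint64 `json:"chainId"`
    GasLimit uint64 `json:"gasLimit"`
}

func main() {
    g := genesisStub{ChainID: 1, GasLimit: 5000}
    // The encoder writes directly to the stream and appends a trailing
    // newline, avoiding the extra allocation of Marshal-then-Write.
    if err := json.NewEncoder(os.Stdout).Encode(g); err != nil {
        panic(err)
    }
}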
@@ -99,8 +99,8 @@ func defaultNodeConfig() node.Config {
    cfg := node.DefaultConfig
    cfg.Name = clientIdentifier
    cfg.Version = params.VersionWithCommit(gitCommit, gitDate)
    cfg.HTTPModules = append(cfg.HTTPModules, "eth", "shh")
    cfg.WSModules = append(cfg.WSModules, "eth", "shh")
    cfg.HTTPModules = append(cfg.HTTPModules, "eth")
    cfg.WSModules = append(cfg.WSModules, "eth")
    cfg.IPCPath = "geth.ipc"
    return cfg
}
@@ -150,6 +150,9 @@ func makeFullNode(ctx *cli.Context) *node.Node {
    if ctx.GlobalIsSet(utils.OverrideIstanbulFlag.Name) {
        cfg.Eth.OverrideIstanbul = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideIstanbulFlag.Name))
    }
    if ctx.GlobalIsSet(utils.OverrideMuirGlacierFlag.Name) {
        cfg.Eth.OverrideMuirGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideMuirGlacierFlag.Name))
    }
    utils.RegisterEthService(stack, &cfg.Eth)

    // Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
@@ -51,7 +51,9 @@ func TestConsoleWelcome(t *testing.T) {
    geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
    geth.SetTemplateFunc("gover", runtime.Version)
    geth.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
    geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
    geth.SetTemplateFunc("niltime", func() string {
        return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
    })
    geth.SetTemplateFunc("apis", func() string { return ipcAPIs })

    // Verify the actual welcome message to the required template
@@ -87,11 +89,14 @@ func TestIPCAttachWelcome(t *testing.T) {
        "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
        "--etherbase", coinbase, "--shh", "--ipcpath", ipc)

    defer func() {
        geth.Interrupt()
        geth.ExpectExit()
    }()

    waitForEndpoint(t, ipc, 3*time.Second)
    testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)

    geth.Interrupt()
    geth.ExpectExit()
}

func TestHTTPAttachWelcome(t *testing.T) {
@@ -100,13 +105,14 @@ func TestHTTPAttachWelcome(t *testing.T) {
    geth := runGeth(t,
        "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
        "--etherbase", coinbase, "--rpc", "--rpcport", port)
    defer func() {
        geth.Interrupt()
        geth.ExpectExit()
    }()

    endpoint := "http://127.0.0.1:" + port
    waitForEndpoint(t, endpoint, 3*time.Second)
    testAttachWelcome(t, geth, endpoint, httpAPIs)

    geth.Interrupt()
    geth.ExpectExit()
}

func TestWSAttachWelcome(t *testing.T) {
@@ -116,13 +122,14 @@ func TestWSAttachWelcome(t *testing.T) {
    geth := runGeth(t,
        "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
        "--etherbase", coinbase, "--ws", "--wsport", port)
    defer func() {
        geth.Interrupt()
        geth.ExpectExit()
    }()

    endpoint := "ws://127.0.0.1:" + port
    waitForEndpoint(t, endpoint, 3*time.Second)
    testAttachWelcome(t, geth, endpoint, httpAPIs)

    geth.Interrupt()
    geth.ExpectExit()
}

func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
@@ -137,7 +144,9 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
    attach.SetTemplateFunc("gover", runtime.Version)
    attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
    attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
    attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
    attach.SetTemplateFunc("niltime", func() string {
        return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
    })
    attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
    attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })
    attach.SetTemplateFunc("apis", func() string { return apis })
@@ -70,6 +70,7 @@ var (
    utils.NoUSBFlag,
    utils.SmartCardDaemonPathFlag,
    utils.OverrideIstanbulFlag,
    utils.OverrideMuirGlacierFlag,
    utils.EthashCacheDirFlag,
    utils.EthashCachesInMemoryFlag,
    utils.EthashCachesOnDiskFlag,
@@ -130,6 +131,7 @@ var (
    utils.NetrestrictFlag,
    utils.NodeKeyFileFlag,
    utils.NodeKeyHexFlag,
    utils.DNSDiscoveryFlag,
    utils.DeveloperFlag,
    utils.DeveloperPeriodFlag,
    utils.TestnetFlag,
@@ -193,7 +195,7 @@ func init() {
    // Initialize the CLI app and start Geth
    app.Action = geth
    app.HideVersion = true // we have a command to print the version
    app.Copyright = "Copyright 2013-2019 The go-ethereum Authors"
    app.Copyright = "Copyright 2013-2020 The go-ethereum Authors"
    app.Commands = []cli.Command{
        // See chaincmd.go:
        initCommand,
@@ -204,6 +206,7 @@ func init() {
        copydbCommand,
        removedbCommand,
        dumpCommand,
        dumpGenesisCommand,
        inspectCommand,
        // See accountcmd.go:
        accountCommand,
@@ -232,7 +235,7 @@ func init() {
    app.Flags = append(app.Flags, metricsFlags...)

    app.Before = func(ctx *cli.Context) error {
        return debug.Setup(ctx, "")
        return debug.Setup(ctx)
    }
    app.After = func(ctx *cli.Context) error {
        debug.Exit()
@@ -80,6 +80,7 @@ type RetestethEthAPI interface {
    SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error)
    BlockNumber(ctx context.Context) (uint64, error)
    GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error)
    GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error)
    GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error)
    GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error)
    GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error)
@@ -110,7 +111,6 @@ type RetestethAPI struct {
    genesisHash   common.Hash
    engine        *NoRewardEngine
    blockchain    *core.BlockChain
    blockNumber   uint64
    txMap         map[common.Address]map[uint64]*types.Transaction // Sender -> Nonce -> Transaction
    txSenders     map[common.Address]struct{}                      // Set of transaction senders
    blockInterval uint64
@@ -356,7 +356,7 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
    ChainID:        chainId,
    HomesteadBlock: homesteadBlock,
    DAOForkBlock:   daoForkBlock,
    DAOForkSupport: false,
    DAOForkSupport: true,
    EIP150Block:    eip150Block,
    EIP155Block:    eip155Block,
    EIP158Block:    eip158Block,
@@ -411,7 +411,6 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
    api.engine = engine
    api.blockchain = blockchain
    api.db = state.NewDatabase(api.ethDb)
    api.blockNumber = 0
    api.txMap = make(map[common.Address]map[uint64]*types.Transaction)
    api.txSenders = make(map[common.Address]struct{})
    api.blockInterval = 0
@@ -424,7 +423,7 @@ func (api *RetestethAPI) SendRawTransaction(ctx context.Context, rawTx hexutil.B
    // Return nil is not by mistake - some tests include sending transaction where gasLimit overflows uint64
    return common.Hash{}, nil
    }
    signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.blockNumber)))
    signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.currentNumber())))
    sender, err := types.Sender(signer, tx)
    if err != nil {
        return common.Hash{}, err
@@ -450,9 +449,17 @@ func (api *RetestethAPI) MineBlocks(ctx context.Context, number uint64) (bool, e
    return true, nil
}

func (api *RetestethAPI) currentNumber() uint64 {
    if current := api.blockchain.CurrentBlock(); current != nil {
        return current.NumberU64()
    }
    return 0
}

func (api *RetestethAPI) mineBlock() error {
    parentHash := rawdb.ReadCanonicalHash(api.ethDb, api.blockNumber)
    parent := rawdb.ReadBlock(api.ethDb, parentHash, api.blockNumber)
    number := api.currentNumber()
    parentHash := rawdb.ReadCanonicalHash(api.ethDb, number)
    parent := rawdb.ReadBlock(api.ethDb, parentHash, number)
    var timestamp uint64
    if api.blockInterval == 0 {
        timestamp = uint64(time.Now().Unix())
@@ -462,7 +469,7 @@ func (api *RetestethAPI) mineBlock() error {
    gasLimit := core.CalcGasLimit(parent, 9223372036854775807, 9223372036854775807)
    header := &types.Header{
        ParentHash: parent.Hash(),
        Number:     big.NewInt(int64(api.blockNumber + 1)),
        Number:     big.NewInt(int64(number + 1)),
        GasLimit:   gasLimit,
        Extra:      api.extraData,
        Time:       timestamp,
@@ -495,7 +502,6 @@ func (api *RetestethAPI) mineBlock() error {
    txCount := 0
    var txs []*types.Transaction
    var receipts []*types.Receipt
    var coalescedLogs []*types.Log
    var blockFull = gasPool.Gas() < params.TxGas
    for address := range api.txSenders {
        if blockFull {
@@ -522,7 +528,6 @@ func (api *RetestethAPI) mineBlock() error {
        }
        txs = append(txs, tx)
        receipts = append(receipts, receipt)
        coalescedLogs = append(coalescedLogs, receipt.Logs...)
        delete(m, nonce)
        if len(m) == 0 {
            // Last tx for the sender
@@ -550,8 +555,7 @@ func (api *RetestethAPI) importBlock(block *types.Block) error {
    if _, err := api.blockchain.InsertChain([]*types.Block{block}); err != nil {
        return err
    }
    api.blockNumber = block.NumberU64()
    fmt.Printf("Imported block %d\n", block.NumberU64())
    fmt.Printf("Imported block %d, head is %d\n", block.NumberU64(), api.currentNumber())
    return nil
}

@@ -576,7 +580,9 @@ func (api *RetestethAPI) RewindToBlock(ctx context.Context, newHead uint64) (boo
    if err := api.blockchain.SetHead(newHead); err != nil {
        return false, err
    }
    api.blockNumber = newHead
    // When we rewind, the transaction pool should be cleaned out.
    api.txMap = make(map[common.Address]map[uint64]*types.Transaction)
    api.txSenders = make(map[common.Address]struct{})
    return true, nil
}

@@ -596,8 +602,7 @@ func (api *RetestethAPI) GetLogHash(ctx context.Context, txHash common.Hash) (co
}

func (api *RetestethAPI) BlockNumber(ctx context.Context) (uint64, error) {
    //fmt.Printf("BlockNumber, response: %d\n", api.blockNumber)
    return api.blockNumber, nil
    return api.currentNumber(), nil
}

func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) {
@@ -614,6 +619,20 @@ func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexO
    return nil, fmt.Errorf("block %d not found", blockNr)
}

func (api *RetestethAPI) GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) {
    block := api.blockchain.GetBlockByHash(blockHash)
    if block != nil {
        response, err := RPCMarshalBlock(block, true, fullTx)
        if err != nil {
            return nil, err
        }
        response["author"] = response["miner"]
        response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), block.Number().Uint64()))
        return response, err
    }
    return nil, fmt.Errorf("block 0x%x not found", blockHash)
}

func (api *RetestethAPI) AccountRange(ctx context.Context,
    blockHashOrNumber *math.HexOrDecimal256, txIndex uint64,
    addressHash *math.HexOrDecimal256, maxResults uint64,
@@ -682,9 +701,6 @@ func (api *RetestethAPI) AccountRange(ctx context.Context,
    for i := 0; i < int(maxResults) && it.Next(); i++ {
        if preimage := accountTrie.GetKey(it.Key); preimage != nil {
            result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage)
            //fmt.Printf("%x: %x\n", it.Key, preimage)
        } else {
            //fmt.Printf("could not find preimage for %x\n", it.Key)
        }
    }
    //fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap))
@@ -808,9 +824,6 @@ func (api *RetestethAPI) StorageRangeAt(ctx context.Context,
            Key:   string(ks),
            Value: string(vs),
        }
        //fmt.Printf("Key: %s, Value: %s\n", ks, vs)
    } else {
        //fmt.Printf("Did not find preimage for %x\n", it.Key)
    }
    }
    if it.Next() {
@@ -889,7 +902,7 @@ func retesteth(ctx *cli.Context) error {
    log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()

abortChan := make(chan os.Signal)
abortChan := make(chan os.Signal, 11)
signal.Notify(abortChan, os.Interrupt)

sig := <-abortChan
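Giving abortChan a buffer matters: signal.Notify never blocks when delivering, so a signal arriving while the receiver is busy is silently dropped on an unbuffered channel. A minimal self-contained sketch of the corrected pattern (a capacity of 1 is the conventional choice; the 11 above is just a generous buffer):

package main

import (
    "fmt"
    "os"
    "os/signal"
)

func main() {
    // A buffered channel guarantees at least one signal is retained even
    // if the program isn't ready to receive at the moment it arrives.
    abort := make(chan os.Signal, 1)
    signal.Notify(abort, os.Interrupt)

    fmt.Println("waiting for Ctrl-C...")
    <-abort
    fmt.Println("shutting down")
}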
@@ -182,6 +182,7 @@ var AppHelpFlagGroups = []flagGroup{
    utils.BootnodesFlag,
    utils.BootnodesV4Flag,
    utils.BootnodesV5Flag,
    utils.DNSDiscoveryFlag,
    utils.ListenPortFlag,
    utils.MaxPeersFlag,
    utils.MaxPendingPeersFlag,
@@ -41,7 +41,7 @@ var dashboardContent = `
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">

<title>{{.NetworkTitle}}: Ethereum Testnet</title>
<title>{{.NetworkTitle}}: Network Dashboard</title>

<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet">
@@ -77,6 +77,17 @@ SUBCOMMANDS:
    {{range $categorized.Flags}}{{"\t"}}{{.}}
    {{end}}
{{end}}{{end}}`

    OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
    {{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
    {{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)

func init() {
@@ -226,6 +237,10 @@ var (
    Name:  "override.istanbul",
    Usage: "Manually specify Istanbul fork-block, overriding the bundled setting",
}
OverrideMuirGlacierFlag = cli.Uint64Flag{
    Name:  "override.muirglacier",
    Usage: "Manually specify Muir Glacier fork-block, overriding the bundled setting",
}
// Light server and client settings
LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021
    Name: "lightserv",
@@ -643,6 +658,10 @@ var (
    Name:  "netrestrict",
    Usage: "Restricts network communication to the given IP networks (CIDR masks)",
}
DNSDiscoveryFlag = cli.StringFlag{
    Name:  "discovery.dns",
    Usage: "Sets DNS discovery entry points (use \"\" to disable DNS)",
}

// ATM the url is left to the user and deployment to
JSpathFlag = cli.StringFlag{
@@ -796,9 +815,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
    switch {
    case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
        if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
            urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
            urls = splitAndTrim(ctx.GlobalString(BootnodesV4Flag.Name))
        } else {
            urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
            urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
        }
    case ctx.GlobalBool(TestnetFlag.Name):
        urls = params.TestnetBootnodes
@@ -830,9 +849,9 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
    switch {
    case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
        if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
            urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
            urls = splitAndTrim(ctx.GlobalString(BootnodesV5Flag.Name))
        } else {
            urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
            urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
        }
    case ctx.GlobalBool(RinkebyFlag.Name):
        urls = params.RinkebyBootnodes
@@ -1462,6 +1481,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
    if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
        cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name))
    }
    if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) {
        urls := ctx.GlobalString(DNSDiscoveryFlag.Name)
        if urls == "" {
            cfg.DiscoveryURLs = []string{}
        } else {
            cfg.DiscoveryURLs = splitAndTrim(urls)
        }
    }

    // Override any default configs for hard coded networks.
    switch {
@@ -1470,16 +1497,19 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
        cfg.NetworkId = 3
    }
    cfg.Genesis = core.DefaultTestnetGenesisBlock()
    setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.TestnetGenesisHash])
case ctx.GlobalBool(RinkebyFlag.Name):
    if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
        cfg.NetworkId = 4
    }
    cfg.Genesis = core.DefaultRinkebyGenesisBlock()
    setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.RinkebyGenesisHash])
case ctx.GlobalBool(GoerliFlag.Name):
    if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
        cfg.NetworkId = 5
    }
    cfg.Genesis = core.DefaultGoerliGenesisBlock()
    setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.GoerliGenesisHash])
case ctx.GlobalBool(DeveloperFlag.Name):
    if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
        cfg.NetworkId = 1337
@@ -1506,9 +1536,22 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
    if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
        cfg.Miner.GasPrice = big.NewInt(1)
    }
default:
    if cfg.NetworkId == 1 {
        setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.MainnetGenesisHash])
    }
}
}

// setDNSDiscoveryDefaults configures DNS discovery with the given URL if
// no URLs are set.
func setDNSDiscoveryDefaults(cfg *eth.Config, url string) {
    if cfg.DiscoveryURLs != nil {
        return
    }
    cfg.DiscoveryURLs = []string{url}
}
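Together, the flag handler and setDNSDiscoveryDefaults give three distinct states: flag unset (nil slice, so the per-network default applies), --discovery.dns "" (empty non-nil slice, DNS discovery disabled), and an explicit URL list. A self-contained sketch of that nil-versus-empty distinction (the enrtree URL is made up):

package main

import "fmt"

type config struct {
    DiscoveryURLs []string
}

// applyDefault mirrors setDNSDiscoveryDefaults: only a nil slice means
// "the user expressed no preference", so only then is the default applied.
func applyDefault(cfg *config, url string) {
    if cfg.DiscoveryURLs != nil {
        return
    }
    cfg.DiscoveryURLs = []string{url}
}

func main() {
    var unset config                              // flag not given
    disabled := config{DiscoveryURLs: []string{}} // --discovery.dns ""
    applyDefault(&unset, "enrtree://example@nodes.example.org")
    applyDefault(&disabled, "enrtree://example@nodes.example.org")
    fmt.Println(len(unset.DiscoveryURLs), len(disabled.DiscoveryURLs)) // 1 0
}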
// RegisterEthService adds an Ethereum client to the stack.
func RegisterEthService(stack *node.Node, cfg *eth.Config) {
    var err error
@@ -31,44 +31,93 @@ func Now() AbsTime {
    return AbsTime(monotime.Now())
}

// Add returns t + d.
// Add returns t + d as absolute time.
func (t AbsTime) Add(d time.Duration) AbsTime {
    return t + AbsTime(d)
}

// Sub returns t - t2 as a duration.
func (t AbsTime) Sub(t2 AbsTime) time.Duration {
    return time.Duration(t - t2)
}

// The Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
    Now() AbsTime
    Sleep(time.Duration)
    After(time.Duration) <-chan time.Time
    NewTimer(time.Duration) ChanTimer
    After(time.Duration) <-chan AbsTime
    AfterFunc(d time.Duration, f func()) Timer
}

// Timer represents a cancellable event returned by AfterFunc
// Timer is a cancellable event created by AfterFunc.
type Timer interface {
    // Stop cancels the timer. It returns false if the timer has already
    // expired or been stopped.
    Stop() bool
}

// ChanTimer is a cancellable event created by NewTimer.
type ChanTimer interface {
    Timer

    // The channel returned by C receives a value when the timer expires.
    C() <-chan AbsTime
    // Reset reschedules the timer with a new timeout.
    // It should be invoked only on stopped or expired timers with drained channels.
    Reset(time.Duration)
}

// System implements Clock using the system clock.
type System struct{}

// Now returns the current monotonic time.
func (System) Now() AbsTime {
func (c System) Now() AbsTime {
    return AbsTime(monotime.Now())
}

// Sleep blocks for the given duration.
func (System) Sleep(d time.Duration) {
func (c System) Sleep(d time.Duration) {
    time.Sleep(d)
}

// NewTimer creates a timer which can be rescheduled.
func (c System) NewTimer(d time.Duration) ChanTimer {
    ch := make(chan AbsTime, 1)
    t := time.AfterFunc(d, func() {
        // This send is non-blocking because that's how time.Timer
        // behaves. It doesn't matter in the happy case, but does
        // when Reset is misused.
        select {
        case ch <- c.Now():
        default:
        }
    })
    return &systemTimer{t, ch}
}

// After returns a channel which receives the current time after d has elapsed.
func (System) After(d time.Duration) <-chan time.Time {
    return time.After(d)
func (c System) After(d time.Duration) <-chan AbsTime {
    ch := make(chan AbsTime, 1)
    time.AfterFunc(d, func() { ch <- c.Now() })
    return ch
}

// AfterFunc runs f on a new goroutine after the duration has elapsed.
func (System) AfterFunc(d time.Duration, f func()) Timer {
func (c System) AfterFunc(d time.Duration, f func()) Timer {
    return time.AfterFunc(d, f)
}

type systemTimer struct {
    *time.Timer
    ch <-chan AbsTime
}

func (st *systemTimer) Reset(d time.Duration) {
    st.Timer.Reset(d)
}

func (st *systemTimer) C() <-chan AbsTime {
    return st.ch
}
@@ -17,6 +17,7 @@
package mclock

import (
    "container/heap"
    "sync"
    "time"
)
@@ -32,18 +33,24 @@ import (
// the timeout using a channel or semaphore.
type Simulated struct {
    now       AbsTime
    scheduled []*simTimer
    scheduled simTimerHeap
    mu        sync.RWMutex
    cond      *sync.Cond
    lastId    uint64
}

// simTimer implements Timer on the virtual clock.
// simTimer implements ChanTimer on the virtual clock.
type simTimer struct {
    do func()
    at AbsTime
    id uint64
    s  *Simulated
    at    AbsTime
    index int // position in s.scheduled
    s     *Simulated
    do    func()
    ch    <-chan AbsTime
}

func (s *Simulated) init() {
    if s.cond == nil {
        s.cond = sync.NewCond(&s.mu)
    }
}

// Run moves the clock by the given duration, executing all timers before that duration.
@@ -53,14 +60,9 @@ func (s *Simulated) Run(d time.Duration) {

    end := s.now + AbsTime(d)
    var do []func()
    for len(s.scheduled) > 0 {
        ev := s.scheduled[0]
        if ev.at > end {
            break
        }
        s.now = ev.at
    for len(s.scheduled) > 0 && s.scheduled[0].at <= end {
        ev := heap.Pop(&s.scheduled).(*simTimer)
        do = append(do, ev.do)
        s.scheduled = s.scheduled[1:]
    }
    s.now = end
    s.mu.Unlock()
@@ -102,14 +104,22 @@ func (s *Simulated) Sleep(d time.Duration) {
    <-s.After(d)
}

// NewTimer creates a timer which fires when the clock has advanced by d.
func (s *Simulated) NewTimer(d time.Duration) ChanTimer {
    s.mu.Lock()
    defer s.mu.Unlock()

    ch := make(chan AbsTime, 1)
    var timer *simTimer
    timer = s.schedule(d, func() { ch <- timer.at })
    timer.ch = ch
    return timer
}

// After returns a channel which receives the current time after the clock
// has advanced by d.
func (s *Simulated) After(d time.Duration) <-chan time.Time {
    after := make(chan time.Time, 1)
    s.AfterFunc(d, func() {
        after <- (time.Time{}).Add(time.Duration(s.now))
    })
    return after
func (s *Simulated) After(d time.Duration) <-chan AbsTime {
    return s.NewTimer(d).C()
}

// AfterFunc runs fn after the clock has advanced by d. Unlike with the system
@@ -117,46 +127,83 @@ func (s *Simulated) After(d time.Duration) <-chan time.Time {
func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
    s.mu.Lock()
    defer s.mu.Unlock()

    return s.schedule(d, fn)
}

func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer {
    s.init()

    at := s.now + AbsTime(d)
    s.lastId++
    id := s.lastId
    l, h := 0, len(s.scheduled)
    ll := h
    for l != h {
        m := (l + h) / 2
        if (at < s.scheduled[m].at) || ((at == s.scheduled[m].at) && (id < s.scheduled[m].id)) {
            h = m
        } else {
            l = m + 1
        }
    }
    ev := &simTimer{do: fn, at: at, s: s}
    s.scheduled = append(s.scheduled, nil)
    copy(s.scheduled[l+1:], s.scheduled[l:ll])
    s.scheduled[l] = ev
    heap.Push(&s.scheduled, ev)
    s.cond.Broadcast()
    return ev
}

func (ev *simTimer) Stop() bool {
    s := ev.s
    s.mu.Lock()
    defer s.mu.Unlock()
    ev.s.mu.Lock()
    defer ev.s.mu.Unlock()

    for i := 0; i < len(s.scheduled); i++ {
        if s.scheduled[i] == ev {
            s.scheduled = append(s.scheduled[:i], s.scheduled[i+1:]...)
            s.cond.Broadcast()
            return true
        }
    if ev.index < 0 {
        return false
    }
    return false
    heap.Remove(&ev.s.scheduled, ev.index)
    ev.s.cond.Broadcast()
    ev.index = -1
    return true
}

func (s *Simulated) init() {
    if s.cond == nil {
        s.cond = sync.NewCond(&s.mu)
func (ev *simTimer) Reset(d time.Duration) {
    if ev.ch == nil {
        panic("mclock: Reset() on timer created by AfterFunc")
    }

    ev.s.mu.Lock()
    defer ev.s.mu.Unlock()
    ev.at = ev.s.now.Add(d)
    if ev.index < 0 {
        heap.Push(&ev.s.scheduled, ev) // already expired
    } else {
        heap.Fix(&ev.s.scheduled, ev.index) // hasn't fired yet, reschedule
    }
    ev.s.cond.Broadcast()
}

func (ev *simTimer) C() <-chan AbsTime {
    if ev.ch == nil {
        panic("mclock: C() on timer created by AfterFunc")
    }
    return ev.ch
}

type simTimerHeap []*simTimer

func (h *simTimerHeap) Len() int {
    return len(*h)
}

func (h *simTimerHeap) Less(i, j int) bool {
    return (*h)[i].at < (*h)[j].at
}

func (h *simTimerHeap) Swap(i, j int) {
    (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
    (*h)[i].index = i
    (*h)[j].index = j
}

func (h *simTimerHeap) Push(x interface{}) {
    t := x.(*simTimer)
    t.index = len(*h)
    *h = append(*h, t)
}

func (h *simTimerHeap) Pop() interface{} {
    end := len(*h) - 1
    t := (*h)[end]
    t.index = -1
    (*h)[end] = nil
    *h = (*h)[:end]
    return t
}
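The point of the Clock abstraction is that code written against it can be driven deterministically in tests: the Simulated clock only advances when Run is called. A minimal self-contained sketch using the package above (common/mclock in go-ethereum):

package main

import (
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
    var clock mclock.Simulated

    // Schedule a callback one hour of *virtual* time in the future.
    fired := false
    clock.AfterFunc(time.Hour, func() { fired = true })

    clock.Run(30 * time.Minute)
    fmt.Println(fired) // false: only 30 virtual minutes have passed

    clock.Run(31 * time.Minute)
    fmt.Println(fired) // true: the timer fired during the second Run
}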
@@ -25,14 +25,16 @@ var _ Clock = System{}
var _ Clock = new(Simulated)

func TestSimulatedAfter(t *testing.T) {
    const timeout = 30 * time.Minute
    const adv = time.Minute

    var (
        c   Simulated
        end = c.Now().Add(timeout)
        ch  = c.After(timeout)
    var (
        timeout = 30 * time.Minute
        offset  = 99 * time.Hour
        adv     = 11 * time.Minute
        c       Simulated
    )
    c.Run(offset)

    end := c.Now().Add(timeout)
    ch := c.After(timeout)
    for c.Now() < end.Add(-adv) {
        c.Run(adv)
        select {
@@ -45,8 +47,8 @@ func TestSimulatedAfter(t *testing.T) {
    c.Run(adv)
    select {
    case stamp := <-ch:
        want := time.Time{}.Add(timeout)
        if !stamp.Equal(want) {
        want := AbsTime(0).Add(offset).Add(timeout)
        if stamp != want {
            t.Errorf("Wrong time sent on timer channel: got %v, want %v", stamp, want)
        }
    default:
@@ -94,7 +96,7 @@ func TestSimulatedSleep(t *testing.T) {
    var (
        c       Simulated
        timeout = 1 * time.Hour
        done    = make(chan AbsTime)
        done    = make(chan AbsTime, 1)
    )
    go func() {
        c.Sleep(timeout)
@@ -113,3 +115,48 @@ func TestSimulatedSleep(t *testing.T) {
        t.Fatal("Sleep didn't return in time")
    }
}

func TestSimulatedTimerReset(t *testing.T) {
    var (
        c       Simulated
        timeout = 1 * time.Hour
    )
    timer := c.NewTimer(timeout)
    c.Run(2 * timeout)
    select {
    case ftime := <-timer.C():
        if ftime != AbsTime(timeout) {
            t.Fatalf("wrong time %v sent on timer channel, want %v", ftime, AbsTime(timeout))
        }
    default:
        t.Fatal("timer didn't fire")
    }

    timer.Reset(timeout)
    c.Run(2 * timeout)
    select {
    case ftime := <-timer.C():
        if ftime != AbsTime(3*timeout) {
            t.Fatalf("wrong time %v sent on timer channel, want %v", ftime, AbsTime(3*timeout))
        }
    default:
        t.Fatal("timer didn't fire again")
    }
}

func TestSimulatedTimerStop(t *testing.T) {
    var (
        c       Simulated
        timeout = 1 * time.Hour
    )
    timer := c.NewTimer(timeout)
    c.Run(2 * timeout)
    if timer.Stop() {
        t.Errorf("Stop returned true for fired timer")
    }
    select {
    case <-timer.C():
    default:
        t.Fatal("timer didn't fire")
    }
}
@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {

    go func(idx int) {
        defer pend.Done()
        ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil, false)
        ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal, nil}, nil, false)
        defer ethash.Close()
        if err := ethash.VerifySeal(nil, block.Header()); err != nil {
            t.Errorf("proc %d: block verification failed: %v", idx, err)
@@ -28,7 +28,7 @@ var errEthashStopped = errors.New("ethash stopped")

// API exposes ethash related methods for the RPC interface.
type API struct {
    ethash *Ethash // Make sure the mode of ethash is normal.
    ethash *Ethash
}

// GetWork returns a work package for external miner.
@@ -39,7 +39,7 @@ type API struct {
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
    if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
    if api.ethash.remote == nil {
        return [4]string{}, errors.New("not supported")
    }

@@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) {
        workCh = make(chan [4]string, 1)
        errc   = make(chan error, 1)
    )

    select {
    case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
    case <-api.ethash.exitCh:
    case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
    case <-api.ethash.remote.exitCh:
        return [4]string{}, errEthashStopped
    }

    select {
    case work := <-workCh:
        return work, nil
@@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) {
// It returns an indication if the work was accepted.
// Note: an invalid solution, stale work, or non-existent work all return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
    if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
    if api.ethash.remote == nil {
        return false
    }

    var errc = make(chan error, 1)

    select {
    case api.ethash.submitWorkCh <- &mineResult{
    case api.ethash.remote.submitWorkCh <- &mineResult{
        nonce:     nonce,
        mixDigest: digest,
        hash:      hash,
        errc:      errc,
    }:
    case <-api.ethash.exitCh:
    case <-api.ethash.remote.exitCh:
        return false
    }

    err := <-errc
    return err == nil
}
@@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
    if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
    if api.ethash.remote == nil {
        return false
    }

    var done = make(chan struct{}, 1)

    select {
    case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
    case <-api.ethash.exitCh:
    case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
    case <-api.ethash.remote.exitCh:
        return false
    }

    // Block until hash rate submitted successfully.
    <-done

    return true
}
@@ -44,6 +44,11 @@ var (
 	maxUncles              = 2                // Maximum number of uncles allowed in a single block
 	allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks

+	// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
+	// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
+	// Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
+	calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))
+
 	// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
 	// It returns the difficulty that a new block should have when created at time given the
 	// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
@@ -63,7 +68,7 @@ var (
 // codebase, inherently breaking if the engine is swapped out. Please put common
 // error types into the consensus package.
 var (
-	errZeroBlockTime   = errors.New("timestamp equals parent's")
+	errOlderBlockTime  = errors.New("timestamp older than parent")
 	errTooManyUncles   = errors.New("too many uncles")
 	errDuplicateUncle  = errors.New("duplicate uncle")
 	errUncleIsAncestor = errors.New("uncle is ancestor")
@@ -250,9 +255,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
 		}
 	}
 	if header.Time <= parent.Time {
-		return errZeroBlockTime
+		return errOlderBlockTime
 	}
-	// Verify the block's difficulty based in its timestamp and parent's difficulty
+	// Verify the block's difficulty based on its timestamp and parent's difficulty
 	expected := ethash.CalcDifficulty(chain, header.Time, parent)

 	if expected.Cmp(header.Difficulty) != 0 {
@@ -311,6 +316,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p
 func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
 	next := new(big.Int).Add(parent.Number, big1)
 	switch {
+	case config.IsMuirGlacier(next):
+		return calcDifficultyEip2384(time, parent)
 	case config.IsConstantinople(next):
 		return calcDifficultyConstantinople(time, parent)
 	case config.IsByzantium(next):
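The bomb-delay calculators above share one mechanism: the exponential "bomb" term is evaluated against a fake block number pushed back by the delay, clamped at zero. A standalone sketch of that clamped subtraction; treat the minus-one adjustment (which mirrors the parent-vs-current block offset in the reference calculator) as an assumption:

package main

import (
	"fmt"
	"math/big"
)

// fakeBlockNumber mirrors the bomb-delay trick used by the difficulty
// calculators: the exponential component behaves as if the chain were
// bombDelay blocks younger. EIP-2384 sets the total delay to 9,000,000.
func fakeBlockNumber(num, bombDelay *big.Int) *big.Int {
	fake := new(big.Int)
	delayMinusOne := new(big.Int).Sub(bombDelay, big.NewInt(1))
	if num.Cmp(delayMinusOne) >= 0 {
		fake.Sub(num, delayMinusOne) // clamped at zero otherwise
	}
	return fake
}

func main() {
	delay := big.NewInt(9000000)
	fmt.Println(fakeBlockNumber(big.NewInt(9200000), delay)) // 200001
	fmt.Println(fakeBlockNumber(big.NewInt(1000000), delay)) // 0
}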
@@ -34,9 +34,7 @@ import (
 	"unsafe"

 	mmap "github.com/edsrzf/mmap-go"
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -50,7 +48,7 @@ var (
 	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

 	// sharedEthash is a full instance that can be shared between multiple users.
-	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)
+	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)

 	// algorithmRevision is the data structure version used for file naming.
 	algorithmRevision = 23
@@ -403,36 +401,8 @@ type Config struct {
 	DatasetsInMem  int
 	DatasetsOnDisk int
 	PowMode        Mode
-}
-
-// sealTask wraps a seal block with relative result channel for remote sealer thread.
-type sealTask struct {
-	block   *types.Block
-	results chan<- *types.Block
-}
-
-// mineResult wraps the pow solution parameters for the specified block.
-type mineResult struct {
-	nonce     types.BlockNonce
-	mixDigest common.Hash
-	hash      common.Hash
-
-	errc chan error
-}
-
-// hashrate wraps the hash rate submitted by the remote sealer.
-type hashrate struct {
-	id   common.Hash
-	ping time.Time
-	rate uint64
-
-	done chan struct{}
-}
-
-// sealWork wraps a seal work package for remote sealer.
-type sealWork struct {
-	errc chan error
-	res  chan [4]string
+
+	Log log.Logger `toml:"-"`
 }

 // Ethash is a consensus engine based on proof-of-work implementing the ethash
@@ -448,52 +418,42 @@ type Ethash struct {
 	threads  int           // Number of threads to mine on if mining
 	update   chan struct{} // Notification channel to update mining parameters
 	hashrate metrics.Meter // Meter tracking the average hashrate

-	// Remote sealer related fields
-	workCh       chan *sealTask   // Notification channel to push new work and relative result channel to remote sealer
-	fetchWorkCh  chan *sealWork   // Channel used for remote sealer to fetch mining work
-	submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
-	fetchRateCh  chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
-	submitRateCh chan *hashrate   // Channel used for remote sealer to submit their mining hashrate
+	remote *remoteSealer

 	// The fields below are hooks for testing
 	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
 	fakeFail  uint64        // Block number which fails PoW check even in fake mode
 	fakeDelay time.Duration // Time delay to sleep for before returning from verify

-	lock      sync.Mutex      // Ensures thread safety for the in-memory caches and mining fields
-	closeOnce sync.Once       // Ensures exit channel will not be closed twice.
-	exitCh    chan chan error // Notification channel to exiting backend threads
+	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
+	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
 }

 // New creates a full sized ethash PoW scheme and starts a background thread for
 // remote mining, also optionally notifying a batch of remote services of new work
 // packages.
 func New(config Config, notify []string, noverify bool) *Ethash {
+	if config.Log == nil {
+		config.Log = log.Root()
+	}
 	if config.CachesInMem <= 0 {
-		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
+		config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
 		config.CachesInMem = 1
 	}
 	if config.CacheDir != "" && config.CachesOnDisk > 0 {
-		log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
+		config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
 	}
 	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
-		log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
+		config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
 	}
 	ethash := &Ethash{
-		config:       config,
-		caches:       newlru("cache", config.CachesInMem, newCache),
-		datasets:     newlru("dataset", config.DatasetsInMem, newDataset),
-		update:       make(chan struct{}),
-		hashrate:     metrics.NewMeterForced(),
-		workCh:       make(chan *sealTask),
-		fetchWorkCh:  make(chan *sealWork),
-		submitWorkCh: make(chan *mineResult),
-		fetchRateCh:  make(chan chan uint64),
-		submitRateCh: make(chan *hashrate),
-		exitCh:       make(chan chan error),
+		config:   config,
+		caches:   newlru("cache", config.CachesInMem, newCache),
+		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
+		update:   make(chan struct{}),
+		hashrate: metrics.NewMeterForced(),
 	}
-	go ethash.remote(notify, noverify)
+	ethash.remote = startRemoteSealer(ethash, notify, noverify)
 	return ethash
 }
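The new Config.Log field threads a per-instance logger through the engine instead of package-global log calls, which is what lets the tests below inject a testlog. A minimal sketch of the defaulting pattern, reusing the real go-ethereum log package (the Config type here is illustrative, not the ethash one):

package main

import (
	"github.com/ethereum/go-ethereum/log"
)

// Config carries an optional per-instance logger, mirroring the new
// ethash Config.Log field: callers may leave it nil and get the root
// logger, while tests can inject a scoped logger instead.
type Config struct {
	Log log.Logger `toml:"-"` // excluded from TOML encoding, like the real field
}

func New(config Config) *Config {
	if config.Log == nil {
		config.Log = log.Root() // same defaulting as ethash.New
	}
	config.Log.Info("engine configured")
	return &config
}

func main() {
	New(Config{}) // falls back to log.Root()
}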
@@ -501,19 +461,13 @@ func New(config Config, notify []string, noverify bool) *Ethash {
 // purposes.
 func NewTester(notify []string, noverify bool) *Ethash {
 	ethash := &Ethash{
-		config:       Config{PowMode: ModeTest},
-		caches:       newlru("cache", 1, newCache),
-		datasets:     newlru("dataset", 1, newDataset),
-		update:       make(chan struct{}),
-		hashrate:     metrics.NewMeterForced(),
-		workCh:       make(chan *sealTask),
-		fetchWorkCh:  make(chan *sealWork),
-		submitWorkCh: make(chan *mineResult),
-		fetchRateCh:  make(chan chan uint64),
-		submitRateCh: make(chan *hashrate),
-		exitCh:       make(chan chan error),
+		config:   Config{PowMode: ModeTest, Log: log.Root()},
+		caches:   newlru("cache", 1, newCache),
+		datasets: newlru("dataset", 1, newDataset),
+		update:   make(chan struct{}),
+		hashrate: metrics.NewMeterForced(),
 	}
-	go ethash.remote(notify, noverify)
+	ethash.remote = startRemoteSealer(ethash, notify, noverify)
 	return ethash
 }

@@ -524,6 +478,7 @@ func NewFaker() *Ethash {
 	return &Ethash{
 		config: Config{
 			PowMode: ModeFake,
+			Log:     log.Root(),
 		},
 	}
 }
@@ -535,6 +490,7 @@ func NewFakeFailer(fail uint64) *Ethash {
 	return &Ethash{
 		config: Config{
 			PowMode: ModeFake,
+			Log:     log.Root(),
 		},
 		fakeFail: fail,
 	}
@@ -547,6 +503,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
 	return &Ethash{
 		config: Config{
 			PowMode: ModeFake,
+			Log:     log.Root(),
 		},
 		fakeDelay: delay,
 	}
@@ -558,6 +515,7 @@ func NewFullFaker() *Ethash {
 	return &Ethash{
 		config: Config{
 			PowMode: ModeFullFake,
+			Log:     log.Root(),
 		},
 	}
 }
@@ -573,13 +531,11 @@ func (ethash *Ethash) Close() error {
 	var err error
 	ethash.closeOnce.Do(func() {
 		// Short circuit if the exit channel is not allocated.
-		if ethash.exitCh == nil {
+		if ethash.remote == nil {
 			return
 		}
-		errc := make(chan error)
-		ethash.exitCh <- errc
-		err = <-errc
-		close(ethash.exitCh)
+		close(ethash.remote.requestExit)
+		<-ethash.remote.exitCh
 	})
 	return err
 }
@@ -680,8 +636,8 @@ func (ethash *Ethash) Hashrate() float64 {
 	var res = make(chan uint64, 1)

 	select {
-	case ethash.fetchRateCh <- res:
-	case <-ethash.exitCh:
+	case ethash.remote.fetchRateCh <- res:
+	case <-ethash.remote.exitCh:
 		// Return local hashrate only if ethash is stopped.
 		return ethash.hashrate.Rate1()
 	}
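Close now delegates shutdown to the remote sealer through a pair of channels: the owner closes requestExit to ask the loop to stop, then blocks on exitCh, which the loop closes on its way out. A self-contained sketch of that handshake, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// worker demonstrates the shutdown handshake used by the remote sealer.
// closeOnce makes Close safe to call multiple times.
type worker struct {
	requestExit chan struct{}
	exitCh      chan struct{}
	closeOnce   sync.Once
}

func newWorker() *worker {
	w := &worker{
		requestExit: make(chan struct{}),
		exitCh:      make(chan struct{}),
	}
	go w.loop()
	return w
}

func (w *worker) loop() {
	defer close(w.exitCh) // signal that all loop-owned state is released
	// A real loop would select over work channels here as well.
	<-w.requestExit
}

func (w *worker) Close() {
	w.closeOnce.Do(func() {
		close(w.requestExit)
		<-w.exitCh // wait for the loop to finish
	})
}

func main() {
	w := newWorker()
	w.Close()
	w.Close() // idempotent thanks to sync.Once
	fmt.Println("stopped cleanly")
}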
@@ -18,6 +18,7 @@ package ethash

 import (
 	"bytes"
+	"context"
 	crand "crypto/rand"
 	"encoding/json"
 	"errors"
@@ -33,7 +34,6 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/log"
 )

 const (
@@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
 		select {
 		case results <- block.WithSeal(header):
 		default:
-			log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
+			ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
 		}
 		return nil
 	}
@@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
 		threads = 0 // Allows disabling local mining without extra logic around local/remote
 	}
 	// Push new work to remote sealer
-	if ethash.workCh != nil {
-		ethash.workCh <- &sealTask{block: block, results: results}
+	if ethash.remote != nil {
+		ethash.remote.workCh <- &sealTask{block: block, results: results}
 	}
 	var (
 		pend sync.WaitGroup
@@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
 			select {
 			case results <- result:
 			default:
-				log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
+				ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
 			}
 			close(abort)
 		case <-ethash.update:
 			// Thread count was changed on user request, restart
 			close(abort)
 			if err := ethash.Seal(chain, block, results, stop); err != nil {
-				log.Error("Failed to restart sealing after update", "err", err)
+				ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
 			}
 		}
 		// Wait for all miners to terminate and return the block
@@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
 		attempts = int64(0)
 		nonce    = seed
 	)
-	logger := log.New("miner", id)
+	logger := ethash.config.Log.New("miner", id)
 	logger.Trace("Started ethash search for new nonces", "seed", seed)
 search:
 	for {
@@ -186,160 +186,128 @@ search:
 			runtime.KeepAlive(dataset)
 		}
-// remote is a standalone goroutine to handle remote mining related stuff.
-func (ethash *Ethash) remote(notify []string, noverify bool) {
-	var (
-		works = make(map[common.Hash]*types.Block)
-		rates = make(map[common.Hash]hashrate)
+// This is the timeout for HTTP requests to notify external miners.
+const remoteSealerTimeout = 1 * time.Second

-		results      chan<- *types.Block
-		currentBlock *types.Block
-		currentWork  [4]string
+type remoteSealer struct {
+	works        map[common.Hash]*types.Block
+	rates        map[common.Hash]hashrate
+	currentBlock *types.Block
+	currentWork  [4]string
+	notifyCtx    context.Context
+	cancelNotify context.CancelFunc // cancels all notification requests
+	reqWG        sync.WaitGroup     // tracks notification request goroutines

-		notifyTransport = &http.Transport{}
-		notifyClient    = &http.Client{
-			Transport: notifyTransport,
-			Timeout:   time.Second,
-		}
-		notifyReqs = make([]*http.Request, len(notify))
-	)
-	// notifyWork notifies all the specified mining endpoints of the availability of
-	// new work to be processed.
-	notifyWork := func() {
-		work := currentWork
-		blob, _ := json.Marshal(work)
+	ethash       *Ethash
+	noverify     bool
+	notifyURLs   []string
+	results      chan<- *types.Block
+	workCh       chan *sealTask   // Notification channel to push new work and relative result channel to remote sealer
+	fetchWorkCh  chan *sealWork   // Channel used for remote sealer to fetch mining work
+	submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
+	fetchRateCh  chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
+	submitRateCh chan *hashrate   // Channel used for remote sealer to submit their mining hashrate
+	requestExit  chan struct{}
+	exitCh       chan struct{}
+}

-	for i, url := range notify {
-		// Terminate any previously pending request and create the new work
-		if notifyReqs[i] != nil {
-			notifyTransport.CancelRequest(notifyReqs[i])
-		}
-		notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
-		notifyReqs[i].Header.Set("Content-Type", "application/json")
+// sealTask wraps a seal block with relative result channel for remote sealer thread.
+type sealTask struct {
+	block   *types.Block
+	results chan<- *types.Block
+}

-		// Push the new work concurrently to all the remote nodes
-		go func(req *http.Request, url string) {
-			res, err := notifyClient.Do(req)
-			if err != nil {
-				log.Warn("Failed to notify remote miner", "err", err)
-			} else {
-				log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
-				res.Body.Close()
-			}
-		}(notifyReqs[i], url)
-	}
+// mineResult wraps the pow solution parameters for the specified block.
+type mineResult struct {
+	nonce     types.BlockNonce
+	mixDigest common.Hash
+	hash      common.Hash

+	errc chan error
+}

+// hashrate wraps the hash rate submitted by the remote sealer.
+type hashrate struct {
+	id   common.Hash
+	ping time.Time
+	rate uint64

+	done chan struct{}
+}

+// sealWork wraps a seal work package for remote sealer.
+type sealWork struct {
+	errc chan error
+	res  chan [4]string
+}

+func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
+	ctx, cancel := context.WithCancel(context.Background())
+	s := &remoteSealer{
+		ethash:       ethash,
+		noverify:     noverify,
+		notifyURLs:   urls,
+		notifyCtx:    ctx,
+		cancelNotify: cancel,
+		works:        make(map[common.Hash]*types.Block),
+		rates:        make(map[common.Hash]hashrate),
+		workCh:       make(chan *sealTask),
+		fetchWorkCh:  make(chan *sealWork),
+		submitWorkCh: make(chan *mineResult),
+		fetchRateCh:  make(chan chan uint64),
+		submitRateCh: make(chan *hashrate),
+		requestExit:  make(chan struct{}),
+		exitCh:       make(chan struct{}),
 	}
-	// makeWork creates a work package for external miner.
-	//
-	// The work package consists of 3 strings:
-	//   result[0], 32 bytes hex encoded current block header pow-hash
-	//   result[1], 32 bytes hex encoded seed hash used for DAG
-	//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
-	//   result[3], hex encoded block number
-	makeWork := func(block *types.Block) {
-		hash := ethash.SealHash(block.Header())
+	go s.loop()
+	return s
+}

-		currentWork[0] = hash.Hex()
-		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
-		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
-		currentWork[3] = hexutil.EncodeBig(block.Number())
-
-		// Trace the seal work fetched by remote sealer.
-		currentBlock = block
-		works[hash] = block
-	}
-	// submitWork verifies the submitted pow solution, returning
-	// whether the solution was accepted or not (not can be both a bad pow as well as
-	// any other error, like no pending work or stale mining result).
-	submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
-		if currentBlock == nil {
-			log.Error("Pending work without block", "sealhash", sealhash)
-			return false
-		}
-		// Make sure the work submitted is present
-		block := works[sealhash]
-		if block == nil {
-			log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
-			return false
-		}
-		// Verify the correctness of submitted result.
-		header := block.Header()
-		header.Nonce = nonce
-		header.MixDigest = mixDigest
-
-		start := time.Now()
-		if !noverify {
-			if err := ethash.verifySeal(nil, header, true); err != nil {
-				log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
-				return false
-			}
-		}
-		// Make sure the result channel is assigned.
-		if results == nil {
-			log.Warn("Ethash result channel is empty, submitted mining result is rejected")
-			return false
-		}
-		log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
-
-		// Solutions seems to be valid, return to the miner and notify acceptance.
-		solution := block.WithSeal(header)
-
-		// The submitted solution is within the scope of acceptance.
-		if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
-			select {
-			case results <- solution:
-				log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
-				return true
-			default:
-				log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
-				return false
-			}
-		}
-		// The submitted block is too old to accept, drop it.
-		log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
-		return false
-	}
+func (s *remoteSealer) loop() {
+	defer func() {
+		s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
+		s.cancelNotify()
+		s.reqWG.Wait()
+		close(s.exitCh)
+	}()

 	ticker := time.NewTicker(5 * time.Second)
 	defer ticker.Stop()

 	for {
 		select {
-		case work := <-ethash.workCh:
+		case work := <-s.workCh:
 			// Update current work with new received block.
 			// Note the same work can be passed in twice, which happens when changing CPU threads.
-			results = work.results
+			s.results = work.results
+			s.makeWork(work.block)
+			s.notifyWork()

-			makeWork(work.block)
-
-			// Notify and requested URLs of the new work availability
-			notifyWork()

-		case work := <-ethash.fetchWorkCh:
+		case work := <-s.fetchWorkCh:
 			// Return current mining work to remote miner.
-			if currentBlock == nil {
+			if s.currentBlock == nil {
 				work.errc <- errNoMiningWork
 			} else {
-				work.res <- currentWork
+				work.res <- s.currentWork
 			}

-		case result := <-ethash.submitWorkCh:
+		case result := <-s.submitWorkCh:
 			// Verify submitted PoW solution based on maintained mining blocks.
-			if submitWork(result.nonce, result.mixDigest, result.hash) {
+			if s.submitWork(result.nonce, result.mixDigest, result.hash) {
 				result.errc <- nil
 			} else {
 				result.errc <- errInvalidSealResult
 			}

-		case result := <-ethash.submitRateCh:
+		case result := <-s.submitRateCh:
 			// Trace remote sealer's hash rate by submitted value.
-			rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
+			s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
 			close(result.done)

-		case req := <-ethash.fetchRateCh:
+		case req := <-s.fetchRateCh:
 			// Gather all hash rate submitted by remote sealer.
 			var total uint64
-			for _, rate := range rates {
+			for _, rate := range s.rates {
 				// this could overflow
 				total += rate.rate
 			}
@@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {

 		case <-ticker.C:
 			// Clear stale submitted hash rate.
-			for id, rate := range rates {
+			for id, rate := range s.rates {
 				if time.Since(rate.ping) > 10*time.Second {
-					delete(rates, id)
+					delete(s.rates, id)
 				}
 			}
 			// Clear stale pending blocks
-			if currentBlock != nil {
-				for hash, block := range works {
-					if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
-						delete(works, hash)
+			if s.currentBlock != nil {
+				for hash, block := range s.works {
+					if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
+						delete(s.works, hash)
 					}
 				}
 			}

-		case errc := <-ethash.exitCh:
-			// Exit remote loop if ethash is closed and return relevant error.
-			errc <- nil
-			log.Trace("Ethash remote sealer is exiting")
+		case <-s.requestExit:
 			return
 		}
 	}
 }
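The loop above is the only goroutine that ever touches works and rates, so the maps need no locking; callers interact purely through channels. A stripped-down sketch of the same ownership pattern (names are illustrative):

package main

import "fmt"

// rateTracker mirrors remoteSealer.loop's design: the slice is touched
// only by the loop goroutine, so no mutex is needed; other goroutines
// communicate through channels instead.
type rateTracker struct {
	submitCh chan uint64
	totalCh  chan chan uint64
	quit     chan struct{}
	rates    []uint64 // owned exclusively by loop
}

func newRateTracker() *rateTracker {
	t := &rateTracker{
		submitCh: make(chan uint64),
		totalCh:  make(chan chan uint64),
		quit:     make(chan struct{}),
	}
	go t.loop()
	return t
}

func (t *rateTracker) loop() {
	for {
		select {
		case r := <-t.submitCh:
			t.rates = append(t.rates, r)
		case res := <-t.totalCh:
			var total uint64
			for _, r := range t.rates {
				total += r // note: could overflow, like the original
			}
			res <- total
		case <-t.quit:
			return
		}
	}
}

func main() {
	t := newRateTracker()
	t.submitCh <- 10
	t.submitCh <- 32
	res := make(chan uint64, 1)
	t.totalCh <- res
	fmt.Println(<-res) // 42
	close(t.quit)
}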
+// makeWork creates a work package for external miner.
+//
+// The work package consists of 3 strings:
+//   result[0], 32 bytes hex encoded current block header pow-hash
+//   result[1], 32 bytes hex encoded seed hash used for DAG
+//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+//   result[3], hex encoded block number
+func (s *remoteSealer) makeWork(block *types.Block) {
+	hash := s.ethash.SealHash(block.Header())
+	s.currentWork[0] = hash.Hex()
+	s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
+	s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
+	s.currentWork[3] = hexutil.EncodeBig(block.Number())
+
+	// Trace the seal work fetched by remote sealer.
+	s.currentBlock = block
+	s.works[hash] = block
+}
+
+// notifyWork notifies all the specified mining endpoints of the availability of
+// new work to be processed.
+func (s *remoteSealer) notifyWork() {
+	work := s.currentWork
+	blob, _ := json.Marshal(work)
+	s.reqWG.Add(len(s.notifyURLs))
+	for _, url := range s.notifyURLs {
+		go s.sendNotification(s.notifyCtx, url, blob, work)
+	}
+}
+
+func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
+	defer s.reqWG.Done()
+
+	req, err := http.NewRequest("POST", url, bytes.NewReader(json))
+	if err != nil {
+		s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
+		return
+	}
+	ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
+	defer cancel()
+	req = req.WithContext(ctx)
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
+	} else {
+		s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
+		resp.Body.Close()
+	}
+}
+
+// submitWork verifies the submitted pow solution, returning
+// whether the solution was accepted or not (a failure can be both a bad pow as well as
+// any other error, like no pending work or stale mining result).
+func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
+	if s.currentBlock == nil {
+		s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
+		return false
+	}
+	// Make sure the work submitted is present
+	block := s.works[sealhash]
+	if block == nil {
+		s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
+		return false
+	}
+	// Verify the correctness of submitted result.
+	header := block.Header()
+	header.Nonce = nonce
+	header.MixDigest = mixDigest
+
+	start := time.Now()
+	if !s.noverify {
+		if err := s.ethash.verifySeal(nil, header, true); err != nil {
+			s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
+			return false
+		}
+	}
+	// Make sure the result channel is assigned.
+	if s.results == nil {
+		s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
+		return false
+	}
+	s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
+
+	// Solution seems to be valid, return to the miner and notify acceptance.
+	solution := block.WithSeal(header)
+
+	// The submitted solution is within the scope of acceptance.
+	if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
+		select {
+		case s.results <- solution:
+			s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+			return true
+		default:
+			s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
+			return false
+		}
+	}
+	// The submitted block is too old to accept, drop it.
+	s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+	return false
+}
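sendNotification combines a cancellable parent context (so shutdown can abort in-flight notifications) with a per-request timeout, and a WaitGroup lets the owner drain stragglers. A runnable sketch of that combination under assumed, illustrative names:

package main

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
	"time"
)

// notify mirrors sendNotification: the request inherits the cancellable
// parent context plus a per-request timeout, and signals completion via
// the WaitGroup so a shutdown can wait for it.
func notify(ctx context.Context, wg *sync.WaitGroup, url string, body []byte) {
	defer wg.Done()

	req, err := http.NewRequest("POST", url, bytes.NewReader(body))
	if err != nil {
		return
	}
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	req = req.WithContext(ctx)
	req.Header.Set("Content-Type", "application/json")

	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}

func main() {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer server.Close()

	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(1)
	go notify(ctx, &wg, server.URL, []byte(`["0x0","0x0","0x0","0x1"]`))

	cancel()  // shutdown: abort anything still in flight
	wg.Wait() // and wait for the goroutines to drain
	fmt.Println("all notifications settled")
}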
@@ -20,59 +20,39 @@ import (
 	"encoding/json"
 	"io/ioutil"
 	"math/big"
-	"net"
 	"net/http"
+	"net/http/httptest"
 	"testing"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/internal/testlog"
+	"github.com/ethereum/go-ethereum/log"
 )

 // Tests whether remote HTTP servers are correctly notified of new work.
 func TestRemoteNotify(t *testing.T) {
-	// Start a simple webserver to capture notifications
+	// Start a simple web server to capture notifications.
 	sink := make(chan [3]string)
-
-	server := &http.Server{
-		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-			blob, err := ioutil.ReadAll(req.Body)
-			if err != nil {
-				t.Fatalf("failed to read miner notification: %v", err)
-			}
-			var work [3]string
-			if err := json.Unmarshal(blob, &work); err != nil {
-				t.Fatalf("failed to unmarshal miner notification: %v", err)
-			}
-			sink <- work
-		}),
-	}
-	// Open a custom listener to extract its local address
-	listener, err := net.Listen("tcp", "localhost:0")
-	if err != nil {
-		t.Fatalf("failed to open notification server: %v", err)
-	}
-	defer listener.Close()
-
-	go server.Serve(listener)
-
-	// Wait for server to start listening
-	var tries int
-	for tries = 0; tries < 10; tries++ {
-		conn, _ := net.DialTimeout("tcp", listener.Addr().String(), 1*time.Second)
-		if conn != nil {
-			break
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		blob, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			t.Errorf("failed to read miner notification: %v", err)
 		}
-	}
-	if tries == 10 {
-		t.Fatal("tcp listener not ready for more than 10 seconds")
-	}
+		var work [3]string
+		if err := json.Unmarshal(blob, &work); err != nil {
+			t.Errorf("failed to unmarshal miner notification: %v", err)
+		}
+		sink <- work
+	}))
+	defer server.Close()

-	// Create the custom ethash engine
-	ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
+	// Create the custom ethash engine.
+	ethash := NewTester([]string{server.URL}, false)
 	defer ethash.Close()

-	// Stream a work task and ensure the notification bubbles out
+	// Stream a work task and ensure the notification bubbles out.
 	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
 	block := types.NewBlockWithHeader(header)

@@ -97,46 +77,37 @@ func TestRemoteNotify(t *testing.T) {
 // Tests that pushing work packages fast to the miner doesn't cause any data race
 // issues in the notifications.
 func TestRemoteMultiNotify(t *testing.T) {
-	// Start a simple webserver to capture notifications
+	// Start a simple web server to capture notifications.
 	sink := make(chan [3]string, 64)
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		blob, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			t.Errorf("failed to read miner notification: %v", err)
+		}
+		var work [3]string
+		if err := json.Unmarshal(blob, &work); err != nil {
+			t.Errorf("failed to unmarshal miner notification: %v", err)
+		}
+		sink <- work
+	}))
+	defer server.Close()

-	server := &http.Server{
-		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-			blob, err := ioutil.ReadAll(req.Body)
-			if err != nil {
-				t.Fatalf("failed to read miner notification: %v", err)
-			}
-			var work [3]string
-			if err := json.Unmarshal(blob, &work); err != nil {
-				t.Fatalf("failed to unmarshal miner notification: %v", err)
-			}
-			sink <- work
-		}),
-	}
-	// Open a custom listener to extract its local address
-	listener, err := net.Listen("tcp", "localhost:0")
-	if err != nil {
-		t.Fatalf("failed to open notification server: %v", err)
-	}
-	defer listener.Close()
-
-	go server.Serve(listener)
-
-	// Create the custom ethash engine
-	ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
+	// Create the custom ethash engine.
+	ethash := NewTester([]string{server.URL}, false)
+	ethash.config.Log = testlog.Logger(t, log.LvlWarn)
 	defer ethash.Close()

-	// Stream a lot of work task and ensure all the notifications bubble out
+	// Stream a lot of work task and ensure all the notifications bubble out.
 	for i := 0; i < cap(sink); i++ {
 		header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
 		block := types.NewBlockWithHeader(header)

 		ethash.Seal(nil, block, nil, nil)
 	}

 	for i := 0; i < cap(sink); i++ {
 		select {
 		case <-sink:
-		case <-time.After(3 * time.Second):
+		case <-time.After(10 * time.Second):
 			t.Fatalf("notification %d timed out", i)
 		}
 	}
@@ -206,10 +177,10 @@ func TestStaleSubmission(t *testing.T) {
 		select {
 		case res := <-results:
 			if res.Header().Nonce != fakeNonce {
-				t.Errorf("case %d block nonce mismatch, want %s, get %s", id+1, fakeNonce, res.Header().Nonce)
+				t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce)
 			}
 			if res.Header().MixDigest != fakeDigest {
-				t.Errorf("case %d block digest mismatch, want %s, get %s", id+1, fakeDigest, res.Header().MixDigest)
+				t.Errorf("case %d block digest mismatch, want %x, get %x", id+1, fakeDigest, res.Header().MixDigest)
 			}
 			if res.Header().Difficulty.Uint64() != c.headers[c.submitIndex].Difficulty.Uint64() {
 				t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty, res.Header().Difficulty)
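The tests above replace the hand-rolled net.Listen plus dial-until-ready loop with httptest.NewServer, which binds a free port and is usable as soon as it returns. A minimal standalone example of the same pattern:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

// httptest.NewServer picks a free port, starts serving immediately and
// exposes its base URL, removing the manual listener bootstrap the old
// test needed.
func main() {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		blob, _ := ioutil.ReadAll(req.Body)
		fmt.Printf("got notification: %s\n", blob)
	}))
	defer server.Close() // blocks until outstanding requests complete

	if _, err := http.Post(server.URL, "application/json", nil); err != nil {
		panic(err)
	}
}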
@@ -20,14 +20,16 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"reflect"
 	"strings"
 	"time"

+	"github.com/dop251/goja"
 	"github.com/ethereum/go-ethereum/accounts/scwallet"
 	"github.com/ethereum/go-ethereum/accounts/usbwallet"
-	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/internal/jsre"
 	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/robertkrimen/otto"
 )

 // bridge is a collection of JavaScript utility methods to bridge the .js runtime
@@ -47,10 +49,18 @@ func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *br
 	}
 }

+func getJeth(vm *goja.Runtime) *goja.Object {
+	jeth := vm.Get("jeth")
+	if jeth == nil {
+		panic(vm.ToValue("jeth object does not exist"))
+	}
+	return jeth.ToObject(vm)
+}
+
 // NewAccount is a wrapper around the personal.newAccount RPC method that uses a
 // non-echoing password prompt to acquire the passphrase and executes the original
 // RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
-func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) {
 	var (
 		password string
 		confirm  string
@@ -58,52 +68,57 @@ func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
 	)
 	switch {
 	// No password was specified, prompt the user for it
-	case len(call.ArgumentList) == 0:
-		if password, err = b.prompter.PromptPassword("Password: "); err != nil {
-			throwJSException(err.Error())
+	case len(call.Arguments) == 0:
+		if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil {
+			return nil, err
 		}
-		if confirm, err = b.prompter.PromptPassword("Repeat password: "); err != nil {
-			throwJSException(err.Error())
+		if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil {
+			return nil, err
 		}
 		if password != confirm {
-			throwJSException("passwords don't match!")
+			return nil, fmt.Errorf("passwords don't match!")
 		}

 	// A single string password was specified, use that
-	case len(call.ArgumentList) == 1 && call.Argument(0).IsString():
-		password, _ = call.Argument(0).ToString()
-
-	// Otherwise fail with some error
+	case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil:
+		password = call.Argument(0).ToString().String()
 	default:
-		throwJSException("expected 0 or 1 string argument")
+		return nil, fmt.Errorf("expected 0 or 1 string argument")
 	}
 	// Password acquired, execute the call and return
-	ret, err := call.Otto.Call("jeth.newAccount", nil, password)
-	if err != nil {
-		throwJSException(err.Error())
+	newAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("newAccount"))
+	if !callable {
+		return nil, fmt.Errorf("jeth.newAccount is not callable")
 	}
-	return ret
+	ret, err := newAccount(goja.Null(), call.VM.ToValue(password))
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
 }
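The recurring goja pattern in this file: look the callback up on the jeth object, assert that it is callable, then invoke it with an explicit `this` value. A self-contained illustration against a plain goja runtime (the greet function is hypothetical):

package main

import (
	"fmt"

	"github.com/dop251/goja"
)

// AssertFunction converts a goja.Value into a Go-callable function, the
// same way the bridge now calls jeth.newAccount.
func main() {
	vm := goja.New()
	if _, err := vm.RunString(`function greet(name) { return "hello " + name }`); err != nil {
		panic(err)
	}
	greet, callable := goja.AssertFunction(vm.Get("greet"))
	if !callable {
		panic("greet is not callable")
	}
	// First argument is the JS `this`; goja.Null() matches the bridge's usage.
	ret, err := greet(goja.Null(), vm.ToValue("geth"))
	if err != nil {
		panic(err)
	}
	fmt.Println(ret.String()) // hello geth
}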
 // OpenWallet is a wrapper around personal.openWallet which can interpret and
 // react to certain error messages, such as the Trezor PIN matrix request.
-func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) {
 	// Make sure we have a wallet specified to open
-	if !call.Argument(0).IsString() {
-		throwJSException("first argument must be the wallet URL to open")
+	if call.Argument(0).ToObject(call.VM).ClassName() != "String" {
+		return nil, fmt.Errorf("first argument must be the wallet URL to open")
 	}
 	wallet := call.Argument(0)

-	var passwd otto.Value
-	if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
-		passwd, _ = otto.ToValue("")
+	var passwd goja.Value
+	if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
+		passwd = call.VM.ToValue("")
 	} else {
 		passwd = call.Argument(1)
 	}
 	// Open the wallet and return if successful in itself
-	val, err := call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
+	openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
+	if !callable {
+		return nil, fmt.Errorf("jeth.openWallet is not callable")
+	}
+	val, err := openWallet(goja.Null(), wallet, passwd)
 	if err == nil {
-		return val
+		return val, nil
 	}

 	// Wallet open failed, report error unless it's a PIN or PUK entry
@@ -111,32 +126,31 @@ func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
 	case strings.HasSuffix(err.Error(), usbwallet.ErrTrezorPINNeeded.Error()):
 		val, err = b.readPinAndReopenWallet(call)
 		if err == nil {
-			return val
+			return val, nil
 		}
 		val, err = b.readPassphraseAndReopenWallet(call)
 		if err != nil {
-			throwJSException(err.Error())
+			return nil, err
 		}

 	case strings.HasSuffix(err.Error(), scwallet.ErrPairingPasswordNeeded.Error()):
 		// PUK input requested, fetch from the user and call open again
-		if input, err := b.prompter.PromptPassword("Please enter the pairing password: "); err != nil {
-			throwJSException(err.Error())
-		} else {
-			passwd, _ = otto.ToValue(input)
+		input, err := b.prompter.PromptPassword("Please enter the pairing password: ")
+		if err != nil {
+			return nil, err
 		}
-		if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
+		passwd = call.VM.ToValue(input)
+		if val, err = openWallet(goja.Null(), wallet, passwd); err != nil {
 			if !strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()) {
-				throwJSException(err.Error())
+				return nil, err
 			} else {
 				// PIN input requested, fetch from the user and call open again
-				if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
-					throwJSException(err.Error())
-				} else {
-					passwd, _ = otto.ToValue(input)
+				input, err := b.prompter.PromptPassword("Please enter current PIN: ")
+				if err != nil {
+					return nil, err
 				}
-				if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
-					throwJSException(err.Error())
+				if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
+					return nil, err
 				}
 			}
 		}
@@ -144,52 +158,52 @@ func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
 	case strings.HasSuffix(err.Error(), scwallet.ErrPINUnblockNeeded.Error()):
 		// PIN unblock requested, fetch PUK and new PIN from the user
 		var pukpin string
-		if input, err := b.prompter.PromptPassword("Please enter current PUK: "); err != nil {
-			throwJSException(err.Error())
-		} else {
-			pukpin = input
+		input, err := b.prompter.PromptPassword("Please enter current PUK: ")
+		if err != nil {
+			return nil, err
 		}
-		if input, err := b.prompter.PromptPassword("Please enter new PIN: "); err != nil {
-			throwJSException(err.Error())
-		} else {
-			pukpin += input
+		pukpin = input
+		input, err = b.prompter.PromptPassword("Please enter new PIN: ")
+		if err != nil {
+			return nil, err
 		}
-		passwd, _ = otto.ToValue(pukpin)
-		if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
-			throwJSException(err.Error())
+		pukpin += input
+
+		if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(pukpin)); err != nil {
+			return nil, err
 		}

 	case strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()):
 		// PIN input requested, fetch from the user and call open again
-		if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
-			throwJSException(err.Error())
-		} else {
-			passwd, _ = otto.ToValue(input)
+		input, err := b.prompter.PromptPassword("Please enter current PIN: ")
+		if err != nil {
+			return nil, err
 		}
-		if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
+		if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
			return nil, err
 		}

 	default:
 		// Unknown error occurred, drop to the user
-		throwJSException(err.Error())
+		return nil, err
 	}
-	return val
+	return val, nil
 }
-func (b *bridge) readPassphraseAndReopenWallet(call otto.FunctionCall) (otto.Value, error) {
-	var passwd otto.Value
+func (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, error) {
 	wallet := call.Argument(0)
-	if input, err := b.prompter.PromptPassword("Please enter your password: "); err != nil {
-		throwJSException(err.Error())
-	} else {
-		passwd, _ = otto.ToValue(input)
+	input, err := b.prompter.PromptPassword("Please enter your passphrase: ")
+	if err != nil {
+		return nil, err
 	}
-	return call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
+	openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
+	if !callable {
+		return nil, fmt.Errorf("jeth.openWallet is not callable")
+	}
+	return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
 }

-func (b *bridge) readPinAndReopenWallet(call otto.FunctionCall) (otto.Value, error) {
-	var passwd otto.Value
+func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) {
 	wallet := call.Argument(0)
 	// Trezor PIN matrix input requested, display the matrix to the user and fetch the data
 	fmt.Fprintf(b.printer, "Look at the device for number positions\n\n")
@@ -199,155 +213,154 @@ func (b *bridge) readPinAndReopenWallet(call otto.FunctionCall) (otto.Value, err
 	fmt.Fprintf(b.printer, "--+---+--\n")
 	fmt.Fprintf(b.printer, "1 | 2 | 3\n\n")

-	if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
-		throwJSException(err.Error())
-	} else {
-		passwd, _ = otto.ToValue(input)
+	input, err := b.prompter.PromptPassword("Please enter current PIN: ")
+	if err != nil {
+		return nil, err
 	}
-	return call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
+	openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
+	if !callable {
+		return nil, fmt.Errorf("jeth.openWallet is not callable")
+	}
+	return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
 }
 // UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
 // uses a non-echoing password prompt to acquire the passphrase and executes the
 // original RPC method (saved in jeth.unlockAccount) with it to actually execute
 // the RPC call.
-func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
-	// Make sure we have an account specified to unlock
-	if !call.Argument(0).IsString() {
-		throwJSException("first argument must be the account to unlock")
+func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {
+	// Make sure we have an account specified to unlock.
+	if call.Argument(0).ExportType().Kind() != reflect.String {
+		return nil, fmt.Errorf("first argument must be the account to unlock")
 	}
 	account := call.Argument(0)

-	// If password is not given or is the null value, prompt the user for it
-	var passwd otto.Value
-
-	if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
+	// If password is not given or is the null value, prompt the user for it.
+	var passwd goja.Value
+	if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
 		fmt.Fprintf(b.printer, "Unlock account %s\n", account)
-		if input, err := b.prompter.PromptPassword("Password: "); err != nil {
-			throwJSException(err.Error())
-		} else {
-			passwd, _ = otto.ToValue(input)
+		input, err := b.prompter.PromptPassword("Passphrase: ")
+		if err != nil {
+			return nil, err
 		}
+		passwd = call.VM.ToValue(input)
 	} else {
-		if !call.Argument(1).IsString() {
-			throwJSException("password must be a string")
+		if call.Argument(1).ExportType().Kind() != reflect.String {
+			return nil, fmt.Errorf("password must be a string")
 		}
 		passwd = call.Argument(1)
 	}
-	// Third argument is the duration how long the account must be unlocked.
-	duration := otto.NullValue()
-	if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
-		if !call.Argument(2).IsNumber() {
-			throwJSException("unlock duration must be a number")
+
+	// Third argument is the duration how long the account should be unlocked.
+	duration := goja.Null()
+	if !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) {
+		if !isNumber(call.Argument(2)) {
+			return nil, fmt.Errorf("unlock duration must be a number")
 		}
 		duration = call.Argument(2)
 	}
-	// Send the request to the backend and return
-	val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration)
-	if err != nil {
-		throwJSException(err.Error())
+
+	// Send the request to the backend and return.
+	unlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount"))
+	if !callable {
+		return nil, fmt.Errorf("jeth.unlockAccount is not callable")
 	}
-	return val
+	return unlockAccount(goja.Null(), account, passwd, duration)
 }

 // Sign is a wrapper around the personal.sign RPC method that uses a non-echoing password
 // prompt to acquire the passphrase and executes the original RPC method (saved in
 // jeth.sign) with it to actually execute the RPC call.
-func (b *bridge) Sign(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
 	var (
 		message = call.Argument(0)
 		account = call.Argument(1)
 		passwd  = call.Argument(2)
 	)

-	if !message.IsString() {
-		throwJSException("first argument must be the message to sign")
+	if message.ExportType().Kind() != reflect.String {
+		return nil, fmt.Errorf("first argument must be the message to sign")
 	}
-	if !account.IsString() {
-		throwJSException("second argument must be the account to sign with")
+	if account.ExportType().Kind() != reflect.String {
+		return nil, fmt.Errorf("second argument must be the account to sign with")
 	}

 	// if the password is not given or null ask the user and ensure password is a string
-	if passwd.IsUndefined() || passwd.IsNull() {
+	if goja.IsUndefined(passwd) || goja.IsNull(passwd) {
 		fmt.Fprintf(b.printer, "Give password for account %s\n", account)
-		if input, err := b.prompter.PromptPassword("Password: "); err != nil {
-			throwJSException(err.Error())
-		} else {
-			passwd, _ = otto.ToValue(input)
+		input, err := b.prompter.PromptPassword("Password: ")
+		if err != nil {
+			return nil, err
 		}
-	}
-	if !passwd.IsString() {
-		throwJSException("third argument must be the password to unlock the account")
+		passwd = call.VM.ToValue(input)
+	} else if passwd.ExportType().Kind() != reflect.String {
+		return nil, fmt.Errorf("third argument must be the password to unlock the account")
 	}

 	// Send the request to the backend and return
-	val, err := call.Otto.Call("jeth.sign", nil, message, account, passwd)
-	if err != nil {
-		throwJSException(err.Error())
+	sign, callable := goja.AssertFunction(getJeth(call.VM).Get("sign"))
+	if !callable {
+		return nil, fmt.Errorf("jeth.sign is not callable")
 	}
-	return val
+	return sign(goja.Null(), message, account, passwd)
 }

 // Sleep will block the console for the specified number of seconds.
-func (b *bridge) Sleep(call otto.FunctionCall) (response otto.Value) {
-	if call.Argument(0).IsNumber() {
-		sleep, _ := call.Argument(0).ToInteger()
-		time.Sleep(time.Duration(sleep) * time.Second)
-		return otto.TrueValue()
+func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) {
+	if !isNumber(call.Argument(0)) {
+		return nil, fmt.Errorf("usage: sleep(<number of seconds>)")
 	}
-	return throwJSException("usage: sleep(<number of seconds>)")
+	sleep := call.Argument(0).ToFloat()
+	time.Sleep(time.Duration(sleep * float64(time.Second)))
+	return call.VM.ToValue(true), nil
 }
 // SleepBlocks will block the console for a specified number of new blocks optionally
 // until the given timeout is reached.
-func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) {
+	// Parse the input parameters for the sleep.
 	var (
 		blocks = int64(0)
 		sleep  = int64(9999999999999999) // indefinitely
 	)
-	// Parse the input parameters for the sleep
-	nArgs := len(call.ArgumentList)
+	nArgs := len(call.Arguments)
 	if nArgs == 0 {
-		throwJSException("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
+		return nil, fmt.Errorf("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
 	}
 	if nArgs >= 1 {
-		if call.Argument(0).IsNumber() {
-			blocks, _ = call.Argument(0).ToInteger()
-		} else {
-			throwJSException("expected number as first argument")
+		if !isNumber(call.Argument(0)) {
+			return nil, fmt.Errorf("expected number as first argument")
 		}
+		blocks = call.Argument(0).ToInteger()
 	}
 	if nArgs >= 2 {
-		if call.Argument(1).IsNumber() {
-			sleep, _ = call.Argument(1).ToInteger()
-		} else {
-			throwJSException("expected number as second argument")
+		if !isNumber(call.Argument(1)) {
+			return nil, fmt.Errorf("expected number as second argument")
 		}
+		sleep = call.Argument(1).ToInteger()
 	}
-	// go through the console, this will allow web3 to call the appropriate
-	// callbacks if a delayed response or notification is received.
-	blockNumber := func() int64 {
-		result, err := call.Otto.Run("eth.blockNumber")
-		if err != nil {
-			throwJSException(err.Error())
-		}
-		block, err := result.ToInteger()
-		if err != nil {
-			throwJSException(err.Error())
-		}
-		return block
-	}
-	// Poll the current block number until either it ot a timeout is reached
-	targetBlockNr := blockNumber() + blocks
-	deadline := time.Now().Add(time.Duration(sleep) * time.Second)
+
+	// Poll the current block number until either it or a timeout is reached.
+	var (
+		deadline   = time.Now().Add(time.Duration(sleep) * time.Second)
+		lastNumber = ^hexutil.Uint64(0)
+	)
 	for time.Now().Before(deadline) {
-		if blockNumber() >= targetBlockNr {
-			return otto.TrueValue()
+		var number hexutil.Uint64
+		err := b.client.Call(&number, "eth_blockNumber")
+		if err != nil {
+			return nil, err
 		}
+		if number != lastNumber {
+			lastNumber = number
+			blocks--
+		}
+		if blocks <= 0 {
+			break
+		}
 		time.Sleep(time.Second)
 	}
-	return otto.FalseValue()
+	return call.VM.ToValue(true), nil
 }

 type jsonrpcCall struct {
@@ -357,15 +370,15 @@ type jsonrpcCall struct {
|
||||
}
|
||||
|
||||
// Send implements the web3 provider "send" method.
|
||||
func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
func (b *bridge) Send(call jsre.Call) (goja.Value, error) {
|
||||
// Remarshal the request into a Go value.
|
||||
JSON, _ := call.Otto.Object("JSON")
|
||||
reqVal, err := JSON.Call("stringify", call.Argument(0))
|
||||
reqVal, err := call.Argument(0).ToObject(call.VM).MarshalJSON()
|
||||
if err != nil {
|
||||
throwJSException(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
rawReq = reqVal.String()
|
||||
rawReq = string(reqVal)
|
||||
dec = json.NewDecoder(strings.NewReader(rawReq))
|
||||
reqs []jsonrpcCall
|
||||
batch bool
|
||||
@@ -381,10 +394,12 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
}
|
||||
|
||||
// Execute the requests.
|
||||
resps, _ := call.Otto.Object("new Array()")
|
||||
var resps []*goja.Object
|
||||
for _, req := range reqs {
|
||||
resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`)
|
||||
resp := call.VM.NewObject()
|
||||
resp.Set("jsonrpc", "2.0")
|
||||
resp.Set("id", req.ID)
|
||||
|
||||
var result json.RawMessage
|
||||
err = b.client.Call(&result, req.Method, req.Params...)
|
||||
switch err := err.(type) {
|
||||
@@ -392,9 +407,14 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
if result == nil {
|
||||
// Special case null because it is decoded as an empty
|
||||
// raw message for some reason.
|
||||
resp.Set("result", otto.NullValue())
|
||||
resp.Set("result", goja.Null())
|
||||
} else {
|
||||
resultVal, err := JSON.Call("parse", string(result))
|
||||
JSON := call.VM.Get("JSON").ToObject(call.VM)
|
||||
parse, callable := goja.AssertFunction(JSON.Get("parse"))
|
||||
if !callable {
|
||||
return nil, fmt.Errorf("JSON.parse is not a function")
|
||||
}
|
||||
resultVal, err := parse(goja.Null(), call.VM.ToValue(string(result)))
|
||||
if err != nil {
|
||||
setError(resp, -32603, err.Error())
|
||||
} else {
|
||||
@@ -406,33 +426,38 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
default:
|
||||
setError(resp, -32603, err.Error())
|
||||
}
|
||||
resps.Call("push", resp)
|
||||
resps = append(resps, resp)
|
||||
}
|
||||
|
||||
// Return the responses either to the callback (if supplied)
|
||||
// or directly as the return value.
|
||||
var result goja.Value
|
||||
if batch {
|
||||
response = resps.Value()
|
||||
result = call.VM.ToValue(resps)
|
||||
} else {
|
||||
response, _ = resps.Get("0")
|
||||
result = resps[0]
|
||||
}
|
||||
if fn := call.Argument(1); fn.Class() == "Function" {
|
||||
fn.Call(otto.NullValue(), otto.NullValue(), response)
|
||||
return otto.UndefinedValue()
|
||||
if fn, isFunc := goja.AssertFunction(call.Argument(1)); isFunc {
|
||||
fn(goja.Null(), goja.Null(), result)
|
||||
return goja.Undefined(), nil
|
||||
}
|
||||
return response
|
||||
return result, nil
|
||||
}
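A minimal standalone sketch, assuming only the goja package, of the JSON.parse round-trip used in Send above: the JS parse function is fetched from the runtime and invoked from Go via goja.AssertFunction:

package main

import (
	"fmt"

	"github.com/dop251/goja"
)

func main() {
	vm := goja.New()
	// Grab the built-in JSON object and assert that JSON.parse is callable.
	JSON := vm.Get("JSON").ToObject(vm)
	parse, ok := goja.AssertFunction(JSON.Get("parse"))
	if !ok {
		panic("JSON.parse is not a function")
	}
	// Call it from Go with a null `this` and a JS string argument.
	val, err := parse(goja.Null(), vm.ToValue(`{"jsonrpc":"2.0","result":7}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(val.ToObject(vm).Get("result")) // 7
}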

func setError(resp *otto.Object, code int, msg string) {
func setError(resp *goja.Object, code int, msg string) {
resp.Set("error", map[string]interface{}{"code": code, "message": msg})
}

// throwJSException panics on an otto.Value. The Otto VM will recover from the
// Go panic and throw msg as a JavaScript error.
func throwJSException(msg interface{}) otto.Value {
val, err := otto.ToValue(msg)
if err != nil {
log.Error("Failed to serialize JavaScript exception", "exception", msg, "err", err)
}
panic(val)
// isNumber returns true if input value is a JS number.
func isNumber(v goja.Value) bool {
k := v.ExportType().Kind()
return k >= reflect.Int && k <= reflect.Float64
}

func getObject(vm *goja.Runtime, name string) *goja.Object {
v := vm.Get(name)
if v == nil {
return nil
}
return v.ToObject(vm)
}
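For context, a minimal sketch (assuming only the goja package) of why the reflect.Kind range check in isNumber covers JS numbers: goja exports integral numbers as int64 and fractional ones as float64, both of which fall between reflect.Int and reflect.Float64:

package main

import (
	"fmt"
	"reflect"

	"github.com/dop251/goja"
)

// isNumber mirrors the helper above: int64 and float64 kinds both lie
// inside the [reflect.Int, reflect.Float64] range, while string does not.
func isNumber(v goja.Value) bool {
	k := v.ExportType().Kind()
	return k >= reflect.Int && k <= reflect.Float64
}

func main() {
	vm := goja.New()
	n, _ := vm.RunString("42")      // exported as int64
	f, _ := vm.RunString("1.5")     // exported as float64
	s, _ := vm.RunString("'hello'") // exported as string
	fmt.Println(isNumber(n), isNumber(f), isNumber(s)) // true true false
}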

@@ -28,12 +28,13 @@ import (
"strings"
"syscall"

"github.com/dop251/goja"
"github.com/ethereum/go-ethereum/internal/jsre"
"github.com/ethereum/go-ethereum/internal/jsre/deps"
"github.com/ethereum/go-ethereum/internal/web3ext"
"github.com/ethereum/go-ethereum/rpc"
"github.com/mattn/go-colorable"
"github.com/peterh/liner"
"github.com/robertkrimen/otto"
)

var (
@@ -86,6 +87,7 @@ func New(config Config) (*Console, error) {
if config.Printer == nil {
config.Printer = colorable.NewColorableStdout()
}

// Initialize the console and return
console := &Console{
client: config.Client,
@@ -107,110 +109,35 @@ func New(config Config) (*Console, error) {
// init retrieves the available APIs from the remote RPC provider and initializes
// the console's JavaScript namespaces based on the exposed modules.
func (c *Console) init(preload []string) error {
// Initialize the JavaScript <-> Go RPC bridge
c.initConsoleObject()

// Initialize the JavaScript <-> Go RPC bridge.
bridge := newBridge(c.client, c.prompter, c.printer)
c.jsre.Set("jeth", struct{}{})

jethObj, _ := c.jsre.Get("jeth")
jethObj.Object().Set("send", bridge.Send)
jethObj.Object().Set("sendAsync", bridge.Send)

consoleObj, _ := c.jsre.Get("console")
consoleObj.Object().Set("log", c.consoleOutput)
consoleObj.Object().Set("error", c.consoleOutput)

// Load all the internal utility JavaScript libraries
if err := c.jsre.Compile("bignumber.js", jsre.BignumberJs); err != nil {
return fmt.Errorf("bignumber.js: %v", err)
}
if err := c.jsre.Compile("web3.js", jsre.Web3Js); err != nil {
return fmt.Errorf("web3.js: %v", err)
}
if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
return fmt.Errorf("web3 require: %v", err)
}
if _, err := c.jsre.Run("var web3 = new Web3(jeth);"); err != nil {
return fmt.Errorf("web3 provider: %v", err)
}
// Load the supported APIs into the JavaScript runtime environment
apis, err := c.client.SupportedModules()
if err != nil {
return fmt.Errorf("api modules: %v", err)
}
flatten := "var eth = web3.eth; var personal = web3.personal; "
for api := range apis {
if api == "web3" {
continue // manually mapped or ignore
}
if file, ok := web3ext.Modules[api]; ok {
// Load our extension for the module.
if err = c.jsre.Compile(fmt.Sprintf("%s.js", api), file); err != nil {
return fmt.Errorf("%s.js: %v", api, err)
}
flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
} else if obj, err := c.jsre.Run("web3." + api); err == nil && obj.IsObject() {
// Enable web3.js built-in extension if available.
flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
}
}
if _, err = c.jsre.Run(flatten); err != nil {
return fmt.Errorf("namespace flattening: %v", err)
}
// Initialize the global name register (disabled for now)
//c.jsre.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)

// If the console is in interactive mode, instrument password related methods to query the user
if c.prompter != nil {
// Retrieve the account management object to instrument
personal, err := c.jsre.Get("personal")
if err != nil {
return err
}
// Override the openWallet, unlockAccount, newAccount and sign methods since
// these require user interaction. Assign the original web3 callbacks to
// these methods in the Console. These will be called by the jeth.* methods after
// they got the password from the user and send the original web3 request to
// the backend.
if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
if _, err = c.jsre.Run(`jeth.openWallet = personal.openWallet;`); err != nil {
return fmt.Errorf("personal.openWallet: %v", err)
}
if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
return fmt.Errorf("personal.unlockAccount: %v", err)
}
if _, err = c.jsre.Run(`jeth.newAccount = personal.newAccount;`); err != nil {
return fmt.Errorf("personal.newAccount: %v", err)
}
if _, err = c.jsre.Run(`jeth.sign = personal.sign;`); err != nil {
return fmt.Errorf("personal.sign: %v", err)
}
obj.Set("openWallet", bridge.OpenWallet)
obj.Set("unlockAccount", bridge.UnlockAccount)
obj.Set("newAccount", bridge.NewAccount)
obj.Set("sign", bridge.Sign)
}
}
// The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
admin, err := c.jsre.Get("admin")
if err != nil {
if err := c.initWeb3(bridge); err != nil {
return err
}
if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
obj.Set("sleepBlocks", bridge.SleepBlocks)
obj.Set("sleep", bridge.Sleep)
obj.Set("clearHistory", c.clearHistory)
if err := c.initExtensions(); err != nil {
return err
}
// Preload any JavaScript files before starting the console

// Add bridge overrides for web3.js functionality.
c.jsre.Do(func(vm *goja.Runtime) {
c.initAdmin(vm, bridge)
c.initPersonal(vm, bridge)
})

// Preload JavaScript files.
for _, path := range preload {
if err := c.jsre.Exec(path); err != nil {
failure := err.Error()
if ottoErr, ok := err.(*otto.Error); ok {
failure = ottoErr.String()
if gojaErr, ok := err.(*goja.Exception); ok {
failure = gojaErr.String()
}
return fmt.Errorf("%s: %v", path, failure)
}
}
// Configure the console's input prompter for scrollback and tab completion

// Configure the input prompter for history and tab completion.
if c.prompter != nil {
if content, err := ioutil.ReadFile(c.histPath); err != nil {
c.prompter.SetHistory(nil)
@@ -223,6 +150,102 @@ func (c *Console) init(preload []string) error {
return nil
}

func (c *Console) initConsoleObject() {
c.jsre.Do(func(vm *goja.Runtime) {
console := vm.NewObject()
console.Set("log", c.consoleOutput)
console.Set("error", c.consoleOutput)
vm.Set("console", console)
})
}
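A minimal standalone sketch of the console override wired up above, assuming only the github.com/dop251/goja package: a Go callback replaces console.log so its output lands on a Go writer instead of being swallowed by the VM:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/dop251/goja"
)

func main() {
	vm := goja.New()
	printer := os.Stdout

	// goja accepts func(goja.FunctionCall) goja.Value as a native function.
	console := vm.NewObject()
	console.Set("log", func(call goja.FunctionCall) goja.Value {
		var out []string
		for _, arg := range call.Arguments {
			out = append(out, fmt.Sprintf("%v", arg))
		}
		fmt.Fprintln(printer, strings.Join(out, " "))
		return goja.Null()
	})
	vm.Set("console", console)

	vm.RunString(`console.log("hello", 42)`) // prints: hello 42
}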

func (c *Console) initWeb3(bridge *bridge) error {
bnJS := string(deps.MustAsset("bignumber.js"))
web3JS := string(deps.MustAsset("web3.js"))
if err := c.jsre.Compile("bignumber.js", bnJS); err != nil {
return fmt.Errorf("bignumber.js: %v", err)
}
if err := c.jsre.Compile("web3.js", web3JS); err != nil {
return fmt.Errorf("web3.js: %v", err)
}
if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
return fmt.Errorf("web3 require: %v", err)
}
var err error
c.jsre.Do(func(vm *goja.Runtime) {
transport := vm.NewObject()
transport.Set("send", jsre.MakeCallback(vm, bridge.Send))
transport.Set("sendAsync", jsre.MakeCallback(vm, bridge.Send))
vm.Set("_consoleWeb3Transport", transport)
_, err = vm.RunString("var web3 = new Web3(_consoleWeb3Transport)")
})
return err
}

// initExtensions loads and registers web3.js extensions.
func (c *Console) initExtensions() error {
// Compute aliases from server-provided modules.
apis, err := c.client.SupportedModules()
if err != nil {
return fmt.Errorf("api modules: %v", err)
}
aliases := map[string]struct{}{"eth": {}, "personal": {}}
for api := range apis {
if api == "web3" {
continue
}
aliases[api] = struct{}{}
if file, ok := web3ext.Modules[api]; ok {
if err = c.jsre.Compile(api+".js", file); err != nil {
return fmt.Errorf("%s.js: %v", api, err)
}
}
}

// Apply aliases.
c.jsre.Do(func(vm *goja.Runtime) {
web3 := getObject(vm, "web3")
for name := range aliases {
if v := web3.Get(name); v != nil {
vm.Set(name, v)
}
}
})
return nil
}

// initAdmin creates additional admin APIs implemented by the bridge.
func (c *Console) initAdmin(vm *goja.Runtime, bridge *bridge) {
if admin := getObject(vm, "admin"); admin != nil {
admin.Set("sleepBlocks", jsre.MakeCallback(vm, bridge.SleepBlocks))
admin.Set("sleep", jsre.MakeCallback(vm, bridge.Sleep))
admin.Set("clearHistory", c.clearHistory)
}
}

// initPersonal redirects account-related API methods through the bridge.
//
// If the console is in interactive mode and the 'personal' API is available, override
// the openWallet, unlockAccount, newAccount and sign methods since these require user
// interaction. The original web3 callbacks are stored in 'jeth'. These will be called
// by the bridge after the prompt and send the original web3 request to the backend.
func (c *Console) initPersonal(vm *goja.Runtime, bridge *bridge) {
personal := getObject(vm, "personal")
if personal == nil || c.prompter == nil {
return
}
jeth := vm.NewObject()
vm.Set("jeth", jeth)
jeth.Set("openWallet", personal.Get("openWallet"))
jeth.Set("unlockAccount", personal.Get("unlockAccount"))
jeth.Set("newAccount", personal.Get("newAccount"))
jeth.Set("sign", personal.Get("sign"))
personal.Set("openWallet", jsre.MakeCallback(vm, bridge.OpenWallet))
personal.Set("unlockAccount", jsre.MakeCallback(vm, bridge.UnlockAccount))
personal.Set("newAccount", jsre.MakeCallback(vm, bridge.NewAccount))
personal.Set("sign", jsre.MakeCallback(vm, bridge.Sign))
}

func (c *Console) clearHistory() {
c.history = nil
c.prompter.ClearHistory()
@@ -235,13 +258,13 @@ func (c *Console) clearHistory() {

// consoleOutput is an override for the console.log and console.error methods to
// stream the output into the configured output stream instead of stdout.
func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
func (c *Console) consoleOutput(call goja.FunctionCall) goja.Value {
var output []string
for _, argument := range call.ArgumentList {
for _, argument := range call.Arguments {
output = append(output, fmt.Sprintf("%v", argument))
}
fmt.Fprintln(c.printer, strings.Join(output, " "))
return otto.Value{}
return goja.Null()
}

// AutoCompleteInput is a pre-assembled word completer to be used by the user
@@ -304,13 +327,13 @@ func (c *Console) Welcome() {

// Evaluate executes code and pretty prints the result to the specified output
// stream.
func (c *Console) Evaluate(statement string) error {
func (c *Console) Evaluate(statement string) {
defer func() {
if r := recover(); r != nil {
fmt.Fprintf(c.printer, "[native] error: %v\n", r)
}
}()
return c.jsre.Evaluate(statement, c.printer)
c.jsre.Evaluate(statement, c.printer)
}
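The deferred recover above turns a panic from native bridge code into a printed error instead of terminating the console loop. A minimal sketch of the same guard, with hypothetical names:

package main

import "fmt"

// evaluate runs a callback and converts any panic into a printed error,
// mirroring the recover pattern in Console.Evaluate.
func evaluate(run func()) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("[native] error: %v\n", r)
		}
	}()
	run()
}

func main() {
	evaluate(func() { panic("boom") }) // prints: [native] error: boom
	fmt.Println("console loop continues")
}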

// Interactive starts an interactive user session, where input is prompted from

@@ -289,7 +289,7 @@ func TestPrettyError(t *testing.T) {
defer tester.Close(t)
tester.console.Evaluate("throw 'hello'")

want := jsre.ErrorColor("hello") + "\n"
want := jsre.ErrorColor("hello") + "\n\tat <eval>:1:7(1)\n\n"
if output := tester.output.String(); output != want {
t.Fatalf("pretty error mismatch: have %s, want %s", output, want)
}

@@ -29,8 +29,9 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)

// CheckpointOracle is a Go wrapper around an on-chain light client checkpoint oracle.
// CheckpointOracle is a Go wrapper around an on-chain checkpoint oracle contract.
type CheckpointOracle struct {
address common.Address
contract *contract.CheckpointOracle
}

@@ -40,7 +41,12 @@ func NewCheckpointOracle(contractAddr common.Address, backend bind.ContractBacke
if err != nil {
return nil, err
}
return &CheckpointOracle{contract: c}, nil
return &CheckpointOracle{address: contractAddr, contract: c}, nil
}

// ContractAddr returns the address of the contract.
func (oracle *CheckpointOracle) ContractAddr() common.Address {
return oracle.address
}

// Contract returns the underlying contract instance.

@@ -407,6 +407,11 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
}
rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high
// to low, so it's safe to update the in-memory markers directly.
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
@@ -419,6 +424,11 @@ func (bc *BlockChain) SetHead(head uint64) error {
newHeadFastBlock = bc.genesisBlock
}
rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high
// to low, so it's safe to update the in-memory markers directly.
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
}
@@ -538,21 +548,22 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
defer bc.chainmu.Unlock()

// Prepare the genesis block and reinitialise the chain
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
log.Crit("Failed to write genesis block TD", "err", err)
batch := bc.db.NewBatch()
rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
rawdb.WriteBlock(batch, genesis)
if err := batch.Write(); err != nil {
log.Crit("Failed to write genesis block", "err", err)
}
rawdb.WriteBlock(bc.db, genesis)
bc.writeHeadBlock(genesis)

// Last update all in-memory chain markers
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock.Store(bc.genesisBlock)
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))

bc.hc.SetGenesis(bc.genesisBlock.Header())
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock.Store(bc.genesisBlock)
headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))

return nil
}

@@ -610,31 +621,39 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
return nil
}

// insert injects a new head block into the current block chain. This method
// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// If the block is on a side chain or an unknown one, force other heads onto it too
updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

// Add the block to the canonical chain number scheme and mark as the head
rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(bc.db, block.Hash())

bc.currentBlock.Store(block)
headBlockGauge.Update(int64(block.NumberU64()))
batch := bc.db.NewBatch()
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
rawdb.WriteTxLookupEntries(batch, block)
rawdb.WriteHeadBlockHash(batch, block.Hash())

// If the block is better than our head or is on a different chain, force update heads
if updateHeads {
rawdb.WriteHeadHeaderHash(batch, block.Hash())
rawdb.WriteHeadFastBlockHash(batch, block.Hash())
}
// Flush the whole batch into the disk, exit the node if failed
if err := batch.Write(); err != nil {
log.Crit("Failed to update chain indexes and markers", "err", err)
}
// Update all in-memory chain markers in the last step
if updateHeads {
bc.hc.SetCurrentHeader(block.Header())
rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
}
bc.currentBlock.Store(block)
headBlockGauge.Update(int64(block.NumberU64()))
}
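The pattern above, staging every related write in one batch and flushing once, is what makes the head-marker update crash-safe: either all markers land on disk or none do. A minimal self-contained sketch, assuming only the core/rawdb and common packages from go-ethereum; the hash value is arbitrary:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	hash := common.HexToHash("0x01")

	// Stage related marker writes in a single batch.
	batch := db.NewBatch()
	rawdb.WriteCanonicalHash(batch, hash, 1)
	rawdb.WriteHeadBlockHash(batch, hash)

	// Nothing is visible in the database until the batch is written atomically.
	if err := batch.Write(); err != nil {
		panic(err)
	}
	fmt.Println(rawdb.ReadHeadBlockHash(db) == hash) // true
}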

// Genesis retrieves the chain's genesis block.
@@ -881,26 +900,36 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()

batch := bc.db.NewBatch()
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]

// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of rollback is from high
// to low, so it's safe to update the in-memory markers directly.
currentHeader := bc.hc.CurrentHeader()
if currentHeader.Hash() == hash {
bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
bc.hc.SetCurrentHeader(newHeadHeader)
}
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
bc.currentFastBlock.Store(newFastBlock)
headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
}
if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
bc.currentBlock.Store(newBlock)
headBlockGauge.Update(int64(newBlock.NumberU64()))
}
}
if err := batch.Write(); err != nil {
log.Crit("Failed to rollback chain markers", "err", err)
}
// Truncate ancient data which exceeds the current header.
//
// Notably, it can happen that the system crashes without truncating the ancient data
@@ -1063,7 +1092,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
// Don't collect too much in-memory, write it out every 100K blocks
if len(deleted) > 100000 {

// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.Sync(); err != nil {
return 0, err
@@ -1172,7 +1200,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
rawdb.WriteTxLookupEntries(batch, block)

stats.processed++
// Write everything that belongs to the blocks into the database, so that
// we can ensure all components of the body are completed (body, receipts,
// tx indexes)
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return 0, err
@@ -1180,7 +1210,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
size += batch.ValueSize()
batch.Reset()
}
stats.processed++
}
// Write everything that belongs to the blocks into the database, so that
// we can ensure all components of the body are completed (body, receipts,
// tx indexes)
if batch.ValueSize() > 0 {
size += batch.ValueSize()
if err := batch.Write(); err != nil {
@@ -1231,11 +1265,12 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e
bc.wg.Add(1)
defer bc.wg.Done()

if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
return err
batch := bc.db.NewBatch()
rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
rawdb.WriteBlock(batch, block)
if err := batch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
rawdb.WriteBlock(bc.db, block)

return nil
}

@@ -1251,25 +1286,21 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
return err
}
}
// Write the positional metadata for transaction/receipt lookups.
// Preimages here is empty, ignore it.
rawdb.WriteTxLookupEntries(bc.db, block)

bc.insert(block)
bc.writeHeadBlock(block)
return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()

return bc.writeBlockWithState(block, receipts, state)
return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
}

// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()

@@ -1283,12 +1314,19 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)

// Irrelevant of the canonical status, write the block itself to the database
if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
return NonStatTy, err
// Irrelevant of the canonical status, write the block itself to the database.
//
// Note all the components of the block (td, hash->number map, header, body, receipts)
// should be written atomically. BlockBatch is used for containing all components.
blockBatch := bc.db.NewBatch()
rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(blockBatch, state.Preimages())
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
rawdb.WriteBlock(bc.db, block)

// Commit all cached state changes into underlying memory database.
root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return NonStatTy, err
@@ -1347,11 +1385,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
}
}

// Write other block data using a batch.
batch := bc.db.NewBatch()
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -1377,23 +1410,32 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
return NonStatTy, err
}
}
// Write the positional metadata for transaction/receipt lookups and preimages
rawdb.WriteTxLookupEntries(batch, block)
rawdb.WritePreimages(batch, state.Preimages())

status = CanonStatTy
} else {
status = SideStatTy
}
if err := batch.Write(); err != nil {
return NonStatTy, err
}

// Set new head.
if status == CanonStatTy {
bc.insert(block)
bc.writeHeadBlock(block)
}
bc.futureBlocks.Remove(block.Hash())

if status == CanonStatTy {
bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
// In theory we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of
// canonical blocks. To avoid firing too many ChainHeadEvents,
// we fire an accumulated ChainHeadEvent and disable firing
// the event here.
if emitHeadEvent {
bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
}
} else {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
return status, nil
}

@@ -1444,11 +1486,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
bc.chainmu.Lock()
n, events, logs, err := bc.insertChain(chain, true)
n, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()

bc.PostChainEvents(events, logs)
return n, err
}

@@ -1460,23 +1501,24 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// racy behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then a new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil, nil, nil
return 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
// acquiring.
var (
stats = insertStats{startTime: mclock.Now()}
events = make([]interface{}, 0, len(chain))
lastCanon *types.Block
coalescedLogs []*types.Log
stats = insertStats{startTime: mclock.Now()}
lastCanon *types.Block
)
// Fire a single chain head event if we've progressed the chain
defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
}
}()
// Start the parallel header verifier
headers := make([]*types.Header, len(chain))
seals := make([]bool, len(chain))
@@ -1526,7 +1568,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && err == ErrKnownBlock {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
return it.index, nil, nil, err
return it.index, err
}
lastCanon = block

@@ -1545,7 +1587,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
block, err = it.next()
}
@@ -1553,14 +1595,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
stats.ignored += it.remaining()

// If there are any still remaining, mark as ignored
return it.index, events, coalescedLogs, err
return it.index, err

// Some other error occurred, abort
case err != nil:
bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
return it.index, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
@@ -1572,7 +1614,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return it.index, events, coalescedLogs, ErrBlacklistedHash
return it.index, ErrBlacklistedHash
}
// If the block is known (in the middle of the chain), it's a special case for
// Clique blocks where they can share state among each other, so importing an
@@ -1589,15 +1631,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"root", block.Root())

if err := bc.writeKnownBlock(block); err != nil {
return it.index, nil, nil, err
return it.index, err
}
stats.processed++

// We can assume that logs are empty here, since the only way for consecutive
// Clique blocks to have the same state is if there are no transactions.
events = append(events, ChainEvent{block, block.Hash(), nil})
lastCanon = block

continue
}
// Retrieve the parent block and its state to execute on top
@@ -1609,23 +1649,22 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
}
statedb, err := state.New(parent.Root, bc.stateCache)
if err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
var followupInterrupt uint32

if !bc.cacheConfig.TrieCleanNoPrefetch {
if followup, err := it.peek(); followup != nil && err == nil {
go func(start time.Time) {
throwaway, _ := state.New(parent.Root, bc.stateCache)
bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
throwaway, _ := state.New(parent.Root, bc.stateCache)
go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, interrupt)

blockPrefetchExecuteTimer.Update(time.Since(start))
if atomic.LoadUint32(&followupInterrupt) == 1 {
if atomic.LoadUint32(interrupt) == 1 {
blockPrefetchInterruptMeter.Mark(1)
}
}(time.Now())
}(time.Now(), followup, throwaway, &followupInterrupt)
}
}
// Process block using the parent state as reference point
@@ -1634,7 +1673,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
@@ -1653,7 +1692,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
proctime := time.Since(start)

@@ -1665,10 +1704,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []

// Write the block to the chain and get the status.
substart = time.Now()
status, err := bc.writeBlockWithState(block, receipts, statedb)
status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
if err != nil {
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
atomic.StoreUint32(&followupInterrupt, 1)

@@ -1686,8 +1725,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"elapsed", common.PrettyDuration(time.Since(start)),
"root", block.Root())

coalescedLogs = append(coalescedLogs, logs...)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block

// Only count canonical blocks for GC processing time
@@ -1698,7 +1735,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
events = append(events, ChainSideEvent{block})

default:
// This in theory is impossible, but let's be nice to our future selves and leave
@@ -1717,24 +1753,20 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
block, err = it.next()

for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
stats.queued++
}
}
stats.ignored += it.remaining()

// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
return it.index, events, coalescedLogs, err
return it.index, err
}

// insertSideChain is called when an import batch hits upon a pruned ancestor
@@ -1743,7 +1775,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
@@ -1779,7 +1811,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return it.index, nil, nil, errors.New("sidechain ghost-state attack")
return it.index, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
@@ -1790,7 +1822,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil {
return it.index, nil, nil, err
return it.index, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@@ -1807,7 +1839,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
localTd := bc.GetTd(current.Hash(), current.NumberU64())
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
return it.index, nil, nil, err
return it.index, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
@@ -1822,7 +1854,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return it.index, nil, nil, errors.New("missing parent")
return it.index, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@@ -1841,15 +1873,15 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, _, _, err := bc.insertChain(blocks, false); err != nil {
return 0, nil, nil, err
if _, err := bc.insertChain(blocks, false); err != nil {
return 0, err
}
blocks, memory = blocks[:0], 0

// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
return 0, nil, nil, nil
return 0, nil
}
}
}
@@ -1857,7 +1889,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
return 0, nil, nil, nil
return 0, nil
}

// reorg takes two blocks, an old chain and a new chain and will reconstruct the
@@ -1872,11 +1904,11 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs types.Transactions
addedTxs types.Transactions

deletedLogs []*types.Log
rebirthLogs []*types.Log
deletedLogs [][]*types.Log
rebirthLogs [][]*types.Log

// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// collectLogs collects the logs that were generated or removed during
// the processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn
collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
@@ -1884,17 +1916,39 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)

var logs []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
l := *log
if removed {
l.Removed = true
deletedLogs = append(deletedLogs, &l)
} else {
rebirthLogs = append(rebirthLogs, &l)
}
logs = append(logs, &l)
}
}
if len(logs) > 0 {
if removed {
deletedLogs = append(deletedLogs, logs)
} else {
rebirthLogs = append(rebirthLogs, logs)
}
}
}
// mergeLogs returns a merged log slice with specified sort order.
mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
var ret []*types.Log
if reverse {
for i := len(logs) - 1; i >= 0; i-- {
ret = append(ret, logs[i]...)
}
} else {
for i := 0; i < len(logs); i++ {
ret = append(ret, logs[i]...)
}
}
return ret
}
)
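A self-contained sketch of mergeLogs' ordering semantics, using ints in place of *types.Log to stay standalone: deleted logs are announced newest-block-first (reverse order), reborn logs oldest-first:

package main

import "fmt"

// mergeLogs flattens per-block log groups in the requested direction,
// mirroring the closure above.
func mergeLogs(logs [][]int, reverse bool) []int {
	var ret []int
	if reverse {
		for i := len(logs) - 1; i >= 0; i-- {
			ret = append(ret, logs[i]...)
		}
	} else {
		for i := 0; i < len(logs); i++ {
			ret = append(ret, logs[i]...)
		}
	}
	return ret
}

func main() {
	perBlock := [][]int{{1, 2}, {3}, {4, 5}} // logs grouped per block, oldest block first
	fmt.Println(mergeLogs(perBlock, true))   // [4 5 3 1 2] — deletions, newest first
	fmt.Println(mergeLogs(perBlock, false))  // [1 2 3 4 5] — rebirths, oldest first
}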
// Reduce the longer chain to the same number as the shorter one
@@ -1961,20 +2015,19 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// taking care of the proper incremental order.
for i := len(newChain) - 1; i >= 1; i-- {
// Insert the block in the canonical way, re-writing history
bc.insert(newChain[i])
bc.writeHeadBlock(newChain[i])

// Collect reborn logs due to chain reorg
collectLogs(newChain[i].Hash(), false)

// Write lookup entries for hash based transaction/receipt searches
rawdb.WriteTxLookupEntries(bc.db, newChain[i])
// Collect the newly added transactions.
addedTxs = append(addedTxs, newChain[i].Transactions()...)
}
// When transactions get deleted from the database, the receipts that were
// created in the fork must also be deleted
batch := bc.db.NewBatch()
// Delete useless indexes right now, including the non-canonical
// transaction indexes and the canonical chain indexes above the head.
indexesBatch := bc.db.NewBatch()
for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
rawdb.DeleteTxLookupEntry(batch, tx.Hash())
rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
}
// Delete any canonical number assignments above the new head
number := bc.CurrentBlock().NumberU64()
@@ -1983,54 +2036,29 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(batch, i)
rawdb.DeleteCanonicalHash(indexesBatch, i)
}
if err := indexesBatch.Write(); err != nil {
log.Crit("Failed to delete useless indexes", "err", err)
}
batch.Write()
// If any logs need to be fired, do it now. In theory we could avoid creating
// this goroutine if there are no events to fire, but realistically that only
// ever happens if we're reorging empty blocks, which will only happen on idle
// networks where performance is not an issue either way.
//
// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
// event ordering?
go func() {
if len(deletedLogs) > 0 {
bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
if len(deletedLogs) > 0 {
bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
}
if len(rebirthLogs) > 0 {
bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
}
if len(oldChain) > 0 {
for i := len(oldChain) - 1; i >= 0; i-- {
bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
}
if len(rebirthLogs) > 0 {
bc.logsFeed.Send(rebirthLogs)
}
if len(oldChain) > 0 {
for _, block := range oldChain {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
}
}()
}
return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
// post event logs for further processing
if logs != nil {
bc.logsFeed.Send(logs)
}
for _, event := range events {
switch ev := event.(type) {
case ChainEvent:
bc.chainFeed.Send(ev)

case ChainHeadEvent:
bc.chainHeadFeed.Send(ev)

case ChainSideEvent:
bc.chainSideFeed.Send(ev)
}
}
}

func (bc *BlockChain) update() {
futureTimer := time.NewTicker(5 * time.Second)
defer futureTimer.Stop()

@@ -22,6 +22,7 @@ import (
"math/big"
"math/rand"
"os"
"reflect"
"sync"
"testing"
"time"
@@ -960,16 +961,20 @@ func TestLogReorgs(t *testing.T) {
}

chain, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}

timeout := time.NewTimer(1 * time.Second)
select {
case ev := <-rmLogsCh:
done := make(chan struct{})
go func() {
ev := <-rmLogsCh
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
close(done)
}()
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
timeout := time.NewTimer(1 * time.Second)
select {
case <-done:
case <-timeout.C:
t.Fatal("timeout: no RemovedLogsEvent has been sent")
}
@@ -982,39 +987,47 @@ func TestLogRebirth(t *testing.T) {
db = rawdb.NewMemoryDatabase()

// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
removeLogCh = make(chan bool)
)

// listenNewLog checks whether the number of received logs equals the expected count.
listenNewLog := func(sink chan []*types.Log, expect int) {
// validateLogEvent checks whether the number of received events equals the expected count.
validateLogEvent := func(sink interface{}, result chan bool, expect int) {
chanval := reflect.ValueOf(sink)
chantyp := chanval.Type()
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.RecvDir == 0 {
t.Fatalf("invalid channel, given type %v", chantyp)
}
cnt := 0
var recv []reflect.Value
timeout := time.After(1 * time.Second)
cases := []reflect.SelectCase{{Chan: chanval, Dir: reflect.SelectRecv}, {Chan: reflect.ValueOf(timeout), Dir: reflect.SelectRecv}}
for {
select {
case logs := <-sink:
cnt += len(logs)
case <-time.NewTimer(5 * time.Second).C:
// new logs timeout
newLogCh <- false
chose, v, _ := reflect.Select(cases)
if chose == 1 {
// Not enough events received
result <- false
return
}
cnt += 1
recv = append(recv, v)
if cnt == expect {
break
} else if cnt > expect {
// redundant logs received
newLogCh <- false
return
}
}
select {
case <-sink:
// redundant logs received
newLogCh <- false
case <-time.NewTimer(100 * time.Millisecond).C:
newLogCh <- true
done := time.After(50 * time.Millisecond)
cases = cases[:1]
cases = append(cases, reflect.SelectCase{Chan: reflect.ValueOf(done), Dir: reflect.SelectRecv})
chose, _, _ := reflect.Select(cases)
// If chose equals 0, redundant events were received.
if chose == 1 {
result <- true
} else {
result <- false
}
}
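The reflect.Select construction above lets one helper wait on channels of different element types. A minimal sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"reflect"
	"time"
)

// recvWithTimeout waits on a dynamically-typed receive channel with a
// timeout, without knowing the channel's element type at compile time.
func recvWithTimeout(sink interface{}, d time.Duration) (interface{}, bool) {
	cases := []reflect.SelectCase{
		{Chan: reflect.ValueOf(sink), Dir: reflect.SelectRecv},
		{Chan: reflect.ValueOf(time.After(d)), Dir: reflect.SelectRecv},
	}
	chose, v, _ := reflect.Select(cases)
	if chose == 1 {
		return nil, false // timed out
	}
	return v.Interface(), true
}

func main() {
	ch := make(chan string, 1)
	ch <- "log event"
	fmt.Println(recvWithTimeout(ch, time.Second)) // log event true
}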
@@ -1038,12 +1051,12 @@ func TestLogRebirth(t *testing.T) {
})

// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}

// Generate long reorg chain
@@ -1060,40 +1073,31 @@ func TestLogRebirth(t *testing.T) {
})

// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(forkChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("timeout: no RemovedLogsEvent has been sent")
if !<-removeLogCh {
t.Fatal("failed to receive removed log event")
}

newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("timeout: no RemovedLogsEvent has been sent")
}
// Rebirth logs should emit a newLogEvent
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Ensure removedLog events received
if !<-removeLogCh {
t.Fatal("failed to receive removed log event")
}
}

@@ -1145,7 +1149,6 @@ func TestSideLogRebirth(t *testing.T) {

logsCh := make(chan []*types.Log)
blockchain.SubscribeLogsEvent(logsCh)

chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
// Higher block difficulty
@@ -203,7 +203,7 @@ func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, b
}

func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
firstChanged := headNum / b.indexer.sectionSize
firstChanged := (headNum + 1) / b.indexer.sectionSize
if firstChanged < b.stored {
b.stored = firstChanged
}
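A tiny worked check of the off-by-one fix above, assuming a section size of 4096: if the head is reorged to block 4095, section 0 (blocks 0 through 4095) is still complete, so one section should remain stored. The old expression wrongly yields 0; the new one yields 1. The program below is illustrative only:

package main

import "fmt"

func main() {
	const sectionSize = 4096
	headNum := uint64(4095) // head reorged to the last block of section 0

	fmt.Println(headNum / sectionSize)       // old: 0 — wrongly invalidates the complete section 0
	fmt.Println((headNum + 1) / sectionSize) // new: 1 — keeps the complete section stored
}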

@@ -24,11 +24,6 @@ import (
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }

// PendingLogsEvent is posted pre mining and notifies of pending logs.
type PendingLogsEvent struct {
Logs []*types.Log
}

// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
core/evm.go
@@ -60,24 +60,32 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author

// GetHashFn returns a GetHashFunc which retrieves header hashes by number
func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash {
var cache map[uint64]common.Hash
// Cache will initially contain [refHash.parent],
// Then fill up with [refHash.p, refHash.pp, refHash.ppp, ...]
var cache []common.Hash

return func(n uint64) common.Hash {
// If there's no hash cache yet, make one
if cache == nil {
cache = map[uint64]common.Hash{
ref.Number.Uint64() - 1: ref.ParentHash,
if len(cache) == 0 {
cache = append(cache, ref.ParentHash)
}
if idx := ref.Number.Uint64() - n - 1; idx < uint64(len(cache)) {
return cache[idx]
}
// No luck in the cache, but we can start iterating from the last element we already know
lastKnownHash := cache[len(cache)-1]
lastKnownNumber := ref.Number.Uint64() - uint64(len(cache))

for {
header := chain.GetHeader(lastKnownHash, lastKnownNumber)
if header == nil {
break
}
}
// Try to fulfill the request from the cache
if hash, ok := cache[n]; ok {
return hash
}
// Not cached, iterate the blocks and cache the hashes
for header := chain.GetHeader(ref.ParentHash, ref.Number.Uint64()-1); header != nil; header = chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) {
cache[header.Number.Uint64()-1] = header.ParentHash
if n == header.Number.Uint64()-1 {
return header.ParentHash
cache = append(cache, header.ParentHash)
lastKnownHash = header.ParentHash
lastKnownNumber = header.Number.Uint64() - 1
if n == lastKnownNumber {
return lastKnownHash
}
}
return common.Hash{}
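The rewritten GetHashFn above swaps the number-keyed map for a flat slice of ancestor hashes, so a BLOCKHASH lookup becomes index arithmetic plus an on-demand walk backwards. A minimal sketch of just the cache-hit step, under the assumption that ancestors[0] holds the reference header's parent hash (the names here are illustrative, not part of core/evm.go):

package main

import "github.com/ethereum/go-ethereum/common"

// lookupAncestor mirrors the cache-hit path of the new GetHashFn: block n is
// found at depth refNumber-n-1, where depth 0 is the parent of the reference
// header (number refNumber-1), depth 1 the grandparent, and so on.
func lookupAncestor(refNumber uint64, ancestors []common.Hash, n uint64) (common.Hash, bool) {
	if n >= refNumber {
		return common.Hash{}, false // only strict ancestors have known hashes
	}
	if idx := refNumber - n - 1; idx < uint64(len(ancestors)) {
		return ancestors[idx], true
	}
	return common.Hash{}, false // miss: the real code keeps walking parents and appending
}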
@@ -57,8 +57,10 @@ func TestCreation(t *testing.T) {
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Today Istanbul block
{10000000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Future Istanbul block
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
{9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
{9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // First Muir Glacier block
{10000000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // Future Muir Glacier block
},
},
// Ropsten test cases
@@ -76,8 +78,10 @@ func TestCreation(t *testing.T) {
{4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
{4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
{6485845, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // First Istanbul block
{7500000, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // Future Istanbul block
{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block
{7117116, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block
{7117117, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // First Muir Glacier block
{7500000, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // Future Muir Glacier block
},
},
// Rinkeby test cases
@@ -181,11 +185,11 @@ func TestValidation(t *testing.T) {
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},

// Local is mainnet Istanbul, far in the future. Remote announces Gopherium (non existing fork)
// Local is mainnet Muir Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{88888888, ID{Hash: checksumToBytes(0x879d6e30), Next: 88888888}, ErrLocalIncompatibleOrStale},
{88888888, ID{Hash: checksumToBytes(0xe029e991), Next: 88888888}, ErrLocalIncompatibleOrStale},

// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible.
@@ -152,10 +152,10 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
}

func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul *big.Int) (*params.ChainConfig, common.Hash, error) {
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul, overrideMuirGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -207,6 +207,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if overrideIstanbul != nil {
newcfg.IstanbulBlock = overrideIstanbul
}
if overrideMuirGlacier != nil {
newcfg.MuirGlacierBlock = overrideMuirGlacier
}
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
@@ -380,8 +383,7 @@ func DefaultGoerliGenesisBlock() *Genesis {
}
}

// DeveloperGenesisBlock returns the 'geth --dev' genesis block. Note, this must
// be seeded with the
// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
config := *params.AllCliqueProtocolChanges
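For reference, a sketch of how the widened setup entry point above can be driven; the override heights are illustrative (they reuse the mainnet activation blocks from the forkid tests earlier), not a recommendation:

package main

import (
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	genesis := core.DefaultGenesisBlock()

	// Per the hunk above, overrides are applied to the stored chain config
	// before the fork-order check, so they must respect earlier fork ordering.
	config, hash, err := core.SetupGenesisBlockWithOverride(db, genesis, big.NewInt(9069000), big.NewInt(9200000))
	if err != nil {
		log.Fatalf("genesis setup failed: %v", err)
	}
	log.Printf("chain %v initialised, genesis %x", config.ChainID, hash)
}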
@@ -45,6 +45,14 @@ const (
// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
//
// HeaderChain is responsible for maintaining the header chain including the
// header query and updating.
//
// The components maintained by headerchain include: (1) total difficulty
// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
// and (5) head header flag.
//
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
@@ -66,10 +74,8 @@ type HeaderChain struct {
engine consensus.Engine
}

// NewHeaderChain creates a new HeaderChain structure.
// getValidator should return the parent's validator
// procInterrupt points to the parent's interrupt semaphore
// wg points to the parent's shutdown wait group
// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
@@ -147,25 +153,33 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
externTd := new(big.Int).Add(header.Difficulty, ptd)

// Irrelevant of the canonical status, write the td and header to the database
if err := hc.WriteTd(hash, number, externTd); err != nil {
log.Crit("Failed to write header total difficulty", "err", err)
//
// Note all the components of header (td, hash->number index and header) should
// be written atomically.
headerBatch := hc.chainDb.NewBatch()
rawdb.WriteTd(headerBatch, hash, number, externTd)
rawdb.WriteHeader(headerBatch, header)
if err := headerBatch.Write(); err != nil {
log.Crit("Failed to write header into disk", "err", err)
}
rawdb.WriteHeader(hc.chainDb, header)

// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
// If the header can be added into canonical chain, adjust the
// header chain markers (canonical indexes and head header flag).
//
// Note all markers should be written atomically.

// Delete any canonical number assignments above the new head
batch := hc.chainDb.NewBatch()
markerBatch := hc.chainDb.NewBatch()
for i := number + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(batch, i)
rawdb.DeleteCanonicalHash(markerBatch, i)
}
batch.Write()

// Overwrite any stale canonical number assignments
var (
@@ -174,16 +188,19 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
headHeader = hc.GetHeader(headHash, headNumber)
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber)
rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)

headHash = headHeader.ParentHash
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// Extend the canonical chain with the new header
rawdb.WriteCanonicalHash(hc.chainDb, hash, number)
rawdb.WriteHeadHeaderHash(hc.chainDb, hash)

rawdb.WriteCanonicalHash(markerBatch, hash, number)
rawdb.WriteHeadHeaderHash(markerBatch, hash)
if err := markerBatch.Write(); err != nil {
log.Crit("Failed to write header markers into disk", "err", err)
}
// Last step update all in-memory head header markers
hc.currentHeaderHash = hash
hc.currentHeader.Store(types.CopyHeader(header))
headHeaderGauge.Update(header.Number.Int64())
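The pattern in this hunk — stage the td, header and marker writes in a batch, then flush once — is what gives the crash-atomicity the comments promise. A stripped-down sketch of the same pattern against the ethdb interfaces, with placeholder keys rather than the real rawdb schema:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()

	// Stage every related write into one batch; nothing touches disk yet.
	batch := db.NewBatch()
	batch.Put([]byte("td-1"), []byte("total-difficulty")) // placeholder keys for illustration
	batch.Put([]byte("header-1"), []byte("rlp-header"))

	// A single Write commits both entries together, so a crash cannot leave
	// a td on disk without its header (or the other way around).
	if err := batch.Write(); err != nil {
		log.Fatalf("failed to write batch: %v", err)
	}
}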
@@ -192,9 +209,9 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
} else {
status = SideStatTy
}
hc.tdCache.Add(hash, externTd)
hc.headerCache.Add(hash, header)
hc.numberCache.Add(hash, number)

return
}

@@ -396,14 +413,6 @@ func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
return hc.GetTd(hash, *number)
}

// WriteTd stores a block's total difficulty into the database, also caching it
// along the way.
func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) error {
rawdb.WriteTd(hc.chainDb, hash, number, td)
hc.tdCache.Add(hash, new(big.Int).Set(td))
return nil
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
@@ -431,6 +440,8 @@ func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
}

// HasHeader checks if a block header is present in the database or not.
// In theory, if header is present in the database, all relative components
// like td and hash->number should be present too.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
return true
@@ -458,10 +469,9 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
return hc.currentHeader.Load().(*types.Header)
}

// SetCurrentHeader sets the current head header of the canonical chain.
// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
rawdb.WriteHeadHeaderHash(hc.chainDb, head.Hash())

hc.currentHeader.Store(head)
hc.currentHeaderHash = head.Hash()
headHeaderGauge.Update(head.Number.Int64())
@@ -500,11 +510,18 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
// first then remove the relative data from the database.
//
// Update head first (head fast block, head full block) before deleting the data.
markerBatch := hc.chainDb.NewBatch()
if updateFn != nil {
updateFn(hc.chainDb, parent)
updateFn(markerBatch, parent)
}
// Update head header then.
rawdb.WriteHeadHeaderHash(hc.chainDb, parentHash)
rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
if err := markerBatch.Write(); err != nil {
log.Crit("Failed to update chain markers", "error", err)
}
hc.currentHeader.Store(parent)
hc.currentHeaderHash = parentHash
headHeaderGauge.Update(parent.Number.Int64())

// Remove the relative data from the database.
if delFn != nil {
@@ -514,13 +531,11 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
rawdb.DeleteHeader(batch, hash, num)
rawdb.DeleteTd(batch, hash, num)
rawdb.DeleteCanonicalHash(batch, num)

hc.currentHeader.Store(parent)
hc.currentHeaderHash = parentHash
headHeaderGauge.Update(parent.Number.Int64())
}
batch.Write()

// Flush all accumulated deletions.
if err := batch.Write(); err != nil {
log.Crit("Failed to rewind block", "error", err)
}
// Clear out any stale content from the caches
hc.headerCache.Purge()
hc.tdCache.Purge()
@@ -150,11 +150,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}
// Database contains only older data than the freezer, this happens if the
// state was wiped and reinited from an existing freezer.
} else {
// Key-value store continues where the freezer left off, all is fine. We might
// have duplicate blocks (crash after freezer write but before key-value store
// deletion, but that's fine).
}
// Otherwise, key-value store continues where the freezer left off, all is fine.
// We might have duplicate blocks (crash after freezer write but before key-value
// store deletion, but that's fine).
} else {
// If the freezer is empty, ensure nothing was moved yet from the key-value
// store, otherwise we'll end up missing data. We check block #1 to decide
@@ -167,9 +166,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
}
// Block #1 is still in the database, we're allowed to init a new freezer
} else {
// The head header is still the genesis, we're allowed to init a new freezer
}
// Otherwise, the head header is still the genesis, we're allowed to init a new
// freezer.
}
}
// Freezer is consistent with the key-value database, permit combining the two
@@ -55,10 +55,10 @@ func InitDatabaseFromFreezer(db ethdb.Database) error {
if n >= frozen {
return
}
// Retrieve the block from the freezer (no need for the hash, we pull by
// number from the freezer). If successful, pre-cache the block hash and
// the individual transaction hashes for storing into the database.
block := ReadBlock(db, common.Hash{}, n)
// Retrieve the block from the freezer. If successful, pre-cache
// the block hash and the individual transaction hashes for storing
// into the database.
block := ReadBlock(db, ReadCanonicalHash(db, n), n)
if block != nil {
block.Hash()
for _, tx := range block.Transactions() {
@@ -272,10 +272,13 @@ func (s *stateObject) finalise() {
}

// updateTrie writes cached storage modifications into the object's storage trie.
// It will return nil if the trie has not been loaded and no changes have been made
func (s *stateObject) updateTrie(db Database) Trie {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise()

if len(s.pendingStorage) == 0 {
return s.trie
}
// Track the amount of time wasted on updating the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
@@ -305,8 +308,10 @@

// UpdateRoot sets the trie root to the current root hash of the trie
func (s *stateObject) updateRoot(db Database) {
s.updateTrie(db)

// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
return
}
// Track the amount of time wasted on hashing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
@@ -317,7 +322,10 @@
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
func (s *stateObject) CommitTrie(db Database) error {
s.updateTrie(db)
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
return nil
}
if s.dbErr != nil {
return s.dbErr
}
@@ -204,7 +204,7 @@ func (s *StateDB) AddRefund(gas uint64) {
func (s *StateDB) SubRefund(gas uint64) {
s.journal.append(refundChange{prev: s.refund})
if gas > s.refund {
panic("Refund counter below zero")
panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund))
}
s.refund -= gas
}
@@ -330,7 +330,8 @@ func (s *StateDB) StorageTrie(addr common.Address) Trie {
return nil
}
cpy := stateObject.deepCopy(s)
return cpy.updateTrie(s.db)
cpy.updateTrie(s.db)
return cpy.getTrie(s.db)
}

func (s *StateDB) HasSuicided(addr common.Address) bool {
@@ -750,8 +751,10 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountCommits += time.Since(start) }(time.Now())
}
// The onleaf func is called _serially_, so we can reuse the same account
// for unmarshalling every time.
var account Account
return s.trie.Commit(func(leaf []byte, parent common.Hash) error {
var account Account
if err := rlp.DecodeBytes(leaf, &account); err != nil {
return nil
}
@@ -494,11 +494,11 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo

// Discard finds a number of most underpriced transactions, removes them from the
// priced list and returns them for further removal from the entire pool.
func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions {
drop := make(types.Transactions, 0, count) // Remote underpriced transactions to drop
func (l *txPricedList) Discard(slots int, local *accountSet) types.Transactions {
drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep

for len(*l.items) > 0 && count > 0 {
for len(*l.items) > 0 && slots > 0 {
// Discard stale transactions if found during cleanup
tx := heap.Pop(l.items).(*types.Transaction)
if l.all.Get(tx.Hash()) == nil {
@@ -510,7 +510,7 @@ func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions
save = append(save, tx)
} else {
drop = append(drop, tx)
count--
slots -= numSlots(tx)
}
}
for _, tx := range save {
@@ -18,7 +18,6 @@ package core

import (
"errors"
"fmt"
"math"
"math/big"
"sort"
@@ -38,9 +37,25 @@ import (
const (
// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
chainHeadChanSize = 10

// txSlotSize is used to calculate how many data slots a single transaction
// takes up based on its size. The slots are used as DoS protection, ensuring
// that validating a new transaction remains a constant operation (in reality
// O(maxslots), where max slots are 4 currently).
txSlotSize = 32 * 1024

// txMaxSize is the maximum size a single transaction can have. This field has
// non-trivial consequences: larger transactions are significantly harder and
// more expensive to propagate; larger transactions also take more resources
// to validate whether they fit into the pool or not.
txMaxSize = 2 * txSlotSize // 64KB, don't bump without EIP-2464 support
)

var (
// ErrAlreadyKnown is returned if the transaction is already contained
// within the pool.
ErrAlreadyKnown = errors.New("already known")

// ErrInvalidSender is returned if the transaction contains an invalid signature.
ErrInvalidSender = errors.New("invalid sender")
@@ -105,6 +120,7 @@ var (
pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
@@ -510,8 +526,8 @@ func (pool *TxPool) local() map[common.Address]types.Transactions {
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
// Heuristic limit, reject transactions over 32KB to prevent DOS attacks
if tx.Size() > 32*1024 {
// Reject transactions over defined size to prevent DOS attacks
if uint64(tx.Size()) > txMaxSize {
return ErrOversizedData
}
// Transactions can't be negative. This may never happen using RLP decoded
@@ -566,7 +582,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
if pool.all.Get(hash) != nil {
log.Trace("Discarding already known transaction", "hash", hash)
knownTxMeter.Mark(1)
return false, fmt.Errorf("known transaction: %x", hash)
return false, ErrAlreadyKnown
}
// If the transaction fails basic validation, discard it
if err := pool.validateTx(tx, local); err != nil {
@@ -583,7 +599,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
return false, ErrUnderpriced
}
// New transaction is better than our worst ones, make room for it
drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
underpricedTxMeter.Mark(1)
@@ -773,7 +789,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
for i, tx := range txs {
// If the transaction is known, pre-set the error slot
if pool.all.Get(tx.Hash()) != nil {
errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
errs[i] = ErrAlreadyKnown
knownTxMeter.Mark(1)
continue
}
@@ -851,6 +867,12 @@ func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
return pool.all.Get(hash)
}

// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *TxPool) Has(hash common.Hash) bool {
return pool.all.Get(hash) != nil
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
@@ -1493,8 +1515,9 @@ func (as *accountSet) merge(other *accountSet) {
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
type txLookup struct {
all map[common.Hash]*types.Transaction
lock sync.RWMutex
all map[common.Hash]*types.Transaction
slots int
lock sync.RWMutex
}

// newTxLookup returns a new txLookup structure.
@@ -1532,11 +1555,22 @@ func (t *txLookup) Count() int {
return len(t.all)
}

// Slots returns the current number of slots used in the lookup.
func (t *txLookup) Slots() int {
t.lock.RLock()
defer t.lock.RUnlock()

return t.slots
}

// Add adds a transaction to the lookup.
func (t *txLookup) Add(tx *types.Transaction) {
t.lock.Lock()
defer t.lock.Unlock()

t.slots += numSlots(tx)
slotsGauge.Update(int64(t.slots))

t.all[tx.Hash()] = tx
}

@@ -1545,5 +1579,13 @@ func (t *txLookup) Remove(hash common.Hash) {
t.lock.Lock()
defer t.lock.Unlock()

t.slots -= numSlots(t.all[hash])
slotsGauge.Update(int64(t.slots))

delete(t.all, hash)
}

// numSlots calculates the number of slots needed for a single transaction.
func numSlots(tx *types.Transaction) int {
return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
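numSlots is a plain ceiling division by txSlotSize (32KB). A few worked values under that assumption, using a standalone copy of the arithmetic that takes raw byte sizes instead of a types.Transaction:

package main

import "fmt"

const txSlotSize = 32 * 1024

// numSlotsForSize mirrors the ceiling division in numSlots above.
func numSlotsForSize(size uint64) int {
	return int((size + txSlotSize - 1) / txSlotSize)
}

func main() {
	fmt.Println(numSlotsForSize(200))            // 1: anything up to 32KB occupies one slot
	fmt.Println(numSlotsForSize(32 * 1024))      // 1: an exact multiple stays at one slot
	fmt.Println(numSlotsForSize(32*1024 + 1))    // 2: one byte over spills into a second slot
	fmt.Println(numSlotsForSize(10 * 32 * 1024)) // 10: plus the encoding overhead is why the slot-count test below expects 11
}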
@@ -77,9 +77,17 @@ func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ec
return tx
}

func pricedDataTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey, bytes uint64) *types.Transaction {
data := make([]byte, bytes)
rand.Read(data)

tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(0), gaslimit, gasprice, data), types.HomesteadSigner{}, key)
return tx
}

func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}

key, _ := crypto.GenerateKey()
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
@@ -465,7 +473,7 @@ func TestTransactionDropping(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000))

// Add some pending and some queued transactions
@@ -674,7 +682,7 @@ func TestTransactionGapFilling(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))

// Keep track of transaction events to ensure all executables get announced
@@ -728,7 +736,7 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))

// Keep queuing up transactions and make sure all above a limit are dropped
@@ -923,7 +931,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))

// Keep track of transaction events to ensure all executables get announced
@@ -1002,6 +1010,62 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
}
}

// Tests that the limit on transaction size is enforced correctly.
// This test verifies that every transaction of allowed size
// is added to the pool, and that oversized transactions are rejected.
func TestTransactionAllowedTxSize(t *testing.T) {
t.Parallel()

// Create a test account and fund it
pool, key := setupTxPool()
defer pool.Stop()

account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000000))

// Compute maximal data size for transactions (lower bound).
//
// It is assumed the fields in the transaction (except the data) are:
// - nonce <= 32 bytes
// - gasPrice <= 32 bytes
// - gasLimit <= 32 bytes
// - recipient == 20 bytes
// - value <= 32 bytes
// - signature == 65 bytes
// All those fields summed up take at most 4*32 + 20 + 65 = 213 bytes.
baseSize := uint64(213)
dataSize := txMaxSize - baseSize

// Try adding a transaction with maximal allowed size
tx := pricedDataTransaction(0, pool.currentMaxGas, big.NewInt(1), key, dataSize)
if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
}
// Try adding a transaction with random allowed size
if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
t.Fatalf("failed to add transaction of random allowed size: %v", err)
}
// Try adding a transaction of minimal not allowed size
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, txMaxSize)); err == nil {
t.Fatalf("expected rejection on slightly oversize transaction")
}
// Try adding a transaction of random not allowed size
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(int(10*txMaxSize))))); err == nil {
t.Fatalf("expected rejection on oversize transaction")
}
// Run some sanity checks on the pool internals
pending, queued := pool.Stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
}

// Tests that if transactions start being capped, transactions are also removed from 'all'
func TestTransactionCapClearsFromAll(t *testing.T) {
t.Parallel()
@@ -1752,6 +1816,24 @@ func TestTransactionStatusCheck(t *testing.T) {
}
}

// Test the transaction slots consumption is computed correctly
func TestTransactionSlotCount(t *testing.T) {
t.Parallel()

key, _ := crypto.GenerateKey()

// Check that an empty transaction consumes a single slot
smallTx := pricedDataTransaction(0, 0, big.NewInt(0), key, 0)
if slots := numSlots(smallTx); slots != 1 {
t.Fatalf("small transactions slot count mismatch: have %d want %d", slots, 1)
}
// Check that a large transaction consumes the correct number of slots
bigTx := pricedDataTransaction(0, 0, big.NewInt(0), key, uint64(10*txSlotSize))
if slots := numSlots(bigTx); slots != 11 {
t.Fatalf("big transactions slot count mismatch: have %d want %d", slots, 11)
}
}

// Benchmarks the speed of validating the contents of the pending queue of the
// transaction pool.
func BenchmarkPendingDemotion100(b *testing.B) { benchmarkPendingDemotion(b, 100) }
@@ -1763,7 +1845,7 @@ func benchmarkPendingDemotion(b *testing.B, size int) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))

for i := 0; i < size; i++ {
@@ -1788,7 +1870,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))

for i := 0; i < size; i++ {
@@ -1812,7 +1894,7 @@ func benchmarkPoolBatchInsert(b *testing.B, size int) {
pool, key := setupTxPool()
defer pool.Stop()

account, _ := deriveSender(transaction(0, 0, key))
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))

batches := make([]types.Transactions, b.N)
@@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bn256"
"github.com/ethereum/go-ethereum/params"

//lint:ignore SA1019 Needed for precompile
"golang.org/x/crypto/ripemd160"
)

@@ -29,7 +29,6 @@ import (
// precompiledTest defines the input/output pairs for precompiled contract tests.
type precompiledTest struct {
input, expected string
gas uint64
name string
noBenchmark bool // Benchmark primarily the worst-cases
}
@@ -418,6 +417,24 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
})
}

func testPrecompiledOOG(addr string, test precompiledTest, t *testing.T) {
p := PrecompiledContractsIstanbul[common.HexToAddress(addr)]
in := common.Hex2Bytes(test.input)
contract := NewContract(AccountRef(common.HexToAddress("1337")),
nil, new(big.Int), p.RequiredGas(in)-1)
t.Run(fmt.Sprintf("%s-Gas=%d", test.name, contract.Gas), func(t *testing.T) {
_, err := RunPrecompiledContract(p, in, contract)
if err.Error() != "out of gas" {
t.Errorf("Expected error [out of gas], got [%v]", err)
}
// Verify that the precompile did not touch the input buffer
exp := common.Hex2Bytes(test.input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
})
}

func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing.T) {
p := PrecompiledContractsIstanbul[common.HexToAddress(addr)]
in := common.Hex2Bytes(test.input)
@@ -541,6 +558,13 @@ func BenchmarkPrecompiledBn256Add(bench *testing.B) {
}
}

// Tests OOG
func TestPrecompiledModExpOOG(t *testing.T) {
for _, test := range modexpTests {
testPrecompiledOOG("05", test, t)
}
}

// Tests the sample inputs from the elliptic curve scalar multiplication EIP 213.
func TestPrecompiledBn256ScalarMul(t *testing.T) {
for _, test := range bn256ScalarMulTests {
@@ -46,9 +46,9 @@ func EnableEIP(eipNum int, jt *JumpTable) error {
// - Define SELFBALANCE, with cost GasFastStep (5)
func enable1884(jt *JumpTable) {
// Gas cost changes
jt[SLOAD].constantGas = params.SloadGasEIP1884
jt[BALANCE].constantGas = params.BalanceGasEIP1884
jt[EXTCODEHASH].constantGas = params.ExtcodeHashGasEIP1884
jt[SLOAD].constantGas = params.SloadGasEIP1884

// New opcode
jt[SELFBALANCE] = operation{
@@ -88,5 +88,6 @@ func opChainID(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memo

// enable2200 applies EIP-2200 (Rebalance net-metered SSTORE)
func enable2200(jt *JumpTable) {
jt[SLOAD].constantGas = params.SloadGasEIP2200
jt[SSTORE].dynamicGas = gasSStoreEIP2200
}
@@ -17,7 +17,6 @@
package runtime

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -26,8 +25,7 @@ func NewEnv(cfg *Config) *vm.EVM {
context := vm.Context{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
GetHash: func(uint64) common.Hash { return common.Hash{} },

GetHash: cfg.GetHashFn,
Origin: cfg.Origin,
Coinbase: cfg.Coinbase,
BlockNumber: cfg.BlockNumber,

@@ -90,8 +90,8 @@ func setDefaults(cfg *Config) {
// Execute executes the code using the input as call data during the execution.
// It returns the EVM's return value, the new state and an error if it failed.
//
// Executes sets up a in memory, temporarily, environment for the execution of
// the given code. It makes sure that it's restored to it's original state afterwards.
// Execute sets up an in-memory, temporary, environment for the execution of
// the given code. It makes sure that it's restored to its original state afterwards.
func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
if cfg == nil {
cfg = new(Config)
@@ -23,8 +23,11 @@ import (

"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
)
@@ -203,3 +206,113 @@ func BenchmarkEVM_CREATE2_1200(bench *testing.B) {
// initcode size 1200K, repeatedly calls CREATE2 and then modifies the mem contents
benchmarkEVM_Create(bench, "5b5862124f80600080f5600152600056")
}

func fakeHeader(n uint64, parentHash common.Hash) *types.Header {
header := types.Header{
Coinbase: common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
Number: big.NewInt(int64(n)),
ParentHash: parentHash,
Time: 1000,
Nonce: types.BlockNonce{0x1},
Extra: []byte{},
Difficulty: big.NewInt(0),
GasLimit: 100000,
}
return &header
}

type dummyChain struct {
counter int
}

// Engine retrieves the chain's consensus engine.
func (d *dummyChain) Engine() consensus.Engine {
return nil
}

// GetHeader returns a fake header whose parent hash encodes the parent's number.
func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header {
d.counter++
parentHash := common.Hash{}
s := common.LeftPadBytes(big.NewInt(int64(n-1)).Bytes(), 32)
copy(parentHash[:], s)

//parentHash := common.Hash{byte(n - 1)}
//fmt.Printf("GetHeader(%x, %d) => header with parent %x\n", h, n, parentHash)
return fakeHeader(n, parentHash)
}

// TestBlockhash tests the blockhash operation. It's a bit special, since it internally
// requires access to a chain reader.
func TestBlockhash(t *testing.T) {
// Current head
n := uint64(1000)
parentHash := common.Hash{}
s := common.LeftPadBytes(big.NewInt(int64(n-1)).Bytes(), 32)
copy(parentHash[:], s)
header := fakeHeader(n, parentHash)

// This is the contract we're using. It requests the blockhash for current num (should be all zeroes),
// then iteratively fetches all blockhashes back to n-260.
// It returns
// 1. the first (should be zero)
// 2. the second (should be the parent hash)
// 3. the last non-zero hash
// By making the chain reader return hashes which correlate to the number, we can
// verify that it obtained the right hashes where it should

/*

pragma solidity ^0.5.3;
contract Hasher{

function test() public view returns (bytes32, bytes32, bytes32){
uint256 x = block.number;
bytes32 first;
bytes32 last;
bytes32 zero;
zero = blockhash(x); // Should be zeroes
first = blockhash(x-1);
for(uint256 i = 2 ; i < 260; i++){
bytes32 hash = blockhash(x - i);
if (uint256(hash) != 0){
last = hash;
}
}
return (zero, first, last);
}
}

*/
// The contract above
data := common.Hex2Bytes("6080604052348015600f57600080fd5b50600436106045576000357c010000000000000000000000000000000000000000000000000000000090048063f8a8fd6d14604a575b600080fd5b60506074565b60405180848152602001838152602001828152602001935050505060405180910390f35b600080600080439050600080600083409050600184034092506000600290505b61010481101560c35760008186034090506000816001900414151560b6578093505b5080806001019150506094565b508083839650965096505050505090919256fea165627a7a72305820462d71b510c1725ff35946c20b415b0d50b468ea157c8c77dff9466c9cb85f560029")
// The method call to 'test()'
input := common.Hex2Bytes("f8a8fd6d")
chain := &dummyChain{}
ret, _, err := Execute(data, input, &Config{
GetHashFn: core.GetHashFn(header, chain),
BlockNumber: new(big.Int).Set(header.Number),
})
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if len(ret) != 96 {
t.Fatalf("expected returndata to be 96 bytes, got %d", len(ret))
}

zero := new(big.Int).SetBytes(ret[0:32])
first := new(big.Int).SetBytes(ret[32:64])
last := new(big.Int).SetBytes(ret[64:96])
if zero.BitLen() != 0 {
t.Fatalf("expected zeroes, got %x", ret[0:32])
}
if first.Uint64() != 999 {
t.Fatalf("second block should be 999, got %d (%x)", first, ret[32:64])
}
if last.Uint64() != 744 {
t.Fatalf("last block should be 744, got %d (%x)", last, ret[64:96])
}
if exp, got := 255, chain.counter; exp != got {
t.Errorf("suboptimal; too much chain iteration, expected %d, got %d", exp, got)
}
}
@@ -74,13 +74,6 @@ func (st *Stack) Back(n int) *big.Int {
return st.data[st.len()-n-1]
}

func (st *Stack) require(n int) error {
if st.len() < n {
return fmt.Errorf("stack underflow (%d <=> %d)", len(st.data), n)
}
return nil
}

// Print dumps the content of the stack
func (st *Stack) Print() {
fmt.Println("### stack ###")
eth/api.go
@@ -166,8 +166,16 @@ func NewPrivateAdminAPI(eth *Ethereum) *PrivateAdminAPI {
return &PrivateAdminAPI{eth: eth}
}

// ExportChain exports the current blockchain into a local file.
func (api *PrivateAdminAPI) ExportChain(file string) (bool, error) {
// ExportChain exports the current blockchain into a local file,
// or a range of blocks if first and last are non-nil.
func (api *PrivateAdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
if first == nil && last != nil {
return false, errors.New("last cannot be specified without first")
}
if first != nil && last == nil {
head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
last = &head
}
if _, err := os.Stat(file); err == nil {
// File already exists. Allowing overwrite could be a DoS vector,
// since the 'file' may point to arbitrary paths on the drive
@@ -187,7 +195,11 @@ func (api *PrivateAdminAPI) ExportChain(file string) (bool, error) {
}

// Export the blockchain
if err := api.eth.BlockChain().Export(writer); err != nil {
if first != nil {
if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
return false, err
}
} else if err := api.eth.BlockChain().Export(writer); err != nil {
return false, err
}
return true, nil
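A sketch of how the extended export API above is meant to be called from Go; the file names and block numbers are made up for illustration:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/eth"
)

// exportSome shows both calling conventions of the new ExportChain signature.
func exportSome(api *eth.PrivateAdminAPI) {
	// Export an explicit block range.
	first, last := uint64(100), uint64(200)
	if _, err := api.ExportChain("range.rlp", &first, &last); err != nil {
		log.Fatalf("range export failed: %v", err)
	}
	// Nil bounds keep the old behaviour and export the whole chain.
	if _, err := api.ExportChain("full.rlp", nil, nil); err != nil {
		log.Fatalf("full export failed: %v", err)
	}
}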
@@ -202,6 +202,10 @@ func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven
return b.eth.BlockChain().SubscribeRemovedLogsEvent(ch)
}

func (b *EthAPIBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
return b.eth.miner.SubscribePendingLogs(ch)
}

func (b *EthAPIBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return b.eth.BlockChain().SubscribeChainEvent(ch)
}
@@ -47,6 +47,7 @@ import (
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -74,6 +75,7 @@ type Ethereum struct {
blockchain *core.BlockChain
protocolManager *ProtocolManager
lesServer LesServer
dialCandiates enode.Iterator

// DB interfaces
chainDb ethdb.Database // Block chain database
@@ -135,7 +137,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul)
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul, config.OverrideMuirGlacier)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
@@ -220,6 +222,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)

eth.dialCandiates, err = eth.setupDiscovery(&ctx.Config.P2P)
if err != nil {
return nil, err
}

return eth, nil
}

@@ -510,6 +517,7 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
for i, vsn := range ProtocolVersions {
protos[i] = s.protocolManager.makeProtocol(vsn)
protos[i].Attributes = []enr.Entry{s.currentEthEntry()}
protos[i].DialCandidates = s.dialCandiates
}
if s.lesServer != nil {
protos = append(protos, s.lesServer.Protocols()...)
@@ -95,6 +95,10 @@ type Config struct {
NetworkId uint64 // Network ID to use for selecting peers to connect to
SyncMode downloader.SyncMode

// This can be set to a list of enrtree:// URLs which will be queried
// for nodes to connect to.
DiscoveryURLs []string

NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand

@@ -156,5 +160,8 @@ type Config struct {
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`

// Istanbul block override (TODO: remove after the fork)
OverrideIstanbul *big.Int
OverrideIstanbul *big.Int `toml:",omitempty"`

// MuirGlacier block override (TODO: remove after the fork)
OverrideMuirGlacier *big.Int `toml:",omitempty"`
}
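A minimal sketch of populating the two new Config knobs from this hunk; the enrtree URL is a placeholder, not a real discovery tree:

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/eth"
)

func exampleConfig() *eth.Config {
	cfg := eth.DefaultConfig
	// Placeholder DNS discovery tree; real deployments use a signed enrtree:// URL.
	cfg.DiscoveryURLs = []string{"enrtree://EXAMPLEKEY@nodes.example.org"}
	// Mainnet's Muir Glacier height (9200000 per the forkid tests above),
	// shown here purely to illustrate the override knob.
	cfg.OverrideMuirGlacier = big.NewInt(9200000)
	return &cfg
}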
@@ -19,6 +19,8 @@ package eth

import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -37,6 +39,7 @@ func (e ethEntry) ENRKey() string {
return "eth"
}

// startEthEntryUpdate starts the ENR updater loop.
func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
var newHead = make(chan core.ChainHeadEvent, 10)
sub := eth.blockchain.SubscribeChainHeadEvent(newHead)
@@ -59,3 +62,12 @@ func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
func (eth *Ethereum) currentEthEntry() *ethEntry {
return &ethEntry{ForkID: forkid.NewID(eth.blockchain)}
}

// setupDiscovery creates the node discovery source for the eth protocol.
func (eth *Ethereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) {
if cfg.NoDiscovery || len(eth.config.DiscoveryURLs) == 0 {
return nil, nil
}
client := dnsdisc.NewClient(dnsdisc.Config{})
return client.NewIterator(eth.config.DiscoveryURLs...)
}
@@ -470,7 +470,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.headerThroughput
}
return ps.idlePeers(62, 64, idle, throughput)
return ps.idlePeers(62, 65, idle, throughput)
}

// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -484,7 +484,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.blockThroughput
}
return ps.idlePeers(62, 64, idle, throughput)
return ps.idlePeers(62, 65, idle, throughput)
}

// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@@ -498,7 +498,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.receiptThroughput
}
return ps.idlePeers(63, 64, idle, throughput)
return ps.idlePeers(63, 65, idle, throughput)
}

// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@@ -512,7 +512,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.stateThroughput
}
return ps.idlePeers(63, 64, idle, throughput)
return ps.idlePeers(63, 65, idle, throughput)
}

// idlePeers retrieves a flat list of all currently idle peers satisfying the
@@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package fetcher contains the block announcement based synchronisation.
|
||||
// Package fetcher contains the announcement based blocks or transaction synchronisation.
|
||||
package fetcher
|
||||
|
||||
import (
|
||||
@@ -27,16 +27,40 @@ import (
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
|
||||
arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
|
||||
gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
|
||||
fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block
|
||||
maxUncleDist = 7 // Maximum allowed backward distance from the chain head
|
||||
maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
|
||||
hashLimit = 256 // Maximum number of unique blocks a peer may have announced
|
||||
blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
|
||||
fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction
|
||||
)
|
||||
|
||||
const (
|
||||
maxUncleDist = 7 // Maximum allowed backward distance from the chain head
|
||||
maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
|
||||
hashLimit = 256 // Maximum number of unique blocks a peer may have announced
|
||||
blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
|
||||
)
|
||||
|
||||
var (
|
||||
blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
|
||||
blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
|
||||
blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
|
||||
blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
|
||||
|
||||
blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
|
||||
blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
|
||||
blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
|
||||
blockBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)
|
||||
|
||||
headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
|
||||
bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
|
||||
|
||||
headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
|
||||
headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
|
||||
bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
|
||||
bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -67,9 +91,9 @@ type chainInsertFn func(types.Blocks) (int, error)
|
||||
// peerDropFn is a callback type for dropping a peer detected as malicious.
|
||||
type peerDropFn func(id string)
|
||||
|
||||
// announce is the hash notification of the availability of a new block in the
// blockAnnounce is the hash notification of the availability of a new block in the
// network.
type announce struct {
type blockAnnounce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
@@ -97,18 +121,18 @@ type bodyFilterTask struct {
	time time.Time // Arrival time of the blocks' contents
}

// inject represents a scheduled import operation.
type inject struct {
// blockInject represents a scheduled import operation.
type blockInject struct {
	origin string
	block  *types.Block
}

// Fetcher is responsible for accumulating block announcements from various peers
// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type Fetcher struct {
type BlockFetcher struct {
	// Various event channels
	notify chan *announce
	inject chan *inject
	notify chan *blockAnnounce
	inject chan *blockInject

	headerFilter chan chan *headerFilterTask
	bodyFilter   chan chan *bodyFilterTask
@@ -117,16 +141,16 @@ type Fetcher struct {
	quit chan struct{}

	// Announce states
	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing
	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
	queues map[string]int          // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
	queue  *prque.Prque                 // Queue containing the import operations (block number sorted)
	queues map[string]int               // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)

	// Callbacks
	getBlock blockRetrievalFn // Retrieves a block from the local chain
@@ -137,30 +161,30 @@ type Fetcher struct {
	dropPeer peerDropFn // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
}

// New creates a block fetcher to retrieve blocks based on hash announcements.
func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
	return &Fetcher{
		notify: make(chan *announce),
		inject: make(chan *inject),
// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
	return &BlockFetcher{
		notify:       make(chan *blockAnnounce),
		inject:       make(chan *blockInject),
		headerFilter: make(chan chan *headerFilterTask),
		bodyFilter:   make(chan chan *bodyFilterTask),
		done:         make(chan common.Hash),
		quit:         make(chan struct{}),
		announces:    make(map[string]int),
		announced:  make(map[common.Hash][]*announce),
		fetching:   make(map[common.Hash]*announce),
		fetched:    make(map[common.Hash][]*announce),
		completing: make(map[common.Hash]*announce),
		announced:    make(map[common.Hash][]*blockAnnounce),
		fetching:     make(map[common.Hash]*blockAnnounce),
		fetched:      make(map[common.Hash][]*blockAnnounce),
		completing:   make(map[common.Hash]*blockAnnounce),
		queue:        prque.New(nil),
		queues:       make(map[string]int),
		queued: make(map[common.Hash]*inject),
		queued:       make(map[common.Hash]*blockInject),
		getBlock:     getBlock,
		verifyHeader: verifyHeader,
		broadcastBlock: broadcastBlock,
@@ -172,21 +196,21 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBloc

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *Fetcher) Start() {
func (f *BlockFetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *Fetcher) Stop() {
func (f *BlockFetcher) Stop() {
	close(f.quit)
}

// Notify announces the fetcher of the potential availability of a new block in
// the network.
func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
	block := &announce{
	block := &blockAnnounce{
		hash:   hash,
		number: number,
		time:   time,
@@ -203,8 +227,8 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
}

// Enqueue tries to fill gaps in the fetcher's future import queue.
func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
	op := &inject{
func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
	op := &blockInject{
		origin: peer,
		block:  block,
	}
@@ -218,7 +242,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {

// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))

	// Send the filter channel to the fetcher
@@ -246,7 +270,7 @@ func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.

// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))

	// Send the filter channel to the fetcher
@@ -274,7 +298,7 @@ func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction,

// Loop is the main fetcher loop, checking and processing various notification
// events.
func (f *Fetcher) loop() {
func (f *BlockFetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)
@@ -289,7 +313,7 @@ func (f *Fetcher) loop() {
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)
			op := f.queue.PopItem().(*blockInject)
			hash := op.block.Hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
@@ -313,24 +337,24 @@ func (f *Fetcher) loop() {
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			// BlockFetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			propAnnounceInMeter.Mark(1)
			blockAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				propAnnounceDOSMeter.Mark(1)
				blockAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					propAnnounceDropMeter.Mark(1)
					blockAnnounceDropMeter.Mark(1)
					break
				}
			}
@@ -352,7 +376,7 @@ func (f *Fetcher) loop() {

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			propBroadcastInMeter.Mark(1)
			blockBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
@@ -439,7 +463,7 @@ func (f *Fetcher) loop() {

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

@@ -475,7 +499,7 @@ func (f *Fetcher) loop() {
					f.forgetHash(hash)
				}
			} else {
				// Fetcher doesn't know about it, add to the return list
				// BlockFetcher doesn't know about it, add to the return list
				unknown = append(unknown, header)
			}
		}
@@ -562,8 +586,8 @@ func (f *Fetcher) loop() {
		}
	}

// rescheduleFetch resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
@@ -579,7 +603,7 @@ func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
}

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
@@ -596,27 +620,27 @@ func (f *Fetcher) rescheduleComplete(complete *time.Timer) {

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
		propBroadcastDOSMeter.Mark(1)
		blockBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
		propBroadcastDropMeter.Mark(1)
		blockBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
		op := &blockInject{
			origin: peer,
			block:  block,
		}
@@ -633,7 +657,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
func (f *BlockFetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
@@ -651,7 +675,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
@@ -669,7 +693,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
			return
		}
		// If import succeeded, broadcast the block
		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
@@ -681,7 +705,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
func (f *Fetcher) forgetHash(hash common.Hash) {
func (f *BlockFetcher) forgetHash(hash common.Hash) {
	// Remove all pending announces and decrement DOS counters
	for _, announce := range f.announced[hash] {
		f.announces[announce.origin]--
@@ -723,7 +747,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {

// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *Fetcher) forgetBlock(hash common.Hash) {
func (f *BlockFetcher) forgetBlock(hash common.Hash) {
	if insert := f.queued[hash]; insert != nil {
		f.queues[insert.origin]--
		if f.queues[insert.origin] == 0 {
@@ -76,7 +76,7 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common

// fetcherTester is a test simulator for mocking out local block chain.
type fetcherTester struct {
	fetcher *Fetcher
	fetcher *BlockFetcher

	hashes []common.Hash                // Hash chain belonging to the tester
	blocks map[common.Hash]*types.Block // Blocks belonging to the tester
@@ -92,7 +92,7 @@ func newTester() *fetcherTester {
		blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
		drops:  make(map[string]bool),
	}
	tester.fetcher = New(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
	tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
	tester.fetcher.Start()

	return tester
@@ -1,43 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the metrics collected by the fetcher.

package fetcher

import (
	"github.com/ethereum/go-ethereum/metrics"
)

var (
	propAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/in", nil)
	propAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/prop/announces/out", nil)
	propAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/drop", nil)
	propAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/dos", nil)

	propBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/in", nil)
	propBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/prop/broadcasts/out", nil)
	propBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/drop", nil)
	propBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/dos", nil)

	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/headers", nil)
	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/fetch/bodies", nil)

	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/in", nil)
	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/out", nil)
	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/in", nil)
	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/out", nil)
)
894
eth/fetcher/tx_fetcher.go
Normal file
@@ -0,0 +1,894 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fetcher

import (
	"bytes"
	"fmt"
	mrand "math/rand"
	"sort"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

const (
	// maxTxAnnounces is the maximum number of unique transactions a peer
	// can announce in a short time.
	maxTxAnnounces = 4096

	// maxTxRetrievals is the maximum number of transactions that can be fetched
	// in one request. The rationale for picking 256 is:
	// - In the eth protocol, the softResponseLimit is 2MB. Nowadays according to
	//   Etherscan the average transaction size is around 200B, so in theory
	//   we can include lots of transactions in a single protocol packet.
	// - However the maximum size of a single transaction is raised to 128KB,
	//   so pick a middle value here to ensure we can maximize the efficiency
	//   of the retrieval and response size overflow won't happen in most cases.
	maxTxRetrievals = 256

	// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
	// is used to track recent transactions that have been dropped so we don't
	// re-request them.
	maxTxUnderpricedSetSize = 32768

	// txArriveTimeout is the time allowance before an announced transaction is
	// explicitly requested.
	txArriveTimeout = 500 * time.Millisecond

	// txGatherSlack is the interval used to collate almost-expired announces
	// with network fetches.
	txGatherSlack = 100 * time.Millisecond
)
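
// Illustrative sketch, not part of the original file: a back-of-the-envelope
// check of the maxTxRetrievals rationale above. The limit and size constants
// below are assumptions restated from the comment, not values imported from
// the protocol code.
const (
	assumedSoftResponseLimit = 2 * 1024 * 1024 // the 2MB soft cap cited above
	assumedAvgTxSize         = 200             // the ~200B average size cited above
	assumedMaxTxSize         = 128 * 1024      // the 128KB single-tx ceiling cited above
)

// With average-sized transactions a full request weighs in around
// 256 * 200B ≈ 50KB, far below the 2MB cap. Only a reply consisting almost
// entirely of maximum-size transactions (more than 16 at 128KB each) could
// exceed it, which is why 256 is a safe "middle value".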

var (
	// txFetchTimeout is the maximum allotted time to return an explicitly
	// requested transaction.
	txFetchTimeout = 5 * time.Second
)

var (
	txAnnounceInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)

	txBroadcastInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)

	txRequestOutMeter     = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
	txRequestFailMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
	txRequestDoneMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
	txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)

	txReplyInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
	txReplyKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)

	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
	txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
	txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
)

// txAnnounce is the notification of the availability of a batch
// of new transactions in the network.
type txAnnounce struct {
	origin string        // Identifier of the peer originating the notification
	hashes []common.Hash // Batch of transaction hashes being announced
}

// txRequest represents an in-flight transaction retrieval request destined to
// a specific peer.
type txRequest struct {
	hashes []common.Hash            // Transactions having been requested
	stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
	time   mclock.AbsTime           // Timestamp of the request
}

// txDelivery is the notification that a batch of transactions have been added
// to the pool and should be untracked.
type txDelivery struct {
	origin string        // Identifier of the peer originating the notification
	hashes []common.Hash // Batch of transaction hashes having been delivered
	direct bool          // Whether this is a direct reply or a broadcast
}

// txDrop is the notification that a peer has disconnected.
type txDrop struct {
	peer string
}

// TxFetcher is responsible for retrieving new transactions based on announcements.
//
// The fetcher operates in 3 stages:
// - Transactions that are newly discovered are moved into a wait list.
// - After ~500ms passes, transactions from the wait list that have not been
//   broadcast to us in whole are moved into a queueing area.
// - When a connected peer doesn't have in-flight retrieval requests, any
//   transactions queued up (and announced by the peer) are allocated to the
//   peer and moved into a fetching status until they're fulfilled or fail.
//
// The invariants of the fetcher are:
// - Each tracked transaction (hash) must only be present in one of the
//   three stages. This ensures that the fetcher operates akin to a finite
//   state automaton and there's no data leak.
// - Each peer that announced transactions may be scheduled for retrievals, but
//   only ever one concurrently. This ensures we can immediately know what is
//   missing from a reply and reschedule it.
type TxFetcher struct {
	notify  chan *txAnnounce
	cleanup chan *txDelivery
	drop    chan *txDrop
	quit    chan struct{}

	underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)

	// Stage 1: Waiting lists for newly discovered transactions that might be
	// broadcast without needing explicit request/reply round trips.
	waitlist  map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
	waittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist
	waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)

	// Stage 2: Queue of transactions that are waiting to be allocated to some
	// peer to be retrieved directly.
	announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
	announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash

	// Stage 3: Set of transactions currently being retrieved, some of which may
	// be fulfilled and some rescheduled. Note, this step shares 'announces' from
	// the previous stage to avoid having to duplicate it (need it for DoS checks).
	fetching   map[common.Hash]string              // Transaction set currently being retrieved
	requests   map[string]*txRequest               // In-flight transaction retrievals
	alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails

	// Callbacks
	hasTx    func(common.Hash) bool             // Retrieves a tx from the local txpool
	addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into the local txpool
	fetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer

	step  chan struct{} // Notification channel when the fetcher loop iterates
	clock mclock.Clock  // Time wrapper to simulate in tests
	rand  *mrand.Rand   // Randomizer to use in tests instead of map range loops (soft-random)
}

// NewTxFetcher creates a transaction fetcher to retrieve transactions
// based on hash announcements.
func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
}
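
// Illustrative sketch, not part of the original file: how a protocol handler
// might wire the fetcher to a transaction pool. The pool and peer-set names
// (txpool.Has, txpool.AddRemotes, peers.RequestTxs) are assumptions made for
// the example, not APIs defined in this diff.
//
//	fetcher := NewTxFetcher(
//		txpool.Has,        // hasTx: membership check in the local pool
//		txpool.AddRemotes, // addTxs: batch insert, one error per transaction
//		func(peer string, hashes []common.Hash) error {
//			return peers.RequestTxs(peer, hashes) // issue a retrieval request
//		},
//	)
//	fetcher.Start()
//	defer fetcher.Stop()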

// NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests(
	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
	clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
	return &TxFetcher{
		notify:      make(chan *txAnnounce),
		cleanup:     make(chan *txDelivery),
		drop:        make(chan *txDrop),
		quit:        make(chan struct{}),
		waitlist:    make(map[common.Hash]map[string]struct{}),
		waittime:    make(map[common.Hash]mclock.AbsTime),
		waitslots:   make(map[string]map[common.Hash]struct{}),
		announces:   make(map[string]map[common.Hash]struct{}),
		announced:   make(map[common.Hash]map[string]struct{}),
		fetching:    make(map[common.Hash]string),
		requests:    make(map[string]*txRequest),
		alternates:  make(map[common.Hash]map[string]struct{}),
		underpriced: mapset.NewSet(),
		hasTx:       hasTx,
		addTxs:      addTxs,
		fetchTxs:    fetchTxs,
		clock:       clock,
		rand:        rand,
	}
}
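
// Illustrative sketch, not part of the original file: the three stages in
// motion, written in the style of the package's own tests. mclock.Simulated
// and its Run method live in common/mclock; the peer name and hash are made up.
//
//	clock := new(mclock.Simulated)
//	f := NewTxFetcherForTests(
//		func(common.Hash) bool { return false },                                   // hasTx: pool is empty
//		func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, // addTxs: accept all
//		func(peer string, hashes []common.Hash) error { return nil },              // fetchTxs: no-op
//		clock, mrand.New(mrand.NewSource(0)),
//	)
//	f.Start()
//	defer f.Stop()
//
//	f.Notify("peer-a", []common.Hash{{0x01}}) // stage 1: hash enters the waitlist
//	clock.Run(txArriveTimeout)                // stage 2→3: queued, then allocated to peer-a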

// Notify announces the fetcher of the potential availability of a new batch of
// transactions in the network.
func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
	// Keep track of all the announced transactions
	txAnnounceInMeter.Mark(int64(len(hashes)))

	// Skip any transaction announcements that we already know of, or that we've
	// previously marked as cheap and discarded. This check is of course racy,
	// because multiple concurrent notifies will still manage to pass it, but it's
	// still valuable to check here because it runs concurrently to the internal
	// loop, so anything caught here is time saved internally.
	var (
		unknowns               = make([]common.Hash, 0, len(hashes))
		duplicate, underpriced int64
	)
	for _, hash := range hashes {
		switch {
		case f.hasTx(hash):
			duplicate++

		case f.underpriced.Contains(hash):
			underpriced++

		default:
			unknowns = append(unknowns, hash)
		}
	}
	txAnnounceKnownMeter.Mark(duplicate)
	txAnnounceUnderpricedMeter.Mark(underpriced)

	// If anything's left to announce, push it into the internal loop
	if len(unknowns) == 0 {
		return nil
	}
	announce := &txAnnounce{
		origin: peer,
		hashes: unknowns,
	}
	select {
	case f.notify <- announce:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// Enqueue imports a batch of received transactions into the transaction pool
// and the fetcher. This method may be called by both transaction broadcasts and
// direct request replies. The differentiation is important so the fetcher can
// re-schedule missing transactions as soon as possible.
func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
	// Keep track of all the propagated transactions
	if direct {
		txReplyInMeter.Mark(int64(len(txs)))
	} else {
		txBroadcastInMeter.Mark(int64(len(txs)))
	}
	// Push all the transactions into the pool, tracking underpriced ones to avoid
	// re-requesting them and dropping the peer in case of malicious transfers.
	var (
		added       = make([]common.Hash, 0, len(txs))
		duplicate   int64
		underpriced int64
		otherreject int64
	)
	errs := f.addTxs(txs)
	for i, err := range errs {
		if err != nil {
			// Track the transaction hash if the price is too low for us.
			// Avoid re-requesting this transaction when we receive another
			// announcement.
			if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
				for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
					f.underpriced.Pop()
				}
				f.underpriced.Add(txs[i].Hash())
			}
			// Track a few interesting failure types
			switch err {
			case nil: // Noop, but need to handle to not count these

			case core.ErrAlreadyKnown:
				duplicate++

			case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
				underpriced++

			default:
				otherreject++
			}
		}
		added = append(added, txs[i].Hash())
	}
	if direct {
		txReplyKnownMeter.Mark(duplicate)
		txReplyUnderpricedMeter.Mark(underpriced)
		txReplyOtherRejectMeter.Mark(otherreject)
	} else {
		txBroadcastKnownMeter.Mark(duplicate)
		txBroadcastUnderpricedMeter.Mark(underpriced)
		txBroadcastOtherRejectMeter.Mark(otherreject)
	}
	select {
	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
		return nil
	case <-f.quit:
		return errTerminated
	}
}
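
// Illustrative sketch, not part of the original file: gossip and request
// replies both funnel into Enqueue, only the direct flag differs. Handler
// names are hypothetical.
//
//	// On a transactions broadcast message:
//	fetcher.Enqueue(peerID, txs, false) // may be a partial, unsolicited set
//
//	// On a reply to our own retrieval request:
//	fetcher.Enqueue(peerID, txs, true) // triggers rescheduling of anything missing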

// Drop should be called when a peer disconnects. It cleans up all the internal
// data structures of the given node.
func (f *TxFetcher) Drop(peer string) error {
	select {
	case f.drop <- &txDrop{peer: peer}:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *TxFetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *TxFetcher) Stop() {
	close(f.quit)
}

func (f *TxFetcher) loop() {
	var (
		waitTimer    = new(mclock.Timer)
		timeoutTimer = new(mclock.Timer)

		waitTrigger    = make(chan struct{}, 1)
		timeoutTrigger = make(chan struct{}, 1)
	)
	for {
		select {
		case ann := <-f.notify:
			// Drop part of the new announcements if there are too many accumulated.
			// Note, we could but do not filter already known transactions here as
			// the probability of something arriving between this call and the pre-
			// filter outside is essentially zero.
			used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
			if used >= maxTxAnnounces {
				// This can happen if a set of transactions are requested but not
				// all fulfilled, so the remainder are rescheduled without the cap
				// check. Should be fine as the limit is in the thousands and the
				// request size in the hundreds.
				txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
				break
			}
			want := used + len(ann.hashes)
			if want > maxTxAnnounces {
				txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
				ann.hashes = ann.hashes[:want-maxTxAnnounces]
			}
			// All is well, schedule the remainder of the transactions
			idleWait := len(f.waittime) == 0
			_, oldPeer := f.announces[ann.origin]

			for _, hash := range ann.hashes {
				// If the transaction is already downloading, add it to the list
				// of possible alternates (in case the current retrieval fails) and
				// also account it for the peer.
				if f.alternates[hash] != nil {
					f.alternates[hash][ann.origin] = struct{}{}

					// Stage 2 and 3 share the set of origins per tx
					if announces := f.announces[ann.origin]; announces != nil {
						announces[hash] = struct{}{}
					} else {
						f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
					}
					continue
				}
				// If the transaction is not downloading, but is already queued
				// from a different peer, track it for the new peer too.
				if f.announced[hash] != nil {
					f.announced[hash][ann.origin] = struct{}{}

					// Stage 2 and 3 share the set of origins per tx
					if announces := f.announces[ann.origin]; announces != nil {
						announces[hash] = struct{}{}
					} else {
						f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
					}
					continue
				}
				// If the transaction is already known to the fetcher, but not
				// yet downloading, add the peer as an alternate origin in the
				// waiting list.
				if f.waitlist[hash] != nil {
					f.waitlist[hash][ann.origin] = struct{}{}

					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
						waitslots[hash] = struct{}{}
					} else {
						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
					}
					continue
				}
				// Transaction unknown to the fetcher, insert it into the waiting list
				f.waitlist[hash] = map[string]struct{}{ann.origin: struct{}{}}
				f.waittime[hash] = f.clock.Now()

				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
					waitslots[hash] = struct{}{}
				} else {
					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
				}
			}
			// If a new item was added to the waitlist, schedule it into the fetcher
			if idleWait && len(f.waittime) > 0 {
				f.rescheduleWait(waitTimer, waitTrigger)
			}
			// If this peer is new and announced something already queued, maybe
			// request transactions from them
			if !oldPeer && len(f.announces[ann.origin]) > 0 {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: struct{}{}})
			}

		case <-waitTrigger:
			// At least one transaction's waiting time ran out, push all expired
			// ones into the retrieval queues
			actives := make(map[string]struct{})
			for hash, instance := range f.waittime {
				if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
					// Transaction expired without propagation, schedule for retrieval
					if f.announced[hash] != nil {
						panic("announce tracker already contains waitlist item")
					}
					f.announced[hash] = f.waitlist[hash]
					for peer := range f.waitlist[hash] {
						if announces := f.announces[peer]; announces != nil {
							announces[hash] = struct{}{}
						} else {
							f.announces[peer] = map[common.Hash]struct{}{hash: struct{}{}}
						}
						delete(f.waitslots[peer], hash)
						if len(f.waitslots[peer]) == 0 {
							delete(f.waitslots, peer)
						}
						actives[peer] = struct{}{}
					}
					delete(f.waittime, hash)
					delete(f.waitlist, hash)
				}
			}
			// If transactions are still waiting for propagation, reschedule the wait timer
			if len(f.waittime) > 0 {
				f.rescheduleWait(waitTimer, waitTrigger)
			}
			// If any peers became active and are idle, request transactions from them
			if len(actives) > 0 {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
			}

		case <-timeoutTrigger:
			// Clean up any expired retrievals and avoid re-requesting them from the
			// same peer (either overloaded or malicious, useless in both cases). We
			// could also penalize (Drop), but there's nothing to gain, and it could
			// possibly further increase the load on it.
			for peer, req := range f.requests {
				if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
					txRequestTimeoutMeter.Mark(int64(len(req.hashes)))

					// Reschedule all the not-yet-delivered fetches to alternate peers
					for _, hash := range req.hashes {
						// Skip rescheduling hashes already delivered by someone else
						if req.stolen != nil {
							if _, ok := req.stolen[hash]; ok {
								continue
							}
						}
						// Move the delivery back from fetching to queued
						if _, ok := f.announced[hash]; ok {
							panic("announced tracker already contains alternate item")
						}
						if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
							f.announced[hash] = f.alternates[hash]
						}
						delete(f.announced[hash], peer)
						if len(f.announced[hash]) == 0 {
							delete(f.announced, hash)
						}
						delete(f.announces[peer], hash)
						delete(f.alternates, hash)
						delete(f.fetching, hash)
					}
					if len(f.announces[peer]) == 0 {
						delete(f.announces, peer)
					}
					// Keep track of the request as dangling, but never expire
					f.requests[peer].hashes = nil
				}
			}
			// Schedule a new transaction retrieval
			f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)

			// No idea if we scheduled something or not, trigger the timer if needed
			// TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
			f.rescheduleTimeout(timeoutTimer, timeoutTrigger)

		case delivery := <-f.cleanup:
			// Independent of whether the delivery was direct or a broadcast, remove
			// all traces of the hash from internal trackers
			for _, hash := range delivery.hashes {
				if _, ok := f.waitlist[hash]; ok {
					for peer, txset := range f.waitslots {
						delete(txset, hash)
						if len(txset) == 0 {
							delete(f.waitslots, peer)
						}
					}
					delete(f.waitlist, hash)
					delete(f.waittime, hash)
				} else {
					for peer, txset := range f.announces {
						delete(txset, hash)
						if len(txset) == 0 {
							delete(f.announces, peer)
						}
					}
					delete(f.announced, hash)
					delete(f.alternates, hash)

					// If a transaction currently being fetched from a different
					// origin was delivered (delivery stolen), mark it so the
					// actual delivery won't double schedule it.
					if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
						stolen := f.requests[origin].stolen
						if stolen == nil {
							f.requests[origin].stolen = make(map[common.Hash]struct{})
							stolen = f.requests[origin].stolen
						}
						stolen[hash] = struct{}{}
					}
					delete(f.fetching, hash)
				}
			}
			// In case of a direct delivery, also reschedule anything missing
			// from the original query
			if delivery.direct {
				// Mark the requesting successful (independent of individual status)
				txRequestDoneMeter.Mark(int64(len(delivery.hashes)))

				// Make sure something was pending, nuke it
				req := f.requests[delivery.origin]
				if req == nil {
					log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
					break
				}
				delete(f.requests, delivery.origin)

				// Anything not delivered should be re-scheduled (with or without
				// this peer, depending on the response cutoff)
				delivered := make(map[common.Hash]struct{})
				for _, hash := range delivery.hashes {
					delivered[hash] = struct{}{}
				}
				cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
				for i, hash := range req.hashes {
					if _, ok := delivered[hash]; ok {
						cutoff = i
					}
				}
				// Reschedule missing hashes from alternates, not-fulfilled from alt+self
				for i, hash := range req.hashes {
					// Skip rescheduling hashes already delivered by someone else
					if req.stolen != nil {
						if _, ok := req.stolen[hash]; ok {
							continue
						}
					}
					if _, ok := delivered[hash]; !ok {
						if i < cutoff {
							delete(f.alternates[hash], delivery.origin)
							delete(f.announces[delivery.origin], hash)
							if len(f.announces[delivery.origin]) == 0 {
								delete(f.announces, delivery.origin)
							}
						}
						if len(f.alternates[hash]) > 0 {
							if _, ok := f.announced[hash]; ok {
								panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
							}
							f.announced[hash] = f.alternates[hash]
						}
					}
					delete(f.alternates, hash)
					delete(f.fetching, hash)
				}
				// Something was delivered, try to reschedule requests
				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
			}

		case drop := <-f.drop:
			// A peer was dropped, remove all traces of it
			if _, ok := f.waitslots[drop.peer]; ok {
				for hash := range f.waitslots[drop.peer] {
					delete(f.waitlist[hash], drop.peer)
					if len(f.waitlist[hash]) == 0 {
						delete(f.waitlist, hash)
						delete(f.waittime, hash)
					}
				}
				delete(f.waitslots, drop.peer)
				if len(f.waitlist) > 0 {
					f.rescheduleWait(waitTimer, waitTrigger)
				}
			}
			// Clean up any active requests
			var request *txRequest
			if request = f.requests[drop.peer]; request != nil {
				for _, hash := range request.hashes {
					// Skip rescheduling hashes already delivered by someone else
					if request.stolen != nil {
						if _, ok := request.stolen[hash]; ok {
							continue
						}
					}
					// Undelivered hash, reschedule if there's an alternative origin available
					delete(f.alternates[hash], drop.peer)
					if len(f.alternates[hash]) == 0 {
						delete(f.alternates, hash)
					} else {
						f.announced[hash] = f.alternates[hash]
						delete(f.alternates, hash)
					}
					delete(f.fetching, hash)
				}
				delete(f.requests, drop.peer)
			}
			// Clean up general announcement tracking
			if _, ok := f.announces[drop.peer]; ok {
				for hash := range f.announces[drop.peer] {
					delete(f.announced[hash], drop.peer)
					if len(f.announced[hash]) == 0 {
						delete(f.announced, hash)
					}
				}
				delete(f.announces, drop.peer)
			}
			// If a request was cancelled, check if anything needs to be rescheduled
			if request != nil {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
				f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
			}

		case <-f.quit:
			return
		}
		// No idea what happened, but bump some sanity metrics
		txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
		txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
		txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
		txFetcherQueueingHashes.Update(int64(len(f.announced)))
		txFetcherFetchingPeers.Update(int64(len(f.requests)))
		txFetcherFetchingHashes.Update(int64(len(f.fetching)))

		// Loop did something, ping the step notifier if needed (tests)
		if f.step != nil {
			f.step <- struct{}{}
		}
	}
}
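
// Illustrative sketch, not part of the original file: the response cutoff in
// the cleanup case above, with made-up values. Requested [A, B, C, D], only B
// delivered: the cutoff lands on B's index, so A (skipped on purpose) loses
// this peer as an alternate, while C and D (possibly truncated by the reply
// size limit) keep it.
//
//	req := []string{"A", "B", "C", "D"}
//	delivered := map[string]bool{"B": true}
//
//	cutoff := len(req) // nothing delivered => everything counts as deliberately skipped
//	for i, h := range req {
//		if delivered[h] {
//			cutoff = i // index of the last delivered hash: here 1
//		}
//	}
//	// i < cutoff  (A): the peer withheld it, drop the peer as an alternate origin
//	// i >= cutoff (C, D): plausibly cut off by the size limit, keep the peer as a retry candidate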

// rescheduleWait iterates over all the transactions currently in the waitlist
// and schedules the movement into the fetcher for the earliest.
//
// The method has a granularity of 'gatherSlack', since there's not much point in
// spinning over all the transactions just to maybe find one that should trigger
// a few ms earlier.
func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
	if *timer != nil {
		(*timer).Stop()
	}
	now := f.clock.Now()

	earliest := now
	for _, instance := range f.waittime {
		if earliest > instance {
			earliest = instance
			if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
				break
			}
		}
	}
	*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
		trigger <- struct{}{}
	})
}
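
// Illustrative note, not part of the original file: the delay above, spelled
// out. If the earliest waitlist entry was added at time t0 and the clock now
// reads t, the timer is armed for
//
//	txArriveTimeout - (t - t0)
//
// i.e. the remainder of that entry's 500ms allowance. The gatherSlack
// early-exit treats any entry within 100ms of its deadline as "earliest
// enough", so a single timer tick can collate a whole batch of near-expiries.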

// rescheduleTimeout iterates over all the transactions currently in flight and
// schedules a cleanup run when the first would trigger.
//
// The method has a granularity of 'gatherSlack', since there's not much point in
// spinning over all the transactions just to maybe find one that should trigger
// a few ms earlier.
//
// This method is a bit "flaky" "by design". In theory the timeout timer only ever
// should be rescheduled if some request is pending. In practice, a timeout will
// cause the timer to be rescheduled every 5 secs (until the peer comes through or
// disconnects). This is a limitation of the fetcher code because we don't track
// pending requests and timed out requests separately. Without double tracking, if
// we simply didn't reschedule the timer on all-timeout then the timer would never
// be set again since len(request) > 0 => something's running.
func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
	if *timer != nil {
		(*timer).Stop()
	}
	now := f.clock.Now()

	earliest := now
	for _, req := range f.requests {
		// If this request already timed out, skip it altogether
		if req.hashes == nil {
			continue
		}
		if earliest > req.time {
			earliest = req.time
			if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
				break
			}
		}
	}
	*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
		trigger <- struct{}{}
	})
}

// scheduleFetches starts a batch of retrievals for all available idle peers.
func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
	// Gather the set of peers we want to retrieve from (default to all)
	actives := whitelist
	if actives == nil {
		actives = make(map[string]struct{})
		for peer := range f.announces {
			actives[peer] = struct{}{}
		}
	}
	if len(actives) == 0 {
		return
	}
	// For each active peer, try to schedule some transaction fetches
	idle := len(f.requests) == 0

	f.forEachPeer(actives, func(peer string) {
		if f.requests[peer] != nil {
			return // continue in the for-each
		}
		if len(f.announces[peer]) == 0 {
			return // continue in the for-each
		}
		hashes := make([]common.Hash, 0, maxTxRetrievals)
		f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
			if _, ok := f.fetching[hash]; !ok {
				// Mark the hash as fetching and stash away possible alternates
				f.fetching[hash] = peer

				if _, ok := f.alternates[hash]; ok {
					panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
				}
				f.alternates[hash] = f.announced[hash]
				delete(f.announced, hash)

				// Accumulate the hash and stop if the limit was reached
				hashes = append(hashes, hash)
				if len(hashes) >= maxTxRetrievals {
					return false // break in the for-each
				}
			}
			return true // continue in the for-each
		})
		// If any hashes were allocated, request them from the peer
		if len(hashes) > 0 {
			f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
			txRequestOutMeter.Mark(int64(len(hashes)))

			go func(peer string, hashes []common.Hash) {
				// Try to fetch the transactions, but in case of a request
				// failure (e.g. peer disconnected), reschedule the hashes.
				if err := f.fetchTxs(peer, hashes); err != nil {
					txRequestFailMeter.Mark(int64(len(hashes)))
					f.Drop(peer)
				}
			}(peer, hashes)
		}
	})
	// If a new request was fired, schedule a timeout timer
	if idle && len(f.requests) > 0 {
		f.rescheduleTimeout(timer, timeout)
	}
}

// forEachPeer does a range loop over a map of peers in production, but during
// testing it does a deterministic sorted random to allow reproducing issues.
func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
	// If we're running production, use whatever Go's map gives us
	if f.rand == nil {
		for peer := range peers {
			do(peer)
		}
		return
	}
	// We're running the test suite, make iteration deterministic
	list := make([]string, 0, len(peers))
	for peer := range peers {
		list = append(list, peer)
	}
	sort.Strings(list)
	rotateStrings(list, f.rand.Intn(len(list)))
	for _, peer := range list {
		do(peer)
	}
}
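
// Illustrative sketch, not part of the original file: what the deterministic
// path above produces. Sorting first, then rotating by a seeded offset, keeps
// a test run reproducible while still varying the starting peer.
//
//	list := []string{"peer-c", "peer-a", "peer-b"}
//	sort.Strings(list)     // [peer-a peer-b peer-c]
//	rotateStrings(list, 1) // [peer-b peer-c peer-a]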

// forEachHash does a range loop over a map of hashes in production, but during
// testing it does a deterministic sorted random to allow reproducing issues.
func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
	// If we're running production, use whatever Go's map gives us
	if f.rand == nil {
		for hash := range hashes {
			if !do(hash) {
				return
			}
		}
		return
	}
	// We're running the test suite, make iteration deterministic
	list := make([]common.Hash, 0, len(hashes))
	for hash := range hashes {
		list = append(list, hash)
	}
	sortHashes(list)
	rotateHashes(list, f.rand.Intn(len(list)))
	for _, hash := range list {
		if !do(hash) {
			return
		}
	}
}

// rotateStrings rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration but keep it deterministic.
func rotateStrings(slice []string, n int) {
	orig := make([]string, len(slice))
	copy(orig, slice)

	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}

// sortHashes sorts a slice of hashes. This method is only used in tests in order
// to simulate random map iteration but keep it deterministic.
func sortHashes(slice []common.Hash) {
	for i := 0; i < len(slice); i++ {
		for j := i + 1; j < len(slice); j++ {
			if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
				slice[i], slice[j] = slice[j], slice[i]
			}
		}
	}
}

// rotateHashes rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration but keep it deterministic.
func rotateHashes(slice []common.Hash, n int) {
	orig := make([]common.Hash, len(slice))
	copy(orig, slice)

	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}
1528
eth/fetcher/tx_fetcher_test.go
Normal file
File diff suppressed because it is too large
@@ -65,9 +65,8 @@ type PublicFilterAPI struct {
func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
	api := &PublicFilterAPI{
		backend: backend,
		mux:     backend.EventMux(),
		chainDb: backend.ChainDb(),
		events:  NewEventSystem(backend.EventMux(), backend, lightMode),
		events:  NewEventSystem(backend, lightMode),
		filters: make(map[rpc.ID]*filter),
	}
	go api.timeoutLoop()
@@ -428,7 +427,7 @@ func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {
		hashes := f.hashes
		f.hashes = nil
		return returnHashes(hashes), nil
	case LogsSubscription:
	case LogsSubscription, MinedAndPendingLogsSubscription:
		logs := f.logs
		f.logs = nil
		return returnLogs(logs), nil
@@ -28,7 +28,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/node"
 )
@@ -122,14 +121,13 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
 
 	b.Log("Running filter benchmarks...")
 	start = time.Now()
-	mux := new(event.TypeMux)
 	var backend *testBackend
 
 	for i := 0; i < benchFilterCnt; i++ {
 		if i%20 == 0 {
 			db.Close()
 			db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
-			backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+			backend = &testBackend{db: db, sections: cnt}
 		}
 		var addr common.Address
 		addr[0] = byte(i)
@@ -173,8 +171,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
 
 	b.Log("Running filter benchmarks...")
 	start := time.Now()
-	mux := new(event.TypeMux)
-	backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+	backend := &testBackend{db: db}
 	filter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil)
 	filter.Logs(context.Background())
 	d := time.Since(start)
@@ -32,7 +32,6 @@ import (
 
 type Backend interface {
 	ChainDb() ethdb.Database
-	EventMux() *event.TypeMux
 	HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
 	HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
 	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
@@ -42,6 +41,7 @@ type Backend interface {
 	SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
 	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
 	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
+	SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription
 
 	BloomStatus() (uint64, uint64)
 	ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
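The new SubscribePendingLogsEvent method follows the same event.Feed contract as the neighbouring subscriptions. As a toy illustration of that contract (a standalone sketch using go-ethereum's real event package, not code from this diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	// A Feed's zero value is ready to use, which is why the reworked
	// testBackend later in this diff can hold event.Feed values rather
	// than *event.Feed pointers.
	var pendingLogsFeed event.Feed

	ch := make(chan []string, 1)
	sub := pendingLogsFeed.Subscribe(ch)
	defer sub.Unsubscribe()

	// Send delivers to every subscribed channel and returns the count.
	if nsent := pendingLogsFeed.Send([]string{"pending log"}); nsent == 0 {
		panic("no subscribers")
	}
	fmt.Println(<-ch)
}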
@@ -20,7 +20,6 @@ package filters
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -58,7 +57,6 @@ const (
 )
 
 const (
-
 	// txChanSize is the size of channel listening to NewTxsEvent.
 	// The number is referenced from the size of tx pool.
 	txChanSize = 4096
@@ -70,10 +68,6 @@ const (
 	chainEvChanSize = 10
 )
 
-var (
-	ErrInvalidSubscriptionID = errors.New("invalid id")
-)
-
 type subscription struct {
 	id  rpc.ID
 	typ Type
@@ -89,25 +83,25 @@ type subscription struct {
 
 // EventSystem creates subscriptions, processes events and broadcasts them to the
 // subscription which match the subscription criteria.
 type EventSystem struct {
-	mux       *event.TypeMux
 	backend   Backend
 	lightMode bool
 	lastHead  *types.Header
 
 	// Subscriptions
-	txsSub        event.Subscription         // Subscription for new transaction event
-	logsSub       event.Subscription         // Subscription for new log event
-	rmLogsSub     event.Subscription         // Subscription for removed log event
-	chainSub      event.Subscription         // Subscription for new chain event
-	pendingLogSub *event.TypeMuxSubscription // Subscription for pending log event
+	txsSub         event.Subscription // Subscription for new transaction event
+	logsSub        event.Subscription // Subscription for new log event
+	rmLogsSub      event.Subscription // Subscription for removed log event
+	pendingLogsSub event.Subscription // Subscription for pending log event
+	chainSub       event.Subscription // Subscription for new chain event
 
 	// Channels
-	install   chan *subscription         // install filter for event notification
-	uninstall chan *subscription         // remove filter for event notification
-	txsCh     chan core.NewTxsEvent      // Channel to receive new transactions event
-	logsCh    chan []*types.Log          // Channel to receive new log event
-	rmLogsCh  chan core.RemovedLogsEvent // Channel to receive removed log event
-	chainCh   chan core.ChainEvent       // Channel to receive new chain event
+	install       chan *subscription         // install filter for event notification
+	uninstall     chan *subscription         // remove filter for event notification
+	txsCh         chan core.NewTxsEvent      // Channel to receive new transactions event
+	logsCh        chan []*types.Log          // Channel to receive new log event
+	pendingLogsCh chan []*types.Log          // Channel to receive new log event
+	rmLogsCh      chan core.RemovedLogsEvent // Channel to receive removed log event
+	chainCh       chan core.ChainEvent       // Channel to receive new chain event
 }
 
 // NewEventSystem creates a new manager that listens for event on the given mux,
@@ -116,17 +110,17 @@ type EventSystem struct {
 //
 // The returned manager has a loop that needs to be stopped with the Stop function
 // or by stopping the given mux.
-func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
+func NewEventSystem(backend Backend, lightMode bool) *EventSystem {
 	m := &EventSystem{
-		mux:       mux,
-		backend:   backend,
-		lightMode: lightMode,
-		install:   make(chan *subscription),
-		uninstall: make(chan *subscription),
-		txsCh:     make(chan core.NewTxsEvent, txChanSize),
-		logsCh:    make(chan []*types.Log, logsChanSize),
-		rmLogsCh:  make(chan core.RemovedLogsEvent, rmLogsChanSize),
-		chainCh:   make(chan core.ChainEvent, chainEvChanSize),
+		backend:       backend,
+		lightMode:     lightMode,
+		install:       make(chan *subscription),
+		uninstall:     make(chan *subscription),
+		txsCh:         make(chan core.NewTxsEvent, txChanSize),
+		logsCh:        make(chan []*types.Log, logsChanSize),
+		rmLogsCh:      make(chan core.RemovedLogsEvent, rmLogsChanSize),
+		pendingLogsCh: make(chan []*types.Log, logsChanSize),
+		chainCh:       make(chan core.ChainEvent, chainEvChanSize),
 	}
 
 	// Subscribe events
@@ -134,12 +128,10 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
 	m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
 	m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
 	m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
-	// TODO(rjl493456442): use feed to subscribe pending log event
-	m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})
+	m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh)
 
 	// Make sure none of the subscriptions are empty
-	if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
-		m.pendingLogSub.Closed() {
+	if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {
 		log.Crit("Subscribe for event system failed")
 	}
@@ -316,58 +308,61 @@ func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscript
 
 type filterIndex map[Type]map[rpc.ID]*subscription
 
-// broadcast event to filters that match criteria.
-func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
-	if ev == nil {
-		return
-	}
-
-	switch e := ev.(type) {
-	case []*types.Log:
-		if len(e) > 0 {
-			for _, f := range filters[LogsSubscription] {
-				if matchedLogs := filterLogs(e, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
-					f.logs <- matchedLogs
-				}
-			}
-		}
-	case core.RemovedLogsEvent:
-		for _, f := range filters[LogsSubscription] {
-			if matchedLogs := filterLogs(e.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
-				f.logs <- matchedLogs
-			}
-		}
-	case *event.TypeMuxEvent:
-		if muxe, ok := e.Data.(core.PendingLogsEvent); ok {
-			for _, f := range filters[PendingLogsSubscription] {
-				if e.Time.After(f.created) {
-					if matchedLogs := filterLogs(muxe.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
-						f.logs <- matchedLogs
-					}
-				}
-			}
-		}
-	case core.NewTxsEvent:
-		hashes := make([]common.Hash, 0, len(e.Txs))
-		for _, tx := range e.Txs {
-			hashes = append(hashes, tx.Hash())
-		}
-		for _, f := range filters[PendingTransactionsSubscription] {
-			f.hashes <- hashes
-		}
-	case core.ChainEvent:
-		for _, f := range filters[BlocksSubscription] {
-			f.headers <- e.Block.Header()
-		}
-		if es.lightMode && len(filters[LogsSubscription]) > 0 {
-			es.lightFilterNewHead(e.Block.Header(), func(header *types.Header, remove bool) {
-				for _, f := range filters[LogsSubscription] {
-					if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
-						f.logs <- matchedLogs
-					}
-				}
-			})
-		}
-	}
-}
+func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) {
+	if len(ev) == 0 {
+		return
+	}
+	for _, f := range filters[LogsSubscription] {
+		matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+		if len(matchedLogs) > 0 {
+			f.logs <- matchedLogs
+		}
+	}
+}
+
+func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
+	if len(ev) == 0 {
+		return
+	}
+	for _, f := range filters[PendingLogsSubscription] {
+		matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+		if len(matchedLogs) > 0 {
+			f.logs <- matchedLogs
+		}
+	}
+}
+
+func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
+	for _, f := range filters[LogsSubscription] {
+		matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+		if len(matchedLogs) > 0 {
+			f.logs <- matchedLogs
+		}
+	}
+}
+
+func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) {
+	hashes := make([]common.Hash, 0, len(ev.Txs))
+	for _, tx := range ev.Txs {
+		hashes = append(hashes, tx.Hash())
+	}
+	for _, f := range filters[PendingTransactionsSubscription] {
+		f.hashes <- hashes
+	}
+}
+
+func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
+	for _, f := range filters[BlocksSubscription] {
+		f.headers <- ev.Block.Header()
+	}
+	if es.lightMode && len(filters[LogsSubscription]) > 0 {
+		es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
+			for _, f := range filters[LogsSubscription] {
+				if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
+					f.logs <- matchedLogs
+				}
+			}
+		})
+	}
+}
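One semantic detail worth pausing on: handleLogs filters with f.logsCrit.FromBlock, while handlePendingLogs passes nil as the lower bound, since a pending log has no settled block position to range-check yet. A toy matcher (illustrative only; the real logic lives in filterLogs) makes the nil-lower-bound behaviour concrete:

package main

import "fmt"

type toyLog struct {
	addr  string
	block uint64
}

// matchRange keeps logs for addr inside [from, to]; a nil from skips the
// lower bound, mirroring how handlePendingLogs invokes filterLogs.
func matchRange(logs []toyLog, from *uint64, to uint64, addr string) []toyLog {
	var out []toyLog
	for _, l := range logs {
		if from != nil && l.block < *from {
			continue
		}
		if l.block <= to && l.addr == addr {
			out = append(out, l)
		}
	}
	return out
}

func main() {
	logs := []toyLog{{"0xa", 1}, {"0xa", 2}, {"0xa", 3}}
	two := uint64(2)
	fmt.Println(matchRange(logs, &two, 3, "0xa")) // mined-style: blocks 2 and 3
	fmt.Println(matchRange(logs, nil, 3, "0xa"))  // pending-style: all three
}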
||||
@@ -448,10 +443,10 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
|
||||
func (es *EventSystem) eventLoop() {
|
||||
// Ensure all subscriptions get cleaned up
|
||||
defer func() {
|
||||
es.pendingLogSub.Unsubscribe()
|
||||
es.txsSub.Unsubscribe()
|
||||
es.logsSub.Unsubscribe()
|
||||
es.rmLogsSub.Unsubscribe()
|
||||
es.pendingLogsSub.Unsubscribe()
|
||||
es.chainSub.Unsubscribe()
|
||||
}()
|
||||
|
||||
@@ -462,20 +457,16 @@ func (es *EventSystem) eventLoop() {
|
||||
|
||||
for {
|
||||
select {
|
||||
// Handle subscribed events
|
||||
case ev := <-es.txsCh:
|
||||
es.broadcast(index, ev)
|
||||
es.handleTxsEvent(index, ev)
|
||||
case ev := <-es.logsCh:
|
||||
es.broadcast(index, ev)
|
||||
es.handleLogs(index, ev)
|
||||
case ev := <-es.rmLogsCh:
|
||||
es.broadcast(index, ev)
|
||||
es.handleRemovedLogs(index, ev)
|
||||
case ev := <-es.pendingLogsCh:
|
||||
es.handlePendingLogs(index, ev)
|
||||
case ev := <-es.chainCh:
|
||||
es.broadcast(index, ev)
|
||||
case ev, active := <-es.pendingLogSub.Chan():
|
||||
if !active { // system stopped
|
||||
return
|
||||
}
|
||||
es.broadcast(index, ev)
|
||||
es.handleChainEvent(index, ev)
|
||||
|
||||
case f := <-es.install:
|
||||
if f.typ == MinedAndPendingLogsSubscription {
|
||||
|
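With the TypeMux gone, eventLoop becomes a plain select over typed feed channels, and the pendingLogSub.Chan()/!active shutdown dance disappears because every feed subscription is torn down by the deferred Unsubscribe calls. A standalone miniature of that loop shape (a sketch using the real event package, not the actual EventSystem):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var logsFeed, pendingLogsFeed event.Feed
	logsCh := make(chan string, 8)
	pendingLogsCh := make(chan string, 8)

	logsSub := logsFeed.Subscribe(logsCh)
	pendingLogsSub := pendingLogsFeed.Subscribe(pendingLogsCh)
	defer logsSub.Unsubscribe()
	defer pendingLogsSub.Unsubscribe()

	logsFeed.Send("mined log")
	pendingLogsFeed.Send("pending log")

	for i := 0; i < 2; i++ {
		select {
		case ev := <-logsCh:
			fmt.Println("handleLogs:", ev) // would fan out to LogsSubscription filters
		case ev := <-pendingLogsCh:
			fmt.Println("handlePendingLogs:", ev) // would fan out to PendingLogsSubscription filters
		}
	}
}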
@@ -39,23 +39,20 @@ import (
 )
 
 type testBackend struct {
-	mux        *event.TypeMux
-	db         ethdb.Database
-	sections   uint64
-	txFeed     *event.Feed
-	rmLogsFeed *event.Feed
-	logsFeed   *event.Feed
-	chainFeed  *event.Feed
+	mux             *event.TypeMux
+	db              ethdb.Database
+	sections        uint64
+	txFeed          event.Feed
+	logsFeed        event.Feed
+	rmLogsFeed      event.Feed
+	pendingLogsFeed event.Feed
+	chainFeed       event.Feed
 }
 
 func (b *testBackend) ChainDb() ethdb.Database {
 	return b.db
 }
 
-func (b *testBackend) EventMux() *event.TypeMux {
-	return b.mux
-}
-
 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
 	var (
 		hash common.Hash
@@ -116,6 +113,10 @@ func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript
 	return b.logsFeed.Subscribe(ch)
 }
 
+func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+	return b.pendingLogsFeed.Subscribe(ch)
+}
+
 func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
 	return b.chainFeed.Subscribe(ch)
 }
@@ -160,13 +161,8 @@ func TestBlockSubscription(t *testing.T) {
 	t.Parallel()
 
 	var (
-		mux        = new(event.TypeMux)
 		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+		backend    = &testBackend{db: db}
 		api        = NewPublicFilterAPI(backend, false)
 		genesis    = new(core.Genesis).MustCommit(db)
 		chain, _   = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
@@ -205,7 +201,7 @@ func TestBlockSubscription(t *testing.T) {
 
 	time.Sleep(1 * time.Second)
 	for _, e := range chainEvents {
-		chainFeed.Send(e)
+		backend.chainFeed.Send(e)
 	}
 
 	<-sub0.Err()
@@ -217,14 +213,9 @@ func TestPendingTxFilter(t *testing.T) {
 	t.Parallel()
 
 	var (
-		mux        = new(event.TypeMux)
-		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		api        = NewPublicFilterAPI(backend, false)
+		db      = rawdb.NewMemoryDatabase()
+		backend = &testBackend{db: db}
+		api     = NewPublicFilterAPI(backend, false)
 
 		transactions = []*types.Transaction{
 			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
@@ -240,7 +231,7 @@ func TestPendingTxFilter(t *testing.T) {
 	fid0 := api.NewPendingTransactionFilter()
 
 	time.Sleep(1 * time.Second)
-	txFeed.Send(core.NewTxsEvent{Txs: transactions})
+	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
 
 	timeout := time.Now().Add(1 * time.Second)
 	for {
@@ -277,14 +268,9 @@ func TestPendingTxFilter(t *testing.T) {
 // If not it must return an error.
 func TestLogFilterCreation(t *testing.T) {
 	var (
-		mux        = new(event.TypeMux)
-		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		api        = NewPublicFilterAPI(backend, false)
+		db      = rawdb.NewMemoryDatabase()
+		backend = &testBackend{db: db}
+		api     = NewPublicFilterAPI(backend, false)
 
 		testCases = []struct {
 			crit    FilterCriteria
@@ -326,14 +312,9 @@ func TestInvalidLogFilterCreation(t *testing.T) {
 	t.Parallel()
 
 	var (
-		mux        = new(event.TypeMux)
-		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		api        = NewPublicFilterAPI(backend, false)
+		db      = rawdb.NewMemoryDatabase()
+		backend = &testBackend{db: db}
+		api     = NewPublicFilterAPI(backend, false)
 	)
 
 	// different situations where log filter creation should fail.
@@ -353,15 +334,10 @@ func TestInvalidLogFilterCreation(t *testing.T) {
 
 func TestInvalidGetLogsRequest(t *testing.T) {
 	var (
-		mux        = new(event.TypeMux)
-		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		api        = NewPublicFilterAPI(backend, false)
-		blockHash  = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+		db        = rawdb.NewMemoryDatabase()
+		backend   = &testBackend{db: db}
+		api       = NewPublicFilterAPI(backend, false)
+		blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
 	)
 
 	// Reason: Cannot specify both BlockHash and FromBlock/ToBlock)
@@ -383,14 +359,9 @@ func TestLogFilter(t *testing.T) {
 	t.Parallel()
 
 	var (
-		mux        = new(event.TypeMux)
-		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		api        = NewPublicFilterAPI(backend, false)
+		db      = rawdb.NewMemoryDatabase()
+		backend = &testBackend{db: db}
+		api     = NewPublicFilterAPI(backend, false)
 
 		firstAddr  = common.HexToAddress("0x1111111111111111111111111111111111111111")
 		secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -400,7 +371,7 @@ func TestLogFilter(t *testing.T) {
 		secondTopic  = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
 		notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
 
-		// posted twice, once as vm.Logs and once as core.PendingLogsEvent
+		// posted twice, once as regular logs and once as pending logs.
 		allLogs = []*types.Log{
 			{Address: firstAddr},
 			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
@@ -453,11 +424,11 @@ func TestLogFilter(t *testing.T) {
 
 	// raise events
 	time.Sleep(1 * time.Second)
-	if nsend := logsFeed.Send(allLogs); nsend == 0 {
-		t.Fatal("Shoud have at least one subscription")
+	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
+		t.Fatal("Logs event not delivered")
 	}
-	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
-		t.Fatal(err)
+	if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
+		t.Fatal("Pending logs event not delivered")
 	}
 
 	for i, tt := range testCases {
@@ -502,14 +473,9 @@ func TestPendingLogsSubscription(t *testing.T) {
 	t.Parallel()
 
 	var (
-		mux        = new(event.TypeMux)
-		db         = rawdb.NewMemoryDatabase()
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		api        = NewPublicFilterAPI(backend, false)
+		db      = rawdb.NewMemoryDatabase()
+		backend = &testBackend{db: db}
+		api     = NewPublicFilterAPI(backend, false)
 
 		firstAddr  = common.HexToAddress("0x1111111111111111111111111111111111111111")
 		secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -521,26 +487,18 @@ func TestPendingLogsSubscription(t *testing.T) {
 		fourthTopic  = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
 		notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
 
-		allLogs = []core.PendingLogsEvent{
-			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
-			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
-			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
-			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
-			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
-			{Logs: []*types.Log{
+		allLogs = [][]*types.Log{
+			{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
+			{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
+			{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
+			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
+			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
+			{
 				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
 				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
 				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
 				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
-			}},
+			},
 		}
 
-		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
-			var logs []*types.Log
-			for _, l := range pl {
-				logs = append(logs, l.Logs...)
-			}
-			return logs
-		}
-
 		testCases = []struct {
@@ -550,21 +508,52 @@ func TestPendingLogsSubscription(t *testing.T) {
 			sub      *Subscription
 		}{
 			// match all
-			{ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil},
+			{
+				ethereum.FilterQuery{}, flattenLogs(allLogs),
+				nil, nil,
+			},
 			// match none due to no matching addresses
-			{ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}},
+				nil,
+				nil, nil,
+			},
 			// match logs based on addresses, ignore topics
-			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}},
+				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
+				nil, nil,
+			},
 			// match none due to no matching topics (match with address)
-			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}},
+				nil, nil, nil,
+			},
 			// match logs based on addresses and topics
-			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
+				append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
+				nil, nil,
+			},
 			// match logs based on multiple addresses and "or" topics
-			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
+				append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
+				nil,
+				nil,
+			},
 			// block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes
-			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)},
+				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
+				nil, nil,
+			},
 			// multiple pending logs, should match only 2 topics from the logs in block 5
-			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
+			{
+				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}},
+				[]*types.Log{allLogs[5][0], allLogs[5][2]},
+				nil, nil,
+			},
 		}
 	)
@@ -607,10 +596,15 @@ func TestPendingLogsSubscription(t *testing.T) {
 
 	// raise events
 	time.Sleep(1 * time.Second)
-	// allLogs are type of core.PendingLogsEvent
-	for _, l := range allLogs {
-		if err := mux.Post(l); err != nil {
-			t.Fatal(err)
-		}
+	for _, ev := range allLogs {
+		backend.pendingLogsFeed.Send(ev)
 	}
 }
+
+func flattenLogs(pl [][]*types.Log) []*types.Log {
+	var logs []*types.Log
+	for _, l := range pl {
+		logs = append(logs, l...)
+	}
+	return logs
+}
@@ -29,7 +29,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -50,18 +49,13 @@ func BenchmarkFilters(b *testing.B) {
 	defer os.RemoveAll(dir)
 
 	var (
-		db, _      = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
-		mux        = new(event.TypeMux)
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		key1, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-		addr1      = crypto.PubkeyToAddress(key1.PublicKey)
-		addr2      = common.BytesToAddress([]byte("jeff"))
-		addr3      = common.BytesToAddress([]byte("ethereum"))
-		addr4      = common.BytesToAddress([]byte("random addresses please"))
+		db, _   = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
+		backend = &testBackend{db: db}
+		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2   = common.BytesToAddress([]byte("jeff"))
+		addr3   = common.BytesToAddress([]byte("ethereum"))
+		addr4   = common.BytesToAddress([]byte("random addresses please"))
 	)
 	defer db.Close()
 
@@ -109,15 +103,10 @@ func TestFilters(t *testing.T) {
 	defer os.RemoveAll(dir)
 
 	var (
-		db, _      = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
-		mux        = new(event.TypeMux)
-		txFeed     = new(event.Feed)
-		rmLogsFeed = new(event.Feed)
-		logsFeed   = new(event.Feed)
-		chainFeed  = new(event.Feed)
-		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
-		key1, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-		addr       = crypto.PubkeyToAddress(key1.PublicKey)
+		db, _   = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
+		backend = &testBackend{db: db}
+		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		addr    = crypto.PubkeyToAddress(key1.PublicKey)
 
 		hash1 = common.BytesToHash([]byte("topic1"))
 		hash2 = common.BytesToHash([]byte("topic2"))
@@ -21,6 +21,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		Genesis    *core.Genesis `toml:",omitempty"`
 		NetworkId  uint64
 		SyncMode   downloader.SyncMode
+		DiscoveryURLs []string
 		NoPruning  bool
 		NoPrefetch bool
 		Whitelist  map[uint64]common.Hash `toml:"-"`
@@ -49,11 +50,14 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		RPCGasCap        *big.Int                       `toml:",omitempty"`
 		Checkpoint       *params.TrustedCheckpoint      `toml:",omitempty"`
 		CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
+		OverrideIstanbul    *big.Int `toml:",omitempty"`
+		OverrideMuirGlacier *big.Int `toml:",omitempty"`
 	}
 	var enc Config
 	enc.Genesis = c.Genesis
 	enc.NetworkId = c.NetworkId
 	enc.SyncMode = c.SyncMode
+	enc.DiscoveryURLs = c.DiscoveryURLs
 	enc.NoPruning = c.NoPruning
 	enc.NoPrefetch = c.NoPrefetch
 	enc.Whitelist = c.Whitelist
@@ -82,6 +86,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
 	enc.RPCGasCap = c.RPCGasCap
 	enc.Checkpoint = c.Checkpoint
 	enc.CheckpointOracle = c.CheckpointOracle
+	enc.OverrideIstanbul = c.OverrideIstanbul
+	enc.OverrideMuirGlacier = c.OverrideMuirGlacier
 	return &enc, nil
 }
 
@@ -91,6 +97,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		Genesis    *core.Genesis `toml:",omitempty"`
 		NetworkId  *uint64
 		SyncMode   *downloader.SyncMode
+		DiscoveryURLs []string
 		NoPruning  *bool
 		NoPrefetch *bool
 		Whitelist  map[uint64]common.Hash `toml:"-"`
@@ -119,6 +126,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		RPCGasCap        *big.Int                       `toml:",omitempty"`
 		Checkpoint       *params.TrustedCheckpoint      `toml:",omitempty"`
 		CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
+		OverrideIstanbul    *big.Int `toml:",omitempty"`
+		OverrideMuirGlacier *big.Int `toml:",omitempty"`
 	}
 	var dec Config
 	if err := unmarshal(&dec); err != nil {
@@ -133,6 +142,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.SyncMode != nil {
 		c.SyncMode = *dec.SyncMode
 	}
+	if dec.DiscoveryURLs != nil {
+		c.DiscoveryURLs = dec.DiscoveryURLs
+	}
 	if dec.NoPruning != nil {
 		c.NoPruning = *dec.NoPruning
 	}
@@ -217,5 +229,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.CheckpointOracle != nil {
 		c.CheckpointOracle = dec.CheckpointOracle
 	}
+	if dec.OverrideIstanbul != nil {
+		c.OverrideIstanbul = dec.OverrideIstanbul
+	}
+	if dec.OverrideMuirGlacier != nil {
+		c.OverrideMuirGlacier = dec.OverrideMuirGlacier
+	}
 	return nil
 }
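The generated code above sticks to one pattern throughout: every optional knob is mirrored as a pointer field in the throwaway struct, so "absent from the TOML file" is distinguishable from an explicit zero; that is exactly how the new OverrideIstanbul and OverrideMuirGlacier fields round-trip. A miniature of the pattern (a sketch; assumes the naoina/toml package that go-ethereum's generator targets):

package main

import (
	"fmt"
	"math/big"

	"github.com/naoina/toml"
)

type config struct {
	NetworkId        uint64
	OverrideIstanbul *big.Int
}

func main() {
	// Pointer fields let "not set" be told apart from "set to zero".
	var dec struct {
		NetworkId        *uint64
		OverrideIstanbul *big.Int
	}
	if err := toml.Unmarshal([]byte("NetworkId = 5\n"), &dec); err != nil {
		panic(err)
	}
	c := config{NetworkId: 1} // defaults
	if dec.NetworkId != nil {
		c.NetworkId = *dec.NetworkId
	}
	if dec.OverrideIstanbul != nil { // absent here, so the default survives
		c.OverrideIstanbul = dec.OverrideIstanbul
	}
	fmt.Printf("%+v\n", c)
}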
eth/handler.go | 169
@@ -50,9 +50,6 @@ const (
 	// txChanSize is the size of channel listening to NewTxsEvent.
 	// The number is referenced from the size of tx pool.
 	txChanSize = 4096
-
-	// minimum number of peers to broadcast new blocks to
-	minBroadcastPeers = 4
 )
 
 var (
@@ -77,9 +74,10 @@ type ProtocolManager struct {
 	blockchain *core.BlockChain
 	maxPeers   int
 
-	downloader *downloader.Downloader
-	fetcher    *fetcher.Fetcher
-	peers      *peerSet
+	downloader   *downloader.Downloader
+	blockFetcher *fetcher.BlockFetcher
+	txFetcher    *fetcher.TxFetcher
+	peers        *peerSet
 
 	eventMux *event.TypeMux
 	txsCh    chan core.NewTxsEvent
@@ -97,6 +95,9 @@ type ProtocolManager struct {
 	// wait group is used for graceful shutdowns during downloading
 	// and processing
 	wg sync.WaitGroup
+
+	// Test fields or hooks
+	broadcastTxAnnouncesOnly bool // Testing field, disable transaction propagation
 }
 
 // NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
@@ -187,7 +188,16 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
 		}
 		return n, err
 	}
-	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+	manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+
+	fetchTx := func(peer string, hashes []common.Hash) error {
+		p := manager.peers.Peer(peer)
+		if p == nil {
+			return errors.New("unknown peer")
+		}
+		return p.RequestTxs(hashes)
+	}
+	manager.txFetcher = fetcher.NewTxFetcher(txpool.Has, txpool.AddRemotes, fetchTx)
 
 	return manager, nil
 }
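The fetchTx closure is the entire surface the transaction fetcher gets: it never sees the peer set, only a function that can fail with "unknown peer". A toy version of that dependency-injection wiring (names here are illustrative, not the real eth API):

package main

import (
	"errors"
	"fmt"
)

type toyPeer struct{ id string }

func (p *toyPeer) RequestTxs(hashes []string) error {
	fmt.Println(p.id, "requesting", hashes)
	return nil
}

func main() {
	peers := map[string]*toyPeer{"peer-1": {id: "peer-1"}}

	// The fetcher holds only this function, mirroring NewTxFetcher's third argument.
	fetchTx := func(peer string, hashes []string) error {
		p, ok := peers[peer]
		if !ok {
			return errors.New("unknown peer")
		}
		return p.RequestTxs(hashes)
	}

	_ = fetchTx("peer-1", []string{"0xabc"})
	if err := fetchTx("peer-2", []string{"0xdef"}); err != nil {
		fmt.Println("request failed:", err) // the real fetcher reschedules and drops the peer
	}
}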
@@ -203,7 +213,7 @@ func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol {
 		Version: version,
 		Length:  length,
 		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
-			peer := pm.newPeer(int(version), p, rw)
+			peer := pm.newPeer(int(version), p, rw, pm.txpool.Get)
 			select {
 			case pm.newPeerCh <- peer:
 				pm.wg.Add(1)
@@ -235,6 +245,8 @@ func (pm *ProtocolManager) removePeer(id string) {
 
 	// Unregister the peer from the downloader and Ethereum peer set
 	pm.downloader.UnregisterPeer(id)
+	pm.txFetcher.Drop(id)
+
 	if err := pm.peers.Unregister(id); err != nil {
 		log.Error("Peer removal failed", "peer", id, "err", err)
 	}
@@ -258,7 +270,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {
 
 	// start sync handlers
 	go pm.syncer()
-	go pm.txsyncLoop()
+	go pm.txsyncLoop64() // TODO(karalabe): Legacy initial tx exchange, drop with eth/64.
 }
 
 func (pm *ProtocolManager) Stop() {
@@ -286,8 +298,8 @@ func (pm *ProtocolManager) Stop() {
 	log.Info("Ethereum protocol stopped")
 }
 
-func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
-	return newPeer(pv, p, newMeteredMsgWriter(rw))
+func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
+	return newPeer(pv, p, rw, getPooledTx)
 }
 
 // handle is the callback invoked to manage the life cycle of an eth peer. When
@@ -311,9 +323,6 @@ func (pm *ProtocolManager) handle(p *peer) error {
 		p.Log().Debug("Ethereum handshake failed", "err", err)
 		return err
 	}
-	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
-		rw.Init(p.version)
-	}
 	// Register the peer locally
 	if err := pm.peers.Register(p); err != nil {
 		p.Log().Error("Ethereum peer registration failed", "err", err)
@@ -514,7 +523,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
 		}
 		// Irrelevant of the fork checks, send the header to the fetcher just in case
-		headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
+		headers = pm.blockFetcher.FilterHeaders(p.id, headers, time.Now())
 	}
 	if len(headers) > 0 || !filter {
 		err := pm.downloader.DeliverHeaders(p.id, headers)
@@ -567,7 +576,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		// Filter out any explicitly requested bodies, deliver the rest to the downloader
 		filter := len(transactions) > 0 || len(uncles) > 0
 		if filter {
-			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
+			transactions, uncles = pm.blockFetcher.FilterBodies(p.id, transactions, uncles, time.Now())
 		}
 		if len(transactions) > 0 || len(uncles) > 0 || !filter {
 			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
@@ -678,7 +687,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}
 		for _, block := range unknown {
-			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
+			pm.blockFetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
 		}
 
 	case msg.Code == NewBlockMsg:
@@ -687,6 +696,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		if err := msg.Decode(&request); err != nil {
 			return errResp(ErrDecode, "%v: %v", msg, err)
 		}
+		if hash := types.CalcUncleHash(request.Block.Uncles()); hash != request.Block.UncleHash() {
+			log.Warn("Propagated block has invalid uncles", "have", hash, "exp", request.Block.UncleHash())
+			break // TODO(karalabe): return error eventually, but wait a few releases
+		}
+		if hash := types.DeriveSha(request.Block.Transactions()); hash != request.Block.TxHash() {
+			log.Warn("Propagated block has invalid body", "have", hash, "exp", request.Block.TxHash())
+			break // TODO(karalabe): return error eventually, but wait a few releases
+		}
 		if err := request.sanityCheck(); err != nil {
 			return err
 		}
@@ -695,7 +712,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 
 		// Mark the peer as owning the block and schedule it for import
 		p.MarkBlock(request.Block.Hash())
-		pm.fetcher.Enqueue(p.id, request.Block)
+		pm.blockFetcher.Enqueue(p.id, request.Block)
 
 		// Assuming the block is importable by the peer, but possibly not yet done so,
 		// calculate the head hash and TD that the peer truly must have.
@@ -716,7 +733,59 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}
 
-	case msg.Code == TxMsg:
+	case msg.Code == NewPooledTransactionHashesMsg && p.version >= eth65:
+		// New transaction announcement arrived, make sure we have
+		// a valid and fresh chain to handle them
+		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
+			break
+		}
+		var hashes []common.Hash
+		if err := msg.Decode(&hashes); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Schedule all the unknown hashes for retrieval
+		for _, hash := range hashes {
+			p.MarkTransaction(hash)
+		}
+		pm.txFetcher.Notify(p.id, hashes)
+
+	case msg.Code == GetPooledTransactionsMsg && p.version >= eth65:
+		// Decode the retrieval message
+		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
+		if _, err := msgStream.List(); err != nil {
+			return err
+		}
+		// Gather transactions until the fetch or network limits are reached
+		var (
+			hash   common.Hash
+			bytes  int
+			hashes []common.Hash
+			txs    []rlp.RawValue
+		)
+		for bytes < softResponseLimit {
+			// Retrieve the hash of the next transaction
+			if err := msgStream.Decode(&hash); err == rlp.EOL {
+				break
+			} else if err != nil {
+				return errResp(ErrDecode, "msg %v: %v", msg, err)
+			}
+			// Retrieve the requested transaction, skipping if unknown to us
+			tx := pm.txpool.Get(hash)
+			if tx == nil {
+				continue
+			}
+			// If known, encode and queue for response packet
+			if encoded, err := rlp.EncodeToBytes(tx); err != nil {
+				log.Error("Failed to encode transaction", "err", err)
+			} else {
+				hashes = append(hashes, hash)
+				txs = append(txs, encoded)
+				bytes += len(encoded)
+			}
+		}
+		return p.SendPooledTransactionsRLP(hashes, txs)
+
+	case msg.Code == TransactionMsg || (msg.Code == PooledTransactionsMsg && p.version >= eth65):
 		// Transactions arrived, make sure we have a valid and fresh chain to handle them
 		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
 			break
@@ -733,7 +802,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 			p.MarkTransaction(tx.Hash())
 		}
-		pm.txpool.AddRemotes(txs)
+		pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg)
 
 	default:
 		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
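The GetPooledTransactionsMsg handler above streams hashes out of the request rather than decoding the whole list up front, which is what lets it stop cleanly at softResponseLimit. The rlp.Stream idiom in isolation (a standalone sketch using go-ethereum's real rlp package):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	payload, _ := rlp.EncodeToBytes([]uint64{1, 2, 3})

	s := rlp.NewStream(bytes.NewReader(payload), uint64(len(payload)))
	if _, err := s.List(); err != nil { // enter the outer list
		panic(err)
	}
	for {
		var v uint64
		if err := s.Decode(&v); err == rlp.EOL { // end of list reached
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println("decoded item:", v) // the handler looks up the tx pool here
	}
}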
@@ -741,8 +810,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 	return nil
 }
 
-// BroadcastBlock will either propagate a block to a subset of it's peers, or
-// will only announce it's availability (depending what's requested).
+// BroadcastBlock will either propagate a block to a subset of its peers, or
+// will only announce its availability (depending what's requested).
 func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 	hash := block.Hash()
 	peers := pm.peers.PeersWithoutBlock(hash)
@@ -758,14 +827,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 		return
 	}
 	// Send the block to a subset of our peers
-	transferLen := int(math.Sqrt(float64(len(peers))))
-	if transferLen < minBroadcastPeers {
-		transferLen = minBroadcastPeers
-	}
-	if transferLen > len(peers) {
-		transferLen = len(peers)
-	}
-	transfer := peers[:transferLen]
+	transfer := peers[:int(math.Sqrt(float64(len(peers))))]
 	for _, peer := range transfer {
 		peer.AsyncSendNewBlock(block, td)
 	}
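Dropping the minBroadcastPeers floor leaves pure square-root fan-out: with n peers, the full block is pushed to about √n of them and the rest only hear the announcement, trading a little propagation latency for far less redundant bandwidth. The arithmetic, for a feel of the numbers (an illustrative sketch):

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, n := range []int{4, 16, 64, 100} {
		full := int(math.Sqrt(float64(n))) // peers that receive the whole block
		fmt.Printf("%3d peers -> %2d full blocks, %2d announcements\n", n, full, n-full)
	}
}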
@@ -781,22 +843,43 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 	}
 }
 
-// BroadcastTxs will propagate a batch of transactions to all peers which are not known to
+// BroadcastTransactions will propagate a batch of transactions to all peers which are not known to
 // already have the given transaction.
-func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
-	var txset = make(map[*peer]types.Transactions)
-
+func (pm *ProtocolManager) BroadcastTransactions(txs types.Transactions, propagate bool) {
+	var (
+		txset = make(map[*peer][]common.Hash)
+		annos = make(map[*peer][]common.Hash)
+	)
+	// Broadcast transactions to a batch of peers not knowing about it
+	if propagate {
+		for _, tx := range txs {
+			peers := pm.peers.PeersWithoutTx(tx.Hash())
+
+			// Send the transaction to a subset of our peers
+			transfer := peers[:int(math.Sqrt(float64(len(peers))))]
+			for _, peer := range transfer {
+				txset[peer] = append(txset[peer], tx.Hash())
+			}
+			log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
+		}
+		for peer, hashes := range txset {
+			peer.AsyncSendTransactions(hashes)
+		}
+		return
+	}
+	// Otherwise only broadcast the announcement to peers
 	for _, tx := range txs {
 		peers := pm.peers.PeersWithoutTx(tx.Hash())
 		for _, peer := range peers {
-			txset[peer] = append(txset[peer], tx)
+			annos[peer] = append(annos[peer], tx.Hash())
 		}
 		log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
 	}
-	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
-	for peer, txs := range txset {
-		peer.AsyncSendTransactions(txs)
+	for peer, hashes := range annos {
+		if peer.version >= eth65 {
+			peer.AsyncSendPooledTransactionHashes(hashes)
+		} else {
+			peer.AsyncSendTransactions(hashes)
+		}
 	}
 }
@@ -815,7 +898,13 @@ func (pm *ProtocolManager) txBroadcastLoop() {
 	for {
 		select {
 		case event := <-pm.txsCh:
-			pm.BroadcastTxs(event.Txs)
+			// For testing purpose only, disable propagation
+			if pm.broadcastTxAnnouncesOnly {
+				pm.BroadcastTransactions(event.Txs, false)
+				continue
+			}
+			pm.BroadcastTransactions(event.Txs, true)  // First propagate transactions to peers
+			pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest
 
 		// Err() channel will be closed when unsubscribing.
 		case <-pm.txsSub.Err():
Some files were not shown because too many files have changed in this diff.