Compare commits

...

87 Commits

Author SHA1 Message Date
34b622a248 Merge pull request #2287 from obscuren/release/1.3.5
Release/1.3.5
2016-03-03 15:22:12 +01:00
cab802b40f VERSION, cmd/geth: bumped version 1.3.5 2016-03-03 15:10:48 +01:00
869d9792b3 [release/1.3.5] eth: forward empty body responses to the downloader
(cherry picked from commit ae4982a365)
2016-03-03 15:10:32 +01:00
d3f8b763bf VERSION: bumped version to 1.3.4 2016-02-29 20:57:48 +01:00
4044a8cea4 Merge pull request #2258 from obscuren/release/1.3.4
Homestead Release Candidate
2016-02-29 15:05:37 +01:00
61be63bb9b [release/1.3.4] cmd/utils, params: homestead block
(cherry picked from commit e22fd22c97b4f5d4af118dca3fb2cb6292a520a6)

Conflicts:
	cmd/utils/flags.go
2016-02-29 14:59:30 +01:00
5f7e74d5c8 [release/1.3.4] cmd/utils: lower the min accepted gas price for relay and GPO to 20 shannon
(cherry picked from commit ab92678fb3)
2016-02-29 14:59:20 +01:00
2be2842758 [release/1.3.4] eth/downloader: fix premature exit before notifying all part fetchers
(cherry picked from commit 64ee5763ee)
2016-02-29 13:44:34 +01:00
c2df9d356a [release/1.3.4] crypto/secp256k1: remove dependency on libgmp
Turns out we actually don't need it: USE_NUM_NONE works
because we also set USE_FIELD_INV_BUILTIN.

Conflicts:
	Makefile
	crypto/secp256k1/secp256.go
2016-02-29 13:32:26 +01:00
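A minimal standalone sketch of the build-define idea behind this commit: with USE_NUM_NONE the C library skips its GMP-backed bignum code entirely, which is safe because USE_FIELD_INV_BUILTIN supplies the field inversion instead. The real cgo preamble lives in crypto/secp256k1/secp256.go and may differ in detail.

package secp256k1

/*
#cgo CFLAGS: -DUSE_NUM_NONE -DUSE_FIELD_INV_BUILTIN
*/
import "C"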
a4f4846fff [release/1.3.4] eth/downloader: fix header download limiting
Fixes #2201
(cherry picked from commit 26e72b2ccd)
2016-02-29 13:24:33 +01:00
7f83e68b13 [release/1.3.4] eth: fixed homestead tx check
When a block is queried for retrieval we should check whether the
block falls within the frontier rules. If we always used `From`,
retrieving transactions might fail. This PR temporarily changes
everything to `FromFrontier` (safe!).

This is a backport of c616391df2
2016-02-29 13:24:33 +01:00
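A hypothetical helper illustrating the idea (the types and `From`/`FromFrontier` methods follow go-ethereum 1.3.x loosely; the helper itself is not geth's code): frontier-era transactions may carry signatures that homestead's stricter checks reject, so derive the sender under frontier rules for pre-homestead blocks.

package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// txSender derives a transaction sender, falling back to frontier
// signature rules for blocks that predate the homestead transition.
func txSender(tx *types.Transaction, blockNum, homesteadBlock *big.Int) (common.Address, error) {
	if blockNum.Cmp(homesteadBlock) < 0 {
		return tx.FromFrontier() // frontier rules: safe for all historical txs
	}
	return tx.From()
}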
5570b11398 [release/1.3.4] params: settle the Pi vs Tau dispute
This commit increases the artificial gas floor to 4712388, the midpoint
between π×10⁶ (3141592) and τ×10⁶ (6283185).
(cherry picked from commit f954a8b666)
2016-02-29 13:24:33 +01:00
e7fb300053 [release/1.3.4] cmd/geth: bump version v1.3.4 2016-02-29 13:24:32 +01:00
4e0fe48e20 [release/1.3.4] xeth: backward fix for messages 2016-02-24 13:46:37 +01:00
bcf565730b [release/1.3.4] core: Added new TD strategy which mitigates the risk of selfish mining
Assume the following scenario: a miner has 15% of all hashing power and
the ability to exert moderate control over the network, to the point
where if the attacker sees a message A, it can't stop A from
propagating, but what it **can** do is send a message B and ensure that
most nodes see B before A. The attacker can then selfish mine and
augment the selfish-mining strategy by giving his own blocks an
advantage.

This change makes the time at which a block is received less relevant,
so the level of control an attacker has over the network no longer
makes a difference.

It replaces the current TD comparison `B_td > C_td` with the new
rule `B_td > C_td || B_td == C_td && rnd < 0.5`.
2016-02-24 13:46:33 +01:00
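A Go sketch of that comparison; the function name and shape are illustrative, not geth's fork-choice API. On strictly higher total difficulty the incoming block wins, and on an exact tie a coin flip decides, so arrival time no longer picks the winner.

package sketch

import (
	"math/big"
	"math/rand"
)

// shouldReorg reports whether the chain should switch to a block with
// total difficulty newTD, given the current head's total difficulty.
func shouldReorg(newTD, currentTD *big.Int) bool {
	switch newTD.Cmp(currentTD) {
	case 1: // B_td > C_td
		return true
	case 0: // B_td == C_td: random tiebreak instead of first-seen
		return rand.Float64() < 0.5
	}
	return false
}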
587bafaa9f [release/1.3.4] core, core/vm, crypto: fixes for homestead
* Removed some strange code that didn't apply state reverting properly
* Refactored code setting from vm & state transition to the executioner
* Updated tests

Conflicts:
	common/registrar/ethreg/api.go
	core/tx_pool.go
	core/vm/jit_test.go
2016-02-24 13:46:30 +01:00
7bb496f737 [release/1.3.4] tests: updated homestead tests 2016-02-24 13:46:23 +01:00
61404979ed [release/1.3.4] params, crypto, core, core/vm: homestead consensus protocol changes
* change gas cost for contract-creating txs
* invalidate signatures with an s value greater than secp256k1 N / 2 (see the sketch after this entry)
* OOG contract creation if not enough gas to store code
* new difficulty adjustment algorithm
* new DELEGATECALL opcode

Conflicts:
	core/vm/environment.go
	crypto/crypto.go
	crypto/secp256k1/secp256.go
	eth/api.go
2016-02-24 13:46:11 +01:00
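A self-contained sketch of the s-value rule from the list above (the helper name is illustrative): homestead rejects any signature whose s component lies in the upper half of the curve order, removing the second of the two equivalent (r, s) and (r, N-s) encodings and thus a source of transaction malleability.

package sketch

import "math/big"

var (
	// secp256k1N is the order of the secp256k1 curve; halfN is N/2.
	secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	halfN         = new(big.Int).Rsh(secp256k1N, 1)
)

// validS reports whether a signature's s component is acceptable,
// applying the stricter upper-half rejection under homestead rules.
func validS(s *big.Int, homestead bool) bool {
	if s.Sign() < 1 || s.Cmp(secp256k1N) >= 0 {
		return false
	}
	if homestead && s.Cmp(halfN) > 0 {
		return false
	}
	return true
}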
300f1e2abf [release/1.3.4] core, core/types, miner: fix transaction nonce-price combo sort 2016-02-24 13:46:06 +01:00
4ce7970340 [release/1.3.4] p2p/discover: fix Windows-specific issue for larger-than-buffer packets
On Windows, UDPConn.ReadFrom returns an error for packets larger
than the receive buffer. The error is not marked temporary, causing
our loop to exit when the first oversized packet arrived. The fix
is to treat this particular error as temporary.

Fixes: #1579, #2087
Updates: #2082

Conflicts:
	p2p/discover/udp_test.go
2016-02-24 13:46:03 +01:00
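A sketch of the fix: besides the usual net.Error Temporary() check, treat the Windows read error produced by an oversized UDP datagram as temporary so the discovery read loop keeps running. The string match here is an assumption for illustration; geth's actual test is platform-specific.

package sketch

import (
	"net"
	"runtime"
	"strings"
)

// isTemporaryError reports whether a read error should be retried
// rather than terminating the UDP read loop.
func isTemporaryError(err error) bool {
	if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
		return true
	}
	// Windows reports larger-than-buffer datagrams as a hard error;
	// treat it as temporary so one oversized packet can't kill the loop.
	return err != nil && runtime.GOOS == "windows" &&
		strings.Contains(err.Error(), "datagram socket was larger")
}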
0c17be92fb [release/1.3.4] p2p: backward fix for S256 curve 2016-02-24 13:45:59 +01:00
b5a0cf488c [release/1.3.4] p2p: EIP-8 changes
Conflicts:
	p2p/rlpx.go
2016-02-24 13:45:56 +01:00
b25da7c3f4 [release/1.3.4] p2p/discover: EIP-8 changes
Conflicts:
	p2p/discover/udp_test.go
2016-02-24 13:45:53 +01:00
93077941d3 [release/1.3.4] rlp: add "tail" struct tag 2016-02-24 13:44:56 +01:00
8cb69b9e9b [release/1.3.4] crypto/ecies: make authenticated shared data work
The s2 parameter was not actually written to the MAC.
2016-02-24 13:44:37 +01:00
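A sketch of the corrected authentication step: the shared authentication data s2 must be fed into the MAC together with the ciphertext; before this fix s2 was omitted, so it had no effect on verification. The hash choice and layout are illustrative, not geth's exact ECIES code.

package sketch

import (
	"crypto/hmac"
	"crypto/sha256"
)

// messageTag computes the authentication tag over the ciphertext and
// the shared authentication data s2.
func messageTag(km, ciphertext, s2 []byte) []byte {
	mac := hmac.New(sha256.New, km)
	mac.Write(ciphertext)
	mac.Write(s2) // the previously missing write
	return mac.Sum(nil)
}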
c541b38fb3 VERSION, cmd/geth: bumped version number 1.3.3 2016-01-05 13:02:07 +01:00
336a4d7b8d core: fix transaction reorg issues within the tx pool 2016-01-05 13:02:07 +01:00
8938768f75 core, eth/downloader: ensure state presence in ancestor lookup 2016-01-05 12:31:45 +01:00
5490437942 Merge branch 'develop' into release/1.3.2
Conflicts:
	VERSION
	cmd/geth/main.go
2015-11-24 13:48:47 +01:00
b0fb48c389 Merge pull request #1988 from bas-vk/issue1971
miner: bugfix where blockhash in receipts and logs is left empty
2015-11-24 10:55:07 +01:00
ae9e9efa31 Merge pull request #1991 from Gustav-Simonsson/common_tests
Update common test files
2015-11-23 10:26:21 +01:00
6bb29aebee Merge pull request #1666 from obscuren/create-transaction
rpc/api, xeth: added signTransaction method
2015-11-20 21:36:56 +01:00
314c031ff2 Merge pull request #1995 from karalabe/parametrize-crosscompile-go
Makefile: individual platforms, configurable Go runtime
2015-11-20 21:10:21 +01:00
fea819f74f Makefile: individual platforms, configurable Go runtime 2015-11-20 16:06:35 +02:00
220b0bf6e5 Update common test files 2015-11-20 12:53:36 +01:00
f16fab91c8 Merge pull request #1953 from karalabe/switch-to-fast-peers
eth/downloader: fetch data proportionally to peer capacity
2015-11-19 18:48:53 +01:00
98cbe1356e miner: bugfix where blockhash in receipts and logs is left empty 2015-11-19 16:02:49 +01:00
b6f5523bdc eth/downloader: fetch data proportionally to peer capacity 2015-11-19 17:01:39 +02:00
4c2933ad82 Merge pull request #1993 from obscuren/remove-legalese
cmd/geth, cmd/utils: removed legalese
2015-11-19 15:29:49 +01:00
7399b138a8 Merge pull request #1923 from karalabe/cleanup-receipt-data-access
core, eth, miner, xeth: clean up tx/receipt db accessors
2015-11-19 15:28:15 +01:00
65bb07fb4e Merge pull request #1980 from fjl/downloader-deliver-hang
eth/downloader: don't hang for spurious deliveries
2015-11-19 15:19:21 +01:00
e86e0ecdc8 core, eth, miner, xeth: clean up tx/receipt db accessors 2015-11-19 16:03:32 +02:00
dd09af27af eth/downloader: run tests in parallel 2015-11-19 14:18:35 +01:00
b7b62d4b3c eth/downloader: also drain stateCh, receiptCh in eth/61 mode
State and receipt deliveries from a previous eth/62+ sync can hang if
the downloader has moved on to syncing with eth/61. Fix this by also
draining the eth/63 channels while waiting for eth/61 data.

A nicer solution would be to take care of the channels in a central
place, but that would involve a major rewrite.
2015-11-19 14:18:35 +01:00
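A sketch of the idea (channel element types simplified, names illustrative): while waiting for eth/61 block data, also consume and discard anything still arriving on the eth/62+ state and receipt channels from an earlier sync, so those stale deliveries can never block.

package sketch

// waitEth61 services eth/61 block deliveries while draining stale
// eth/63 state and receipt deliveries left over from a previous sync.
func waitEth61(blockCh, stateCh, receiptCh chan []byte, cancel chan struct{}) {
	for {
		select {
		case b := <-blockCh:
			_ = b // hand eth/61 block data to the queue
		case <-stateCh: // drain stale eth/63 state delivery
		case <-receiptCh: // drain stale eth/63 receipt delivery
		case <-cancel:
			return
		}
	}
}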
db52a6a0ff eth: remove workaround for asynchronous processing in the downloader 2015-11-19 14:18:34 +01:00
900da3d800 eth/downloader: don't hang for spurious deliveries
Unexpected deliveries could block indefinitely if they arrived at the
right time. The fix is to ensure that the cancellation channel is
always closed when the sync ends, unblocking any deliveries. Also remove
the atomic check for whether a sync is currently running because it
doesn't help and can be misleading.

Always cancelling turned out to break the tests, though: the downloader
spawned d.process whenever new data arrived, making it somewhat hard to
track when block processing was actually done. Fix this by running
d.process in a dedicated goroutine tied to the lifecycle of the
sync. d.process gets notified of new work by the queue instead of being
invoked all the time. This removes a ton of weird workaround code,
including a hairy use of atomic CAS.
2015-11-19 14:18:34 +01:00
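A sketch of the unblocking pattern described above (names illustrative): every delivery selects on the sync's cancel channel, and the downloader now closes that channel whenever a sync ends, so a late ("spurious") delivery returns an error instead of blocking forever.

package sketch

import "errors"

var errCancelled = errors.New("sync cancelled")

// deliver hands data to the active sync, bailing out if the sync has
// been cancelled in the meantime.
func deliver(results chan<- []byte, data []byte, cancelCh <-chan struct{}) error {
	select {
	case results <- data:
		return nil
	case <-cancelCh:
		return errCancelled
	}
}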
1c63d08ed1 cmd/geth, cmd/utils: removed legalese
Removed the legalese confirmation dialog. This closes #1992
2015-11-19 12:03:33 +01:00
ae37a8013d Merge pull request #1917 from obscuren/validator-interface
core, eth, rpc: split out block validator and state processor
2015-11-19 10:57:00 +01:00
23f42d9463 Merge pull request #1964 from obscuren/evm-runtime
core/vm/runtime: added simple execution runtime
2015-11-18 17:39:27 +01:00
1372b991c3 core/vm/runtime: added simple execution runtime
The runtime environment can be used for simple, basic execution of
contract code without the requirement of setting up a full node stack;
it operates fully in memory.
2015-11-18 16:50:20 +01:00
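A minimal usage sketch, assuming the package exposes an Execute(code, input, config) entry point as later geth versions do; no chain database or node setup is involved.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// PUSH1 0x20, PUSH1 0x00, RETURN: return the first 32 bytes of memory.
	code := common.Hex2Bytes("60206000f3")
	ret, _, err := runtime.Execute(code, nil, nil)
	fmt.Println(ret, err)
}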
a1d9ef48c5 core, eth, rpc: split out block validator and state processor
This removes the burden on a single object to take care of all
validation and state processing. Now instead the validation is done by
the `core.BlockValidator` (`types.Validator`) that takes care of both
header and uncle validation through the `ValidateBlock` method and state
validation through the `ValidateState` method. The state processing is
done by a new object `core.StateProcessor` (`types.Processor`), which
accepts a fresh state as input and uses it to process the given block's
transactions (and uncles, for rewards) to calculate the state root for
the next block (P_n+1).
2015-11-18 14:24:42 +01:00
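The shape of that split, paraphrased from the description above rather than copied from the actual types package (see core/block_validator.go later in this diff for the real validator): one object checks a block before and after execution, the other executes its transactions against a state.

package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
)

// Validator checks blocks before execution and the resulting state after.
type Validator interface {
	ValidateBlock(block *types.Block) error
	ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
}

// Processor executes a block's transactions against the given state.
type Processor interface {
	Process(block *types.Block, statedb *state.StateDB) (types.Receipts, vm.Logs, *big.Int, error)
}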
4a938406dc Merge pull request #1990 from karalabe/fix-whisper-autocomplete
rpc/api: fix #1986, newIdentity autocomplete
2015-11-18 12:32:39 +01:00
53f28e71dc rpc/api: fix #1986, newIdentity autocomplete 2015-11-18 13:03:20 +02:00
10475f444c Merge pull request #1984 from fjl/secp256k1-recover-id-verify
crypto/secp256k1: verify recovery ID before calling libsecp256k1
2015-11-17 19:39:42 +01:00
6ea05f5a54 rpc/api, xeth: added signTransaction method
SignTransaction creates a transaction but does not submit it to the
network. It returns a structure that includes the transaction details
as well as the RLP-encoded transaction, which can later be submitted
using the SendRawTransaction method.
2015-11-17 17:51:05 +01:00
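A sketch of the shape of that return value; the field names here are assumptions for illustration, not the exact xeth structure.

package sketch

// SignTransactionResult pairs the decoded transaction fields with the
// raw RLP that SendRawTransaction would accept.
type SignTransactionResult struct {
	Raw string            `json:"raw"` // hex-encoded RLP of the signed transaction
	Tx  map[string]string `json:"tx"`  // from, to, nonce, value, gas, gasPrice, input...
}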
e344e1d490 crypto/secp256k1: drop pkgsrc paths from CFLAGS
They cause compiler warnings for people who don't have these
directories. People with pkgsrc can add the directory through CGO_CFLAGS
instead.
2015-11-17 09:53:10 +01:00
5159f8f649 crypto/secp256k1: raise internal errors as recoverable Go panic 2015-11-17 09:53:10 +01:00
1b29aed128 crypto/secp256k1: verify recovery ID before calling libsecp256k1
The C library treats the recovery ID as trusted input and crashes
the process for invalid values, so it needs to be verified before
calling into C. This will inhibit the crash in #1983.

Also remove VerifySignature because we don't use it.
2015-11-17 09:51:59 +01:00
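A sketch of the guard described above (helper name illustrative): for a 65-byte [R || S || V] signature, reject any recovery id outside 0..3 on the Go side before the value ever reaches libsecp256k1, which treats it as trusted input and aborts the process otherwise.

package sketch

import "errors"

var errInvalidRecoveryID = errors.New("invalid signature recovery id")

// checkRecoverID validates the recovery id byte of a 65-byte signature
// before it is passed into the C library.
func checkRecoverID(sig []byte) error {
	if len(sig) != 65 {
		return errors.New("signature must be 65 bytes long")
	}
	if sig[64] >= 4 {
		return errInvalidRecoveryID
	}
	return nil
}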
9422eec554 Merge pull request #1976 from karalabe/enable-light-kdf
cmd/geth, cmd/utils: surface the light KDF flag to the CLI
2015-11-11 12:45:30 +01:00
9aa77a3769 cmd/geth, cmd/utils: surface the light KDF flag to the CLI 2015-11-10 15:47:19 +02:00
da6696862e Merge pull request #1974 from karalabe/fix-rpc-regression
rpc/api: fix #1972 api regression (nil eth panic) in attach
2015-11-06 13:10:28 +01:00
6e5349880e rpc/api: fix #1972 api regression (nil eth panic) in attach 2015-11-06 11:47:57 +02:00
6d09468cab Merge pull request #1967 from karalabe/fix-filter-test-datarace
event/filter: fix data race in the test
2015-11-05 20:31:11 +01:00
2334ee97d0 Merge pull request #1963 from karalabe/fix-database-regression
eth: fix error casting regression during database open
2015-11-05 20:27:11 +01:00
5d89bbdda1 eth: fix error casting regression during database open 2015-11-05 16:59:16 +02:00
8e2bf42c46 event/filter: fix data race in the test 2015-11-05 16:55:53 +02:00
76390ef892 Merge pull request #1966 from karalabe/fix-recover-noparam-panic
cmd/geth: fix recover command crash if no param is supplied
2015-11-05 13:15:23 +01:00
636f67f232 Merge pull request #1969 from karalabe/fix-whisper-tests-datarace
whisper: fix datarace in expiration test
2015-11-05 13:03:36 +01:00
eb11c0e597 Merge pull request #1965 from karalabe/fix-natto-test
jsre: fix #1876, sleep too short on a slow test server
2015-11-05 13:03:06 +01:00
ca4f6d0fdd Merge pull request #1968 from karalabe/fix-json-tests-datarace
tests: fix data race in bad-block-report disabling during tests
2015-11-05 12:59:07 +01:00
60e0abb595 whisper: fix datarace in expiration test 2015-11-05 13:36:25 +02:00
9dc5de51a2 tests: fix data race in bad-block-report disabling during tests 2015-11-05 13:29:50 +02:00
90655736ed cmd/geth: fix recover command crash if no param is supplied 2015-11-05 11:53:50 +02:00
bddb13d436 jsre: fix #1876, sleep too short on a slow test server 2015-11-05 11:36:10 +02:00
e3f36d9728 Merge pull request #1960 from karalabe/fix-peer-ignore-list
eth/downloader: fix dysfunctional ignore list hidden by generic set
2015-11-04 14:43:58 +01:00
b658a73ed5 eth/downloader: fix dysfunctional ignore list hidden by generic set 2015-11-04 13:11:52 +02:00
e165c2d23c Merge pull request #1934 from karalabe/polish-protocol-infos
eth, p2p, rpc/api: polish protocol info gathering
2015-11-04 11:59:31 +01:00
dda3bf3ce7 Merge pull request #1943 from obscuren/abi-fixes
accounts/abi: ABI fixes & added types
2015-11-03 15:22:37 +01:00
6dfbbc3e11 Merge pull request #1948 from bas-vk/rpcfix
Infinite loop in filters
2015-11-03 15:22:02 +01:00
5ff0814b1f VERSION, cmd/geth: bumped version 1.4.0 2015-11-03 11:48:18 +01:00
e5532154a5 Merge branch 'release/1.3.0'
Conflicts:
	VERSION
	cmd/geth/main.go
2015-11-03 11:47:07 +01:00
001684fb11 Merge pull request #1958 from fjl/secp256k1-pkgsrc
crypto/secp256k1: add C compiler flags for pkgsrc
2015-11-03 10:46:04 +01:00
16b0bc7c3b crypto/secp256k1: add C compiler flags for pkgsrc
pkgsrc is a cross-platform package manager that also
supports OS X.
2015-11-03 10:33:31 +01:00
1f72952f04 accounts/abi: ABI fixes & added types
Changed field `input` to new `inputs`. Added Hash and Address as input
types.

Added bytes[N] and N validation
2015-10-29 21:40:18 +01:00
76410df6a2 rpc: return an unsupported error when "pending" was used to create a filter 2015-10-29 17:35:43 +01:00
e46ab3bdcd eth, p2p, rpc/api: polish protocol info gathering 2015-10-28 12:44:15 +02:00
9666db2a44 VERSION, cmd/geth: bumped version 1.2.1 2015-10-01 10:38:43 +02:00
912 changed files with 200047 additions and 11915 deletions

View File

@ -20,10 +20,6 @@ env:
global:
- secure: "U2U1AmkU4NJBgKR/uUAebQY87cNL0+1JHjnLOmmXwxYYyj5ralWb1aSuSH3qSXiT93qLBmtaUkuv9fberHVqrbAeVlztVdUsKAq7JMQH+M99iFkC9UiRMqHmtjWJ0ok4COD1sRYixxi21wb/JrMe3M1iL4QJVS61iltjHhVdM64="
sudo: false
addons:
apt:
packages:
- libgmp3-dev
notifications:
webhooks:
urls:

View File

@ -2,9 +2,17 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.
.PHONY: geth geth-cross geth-linux geth-darwin geth-windows geth-android evm all test travis-test-with-coverage xgo clean
.PHONY: geth geth-cross evm all test travis-test-with-coverage xgo clean
.PHONY: geth-linux geth-linux-arm geth-linux-386 geth-linux-amd64
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64
.PHONY: geth-android geth-android-16 geth-android-21
GOBIN = build/bin
MODE ?= default
GO ?= latest
geth:
build/env.sh go install -v $(shell build/flags.sh) ./cmd/geth
@echo "Done building."
@ -14,26 +22,91 @@ geth-cross: geth-linux geth-darwin geth-windows geth-android
@echo "Full cross compilation done:"
@ls -l $(GOBIN)/geth-*
geth-linux: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=linux/* -v $(shell build/flags.sh) ./cmd/geth
geth-linux: xgo geth-linux-arm geth-linux-386 geth-linux-amd64
@echo "Linux cross compilation done:"
@ls -l $(GOBIN)/geth-linux-*
geth-darwin: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=darwin/* -v $(shell build/flags.sh) ./cmd/geth
geth-linux-386: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=linux/386 -v $(shell build/flags.sh) ./cmd/geth
@echo "Linux 386 cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep 386
geth-linux-amd64: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=linux/amd64 -v $(shell build/flags.sh) ./cmd/geth
@echo "Linux amd64 cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep amd64
geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
@echo "Linux ARM cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep arm
geth-linux-arm-5: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=linux/arm-5 -v $(shell build/flags.sh) ./cmd/geth
@echo "Linux ARMv5 cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep arm-5
geth-linux-arm-6: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=linux/arm-6 -v $(shell build/flags.sh) ./cmd/geth
@echo "Linux ARMv6 cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep arm-6
geth-linux-arm-7: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=linux/arm-7 -v $(shell build/flags.sh) ./cmd/geth
@echo "Linux ARMv7 cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep arm-7
geth-linux-arm64: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=linux/arm64 -v $(shell build/flags.sh) ./cmd/geth
@echo "Linux ARM64 cross compilation done:"
@ls -l $(GOBIN)/geth-linux-* | grep arm64
geth-darwin: geth-darwin-386 geth-darwin-amd64
@echo "Darwin cross compilation done:"
@ls -l $(GOBIN)/geth-darwin-*
geth-windows: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=windows/* -v $(shell build/flags.sh) ./cmd/geth
geth-darwin-386: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=darwin/386 -v $(shell build/flags.sh) ./cmd/geth
@echo "Darwin 386 cross compilation done:"
@ls -l $(GOBIN)/geth-darwin-* | grep 386
geth-darwin-amd64: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=darwin/amd64 -v $(shell build/flags.sh) ./cmd/geth
@echo "Darwin amd64 cross compilation done:"
@ls -l $(GOBIN)/geth-darwin-* | grep amd64
geth-windows: xgo geth-windows-386 geth-windows-amd64
@echo "Windows cross compilation done:"
@ls -l $(GOBIN)/geth-windows-*
geth-windows-386: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=windows/386 -v $(shell build/flags.sh) ./cmd/geth
@echo "Windows 386 cross compilation done:"
@ls -l $(GOBIN)/geth-windows-* | grep 386
geth-windows-amd64: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=windows/amd64 -v $(shell build/flags.sh) ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -l $(GOBIN)/geth-windows-* | grep amd64
geth-android: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=android-16/*,android-21/* -v $(shell build/flags.sh) ./cmd/geth
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=android/* -v $(shell build/flags.sh) ./cmd/geth
@echo "Android cross compilation done:"
@ls -l $(GOBIN)/geth-android-*
geth-ios: geth-ios-arm-7 geth-ios-arm64
@echo "iOS cross compilation done:"
@ls -l $(GOBIN)/geth-ios-*
geth-ios-arm-7: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=ios/arm-7 -v $(shell build/flags.sh) ./cmd/geth
@echo "iOS ARMv7 cross compilation done:"
@ls -l $(GOBIN)/geth-ios-* | grep arm-7
geth-ios-arm64: xgo
build/env.sh $(GOBIN)/xgo --go=$(GO) --buildmode=$(MODE) --dest=$(GOBIN) --targets=ios-7.0/arm64 -v $(shell build/flags.sh) ./cmd/geth
@echo "iOS ARM64 cross compilation done:"
@ls -l $(GOBIN)/geth-ios-* | grep arm64
evm:
build/env.sh $(GOROOT)/bin/go install -v $(shell build/flags.sh) ./cmd/evm
@echo "Done building."

View File

@ -30,7 +30,7 @@ For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.
Building geth requires two external dependencies, Go and GMP.
Building geth requires both a Go and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run

View File

@ -1 +1 @@
1.3.1
1.3.5

View File

@ -36,7 +36,7 @@ import (
type Method struct {
Name string
Const bool
Input []Argument
Inputs []Argument
Return Type // not yet implemented
}
@ -49,9 +49,9 @@ type Method struct {
// Please note that "int" is a substitute for its canonical representation "int256"
func (m Method) String() (out string) {
out += m.Name
types := make([]string, len(m.Input))
types := make([]string, len(m.Inputs))
i := 0
for _, input := range m.Input {
for _, input := range m.Inputs {
types[i] = input.Type.String()
i++
}
@ -104,7 +104,7 @@ func (abi ABI) pack(name string, args ...interface{}) ([]byte, error) {
var ret []byte
for i, a := range args {
input := method.Input[i]
input := method.Inputs[i]
packed, err := input.Type.pack(a)
if err != nil {
@ -129,8 +129,8 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
}
// start with argument count match
if len(args) != len(method.Input) {
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Input))
if len(args) != len(method.Inputs) {
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
}
arguments, err := abi.pack(name, args...)

View File

@ -18,35 +18,38 @@ package abi
import (
"bytes"
"fmt"
"log"
"math/big"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
const jsondata = `
[
{ "name" : "balance", "const" : true },
{ "name" : "send", "const" : false, "input" : [ { "name" : "amount", "type" : "uint256" } ] }
{ "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
]`
const jsondata2 = `
[
{ "name" : "balance", "const" : true },
{ "name" : "send", "const" : false, "input" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "name" : "test", "const" : false, "input" : [ { "name" : "number", "type" : "uint32" } ] },
{ "name" : "string", "const" : false, "input" : [ { "name" : "input", "type" : "string" } ] },
{ "name" : "bool", "const" : false, "input" : [ { "name" : "input", "type" : "bool" } ] },
{ "name" : "address", "const" : false, "input" : [ { "name" : "input", "type" : "address" } ] },
{ "name" : "string32", "const" : false, "input" : [ { "name" : "input", "type" : "string32" } ] },
{ "name" : "uint64[2]", "const" : false, "input" : [ { "name" : "input", "type" : "uint64[2]" } ] },
{ "name" : "uint64[]", "const" : false, "input" : [ { "name" : "input", "type" : "uint64[]" } ] },
{ "name" : "foo", "const" : false, "input" : [ { "name" : "input", "type" : "uint32" } ] },
{ "name" : "bar", "const" : false, "input" : [ { "name" : "input", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
{ "name" : "slice", "const" : false, "input" : [ { "name" : "input", "type" : "uint32[2]" } ] },
{ "name" : "slice256", "const" : false, "input" : [ { "name" : "input", "type" : "uint256[2]" } ] }
{ "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "name" : "test", "const" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
{ "name" : "string", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
{ "name" : "bool", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
{ "name" : "address", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
{ "name" : "string32", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "string32" } ] },
{ "name" : "uint64[2]", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
{ "name" : "uint64[]", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
{ "name" : "foo", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
{ "name" : "bar", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
{ "name" : "slice", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
{ "name" : "slice256", "const" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] }
]`
func TestType(t *testing.T) {
@ -344,3 +347,49 @@ func TestPackSliceBig(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
}
func ExampleJSON() {
const definition = `[{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"isBar","outputs":[{"name":"","type":"bool"}],"type":"function"}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
log.Fatalln(err)
}
out, err := abi.Pack("isBar", common.HexToAddress("01"))
if err != nil {
log.Fatalln(err)
}
fmt.Printf("%x\n", out)
// Output:
// 1f2c40920000000000000000000000000000000000000000000000000000000000000001
}
func TestBytes(t *testing.T) {
const definition = `[
{ "name" : "balance", "const" : true, "inputs" : [ { "name" : "address", "type" : "bytes20" } ] },
{ "name" : "send", "const" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
ok := make([]byte, 20)
_, err = abi.Pack("balance", ok)
if err != nil {
t.Error(err)
}
toosmall := make([]byte, 19)
_, err = abi.Pack("balance", toosmall)
if err != nil {
t.Error(err)
}
toobig := make([]byte, 21)
_, err = abi.Pack("balance", toobig)
if err == nil {
t.Error("expected error")
}
}

View File

@ -43,7 +43,7 @@ type Type struct {
stringKind string // holds the unparsed string for deriving signatures
}
// New type returns a fully parsed Type given by the input string or an error if it can't be parsed.
// NewType returns a fully parsed Type given by the input string or an error if it can't be parsed.
//
// Strings can be in the format of:
//
@ -130,6 +130,10 @@ func NewType(t string) (typ Type, err error) {
if vsize > 0 {
typ.Size = 32
}
case "bytes":
typ.Kind = reflect.Slice
typ.Type = byte_ts
typ.Size = vsize
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
@ -200,7 +204,13 @@ func (t Type) pack(v interface{}) ([]byte, error) {
} else {
return common.LeftPadBytes(common.Big0.Bytes(), 32), nil
}
case reflect.Array:
if v, ok := value.Interface().(common.Address); ok {
return t.pack(v[:])
} else if v, ok := value.Interface().(common.Hash); ok {
return t.pack(v[:])
}
}
panic("unreached")
return nil, fmt.Errorf("ABI: bad input given %T", value.Kind())
}

View File

@ -225,10 +225,15 @@ func (self *VMEnv) Call(caller vm.ContractRef, addr common.Address, data []byte,
self.Gas = gas
return core.Call(self, caller, addr, data, gas, price, value)
}
func (self *VMEnv) CallCode(caller vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) {
return core.CallCode(self, caller, addr, data, gas, price, value)
}
func (self *VMEnv) DelegateCall(caller vm.ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) {
return core.DelegateCall(self, caller, addr, data, gas, price)
}
func (self *VMEnv) Create(caller vm.ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) {
return core.Create(self, caller, data, gas, price, value)
}

View File

@ -48,10 +48,10 @@ import (
const (
ClientIdentifier = "Geth"
Version = "1.3.1"
Version = "1.3.5"
VersionMajor = 1
VersionMinor = 3
VersionPatch = 1
VersionPatch = 5
)
var (
@ -305,6 +305,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.OlympicFlag,
utils.FastSyncFlag,
utils.CacheFlag,
utils.LightKDFFlag,
utils.JSpathFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
@ -404,8 +405,6 @@ func makeDefaultExtra() []byte {
}
func run(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
cfg.ExtraData = makeExtra(ctx)
@ -420,8 +419,6 @@ func run(ctx *cli.Context) {
}
func attach(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
var client comms.EthereumClient
var err error
if ctx.Args().Present() {
@ -453,8 +450,6 @@ func attach(ctx *cli.Context) {
}
func console(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
cfg.ExtraData = makeExtra(ctx)
@ -487,8 +482,6 @@ func console(ctx *cli.Context) {
}
func execJSFiles(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
ethereum, err := eth.New(cfg)
if err != nil {
@ -514,8 +507,6 @@ func execJSFiles(ctx *cli.Context) {
}
func unlockAccount(ctx *cli.Context, am *accounts.Manager, addr string, i int, inputpassphrases []string) (addrHex, auth string, passphrases []string) {
utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))
var err error
passphrases = inputpassphrases
addrHex, err = utils.ParamToAddress(addr, am)
@ -540,16 +531,12 @@ func unlockAccount(ctx *cli.Context, am *accounts.Manager, addr string, i int, i
}
func blockRecovery(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
arg := ctx.Args().First()
if len(ctx.Args()) < 1 && len(arg) > 0 {
if len(ctx.Args()) < 1 {
glog.Fatal("recover requires block number or hash")
}
arg := ctx.Args().First()
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
utils.CheckLegalese(cfg.DataDir)
blockDb, err := ethdb.NewLDBDatabase(filepath.Join(cfg.DataDir, "blockchain"), cfg.DatabaseCache)
if err != nil {
glog.Fatalln("could not open db:", err)
@ -610,8 +597,6 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) {
}
func accountList(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
am := utils.MakeAccountManager(ctx)
accts, err := am.Accounts()
if err != nil {
@ -663,8 +648,6 @@ func getPassPhrase(ctx *cli.Context, desc string, confirmation bool, i int, inpu
}
func accountCreate(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
am := utils.MakeAccountManager(ctx)
passphrase, _ := getPassPhrase(ctx, "Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, nil)
acct, err := am.NewAccount(passphrase)
@ -675,8 +658,6 @@ func accountCreate(ctx *cli.Context) {
}
func accountUpdate(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
am := utils.MakeAccountManager(ctx)
arg := ctx.Args().First()
if len(arg) == 0 {
@ -692,8 +673,6 @@ func accountUpdate(ctx *cli.Context) {
}
func importWallet(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
keyfile := ctx.Args().First()
if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument")
@ -714,8 +693,6 @@ func importWallet(ctx *cli.Context) {
}
func accountImport(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
keyfile := ctx.Args().First()
if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument")
@ -730,8 +707,6 @@ func accountImport(ctx *cli.Context) {
}
func makedag(ctx *cli.Context) {
utils.CheckLegalese(utils.MustDataDir(ctx))
args := ctx.Args()
wrongArgs := func() {
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)

View File

@ -69,6 +69,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.GenesisFileFlag,
utils.IdentityFlag,
utils.FastSyncFlag,
utils.LightKDFFlag,
utils.CacheFlag,
utils.BlockchainVersionFlag,
},

View File

@ -95,16 +95,6 @@ func PromptPassword(prompt string, warnTerm bool) (string, error) {
return input, err
}
func CheckLegalese(datadir string) {
// check "first run"
if !common.FileExist(datadir) {
r, _ := PromptConfirm(legalese)
if !r {
Fatalf("Must accept to continue. Shutting down...\n")
}
}
}
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.

View File

@ -150,11 +150,11 @@ var (
}
FastSyncFlag = cli.BoolFlag{
Name: "fast",
Usage: "Enables fast syncing through state downloads",
Usage: "Enable fast syncing through state downloads",
}
LightKDFFlag = cli.BoolFlag{
Name: "lightkdf",
Usage: "Reduce KDF memory & CPU usage at some expense of KDF strength",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
// Miner settings
// TODO: refactor CPU vs GPU mining flags
@ -183,7 +183,7 @@ var (
GasPriceFlag = cli.StringFlag{
Name: "gasprice",
Usage: "Minimal gas price to accept for mining a transactions",
Value: new(big.Int).Mul(big.NewInt(50), common.Shannon).String(),
Value: new(big.Int).Mul(big.NewInt(20), common.Shannon).String(),
}
ExtraDataFlag = cli.StringFlag{
Name: "extradata",
@ -356,7 +356,7 @@ var (
GpoMinGasPriceFlag = cli.StringFlag{
Name: "gpomin",
Usage: "Minimum suggested gas price",
Value: new(big.Int).Mul(big.NewInt(50), common.Shannon).String(),
Value: new(big.Int).Mul(big.NewInt(20), common.Shannon).String(),
}
GpoMaxGasPriceFlag = cli.StringFlag{
Name: "gpomax",
@ -472,6 +472,8 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
cfg.DataDir += "/testnet"
cfg.NetworkId = 2
cfg.TestNet = true
// overwrite homestead block
params.HomesteadBlock = params.TestNetHomesteadBlock
}
if ctx.GlobalBool(VMEnableJitFlag.Name) {
@ -557,8 +559,6 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database
Fatalf("Could not start chainmanager: %v", err)
}
proc := core.NewBlockProcessor(chainDb, pow, chain, eventMux)
chain.SetProcessor(proc)
return chain, chainDb
}

View File

@ -1,41 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package utils
const (
legalese = `
=======================================
Disclaimer of Liabilites and Warranties
=======================================
THE USER EXPRESSLY KNOWS AND AGREES THAT THE USER IS USING THE ETHEREUM PLATFORM AT THE USERS SOLE
RISK. THE USER REPRESENTS THAT THE USER HAS AN ADEQUATE UNDERSTANDING OF THE RISKS, USAGE AND
INTRICACIES OF CRYPTOGRAPHIC TOKENS AND BLOCKCHAIN-BASED OPEN SOURCE SOFTWARE, ETH PLATFORM AND ETH.
THE USER ACKNOWLEDGES AND AGREES THAT, TO THE FULLEST EXTENT PERMITTED BY ANY APPLICABLE LAW, THE
DISCLAIMERS OF LIABILITY CONTAINED HEREIN APPLY TO ANY AND ALL DAMAGES OR INJURY WHATSOEVER CAUSED
BY OR RELATED TO RISKS OF, USE OF, OR INABILITY TO USE, ETH OR THE ETHEREUM PLATFORM UNDER ANY CAUSE
OR ACTION WHATSOEVER OF ANY KIND IN ANY JURISDICTION, INCLUDING, WITHOUT LIMITATION, ACTIONS FOR
BREACH OF WARRANTY, BREACH OF CONTRACT OR TORT (INCLUDING NEGLIGENCE) AND THAT NEITHER STIFTUNG
ETHEREUM NOR ETHEREUM TEAM SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR
CONSEQUENTIAL DAMAGES, INCLUDING FOR LOSS OF PROFITS, GOODWILL OR DATA. SOME JURISDICTIONS DO NOT
ALLOW THE EXCLUSION OF CERTAIN WARRANTIES OR THE LIMITATION OR EXCLUSION OF LIABILITY FOR CERTAIN
TYPES OF DAMAGES. THEREFORE, SOME OF THE ABOVE LIMITATIONS IN THIS SECTION MAY NOT APPLY TO A USER.
IN PARTICULAR, NOTHING IN THESE TERMS SHALL AFFECT THE STATUTORY RIGHTS OF ANY USER OR EXCLUDE
INJURY ARISING FROM ANY WILLFUL MISCONDUCT OR FRAUD OF STIFTUNG ETHEREUM.
Do you accept this agreement?`
)

View File

@ -82,7 +82,7 @@ func genValueTx(nbytes int) func(int, *BlockGen) {
return func(i int, gen *BlockGen) {
toaddr := common.Address{}
data := make([]byte, nbytes)
gas := IntrinsicGas(data)
gas := IntrinsicGas(data, false, false)
tx, _ := types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data).SignECDSA(benchRootKey)
gen.AddTx(tx)
}
@ -169,7 +169,6 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// State and blocks are stored in the same DB.
evmux := new(event.TypeMux)
chainman, _ := NewBlockChain(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()

View File

@ -1,460 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow"
"gopkg.in/fatih/set.v0"
)
const (
// must be bumped when the consensus algorithm is changed; this forces the upgradedb
// command to be run (forces the blocks to be imported again using the new algorithm)
BlockChainVersion = 3
)
type BlockProcessor struct {
chainDb ethdb.Database
// Mutex for locking the block processor. Blocks can only be handled one at a time
mutex sync.Mutex
// Canonical block chain
bc *BlockChain
// non-persistent key/value memory storage
mem map[string]*big.Int
// Proof of work used for validating
Pow pow.PoW
events event.Subscription
eventMux *event.TypeMux
}
// GasPool tracks the amount of gas available during
// execution of the transactions in a block.
// The zero value is a pool with zero gas available.
type GasPool big.Int
// AddGas makes gas available for execution.
func (gp *GasPool) AddGas(amount *big.Int) *GasPool {
i := (*big.Int)(gp)
i.Add(i, amount)
return gp
}
// SubGas deducts the given amount from the pool if enough gas is
// available and returns an error otherwise.
func (gp *GasPool) SubGas(amount *big.Int) error {
i := (*big.Int)(gp)
if i.Cmp(amount) < 0 {
return &GasLimitErr{Have: new(big.Int).Set(i), Want: amount}
}
i.Sub(i, amount)
return nil
}
func (gp *GasPool) String() string {
return (*big.Int)(gp).String()
}
func NewBlockProcessor(db ethdb.Database, pow pow.PoW, blockchain *BlockChain, eventMux *event.TypeMux) *BlockProcessor {
sm := &BlockProcessor{
chainDb: db,
mem: make(map[string]*big.Int),
Pow: pow,
bc: blockchain,
eventMux: eventMux,
}
return sm
}
func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block *types.Block, transientProcess bool) (receipts types.Receipts, err error) {
gp := new(GasPool).AddGas(block.GasLimit())
if glog.V(logger.Core) {
glog.Infof("%x: gas (+ %v)", block.Coinbase(), gp)
}
// Process the transactions on to parent state
receipts, err = sm.ApplyTransactions(gp, statedb, block, block.Transactions(), transientProcess)
if err != nil {
return nil, err
}
return receipts, nil
}
func (self *BlockProcessor) ApplyTransaction(gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, gp)
if err != nil {
return nil, nil, err
}
// Update the state with pending changes
usedGas.Add(usedGas, gas)
receipt := types.NewReceipt(statedb.IntermediateRoot().Bytes(), usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = new(big.Int).Set(gas)
if MessageCreatesContract(tx) {
from, _ := tx.From()
receipt.ContractAddress = crypto.CreateAddress(from, tx.Nonce())
}
logs := statedb.GetLogs(tx.Hash())
receipt.Logs = logs
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
glog.V(logger.Debug).Infoln(receipt)
// Notify all subscribers
if !transientProcess {
go self.eventMux.Post(TxPostEvent{tx})
go self.eventMux.Post(logs)
}
return receipt, gas, err
}
func (self *BlockProcessor) BlockChain() *BlockChain {
return self.bc
}
func (self *BlockProcessor) ApplyTransactions(gp *GasPool, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
err error
cumulativeSum = new(big.Int)
header = block.Header()
)
for i, tx := range txs {
statedb.StartRecord(tx.Hash(), block.Hash(), i)
receipt, txGas, err := self.ApplyTransaction(gp, statedb, header, tx, totalUsedGas, transientProcess)
if err != nil {
return nil, err
}
if err != nil {
glog.V(logger.Core).Infoln("TX err:", err)
}
receipts = append(receipts, receipt)
cumulativeSum.Add(cumulativeSum, new(big.Int).Mul(txGas, tx.GasPrice()))
}
if block.GasUsed().Cmp(totalUsedGas) != 0 {
return nil, ValidationError(fmt.Sprintf("gas used error (%v / %v)", block.GasUsed(), totalUsedGas))
}
if transientProcess {
go self.eventMux.Post(PendingBlockEvent{block, statedb.Logs()})
}
return receipts, err
}
func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs vm.Logs, err error) {
// Processing blocks may never happen simultaneously
sm.mutex.Lock()
defer sm.mutex.Unlock()
if !sm.bc.HasBlock(block.ParentHash()) {
return nil, ParentError(block.ParentHash())
}
parent := sm.bc.GetBlock(block.ParentHash())
// FIXME Change to full header validation. See #1225
errch := make(chan bool)
go func() { errch <- sm.Pow.Verify(block) }()
logs, _, err = sm.processWithParent(block, parent)
if !<-errch {
return nil, ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
}
return logs, err
}
// Process will attempt to process the given block's transactions and apply them
// on top of the block's parent state (given it exists) and will return whether it was
// successful or not.
func (sm *BlockProcessor) Process(block *types.Block) (logs vm.Logs, receipts types.Receipts, err error) {
// Processing blocks may never happen simultaneously
sm.mutex.Lock()
defer sm.mutex.Unlock()
if sm.bc.HasBlock(block.Hash()) {
if _, err := state.New(block.Root(), sm.chainDb); err == nil {
return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
}
}
if parent := sm.bc.GetBlock(block.ParentHash()); parent != nil {
if _, err := state.New(parent.Root(), sm.chainDb); err == nil {
return sm.processWithParent(block, parent)
}
}
return nil, nil, ParentError(block.ParentHash())
}
func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs vm.Logs, receipts types.Receipts, err error) {
// Create a new state based on the parent's root (e.g., create copy)
state, err := state.New(parent.Root(), sm.chainDb)
if err != nil {
return nil, nil, err
}
header := block.Header()
uncles := block.Uncles()
txs := block.Transactions()
// Block validation
if err = ValidateHeader(sm.Pow, header, parent.Header(), false, false); err != nil {
return
}
// There can be at most two uncles
if len(uncles) > 2 {
return nil, nil, ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(uncles))
}
receipts, err = sm.TransitionState(state, parent, block, false)
if err != nil {
return
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
rbloom := types.CreateBloom(receipts)
if rbloom != header.Bloom {
err = fmt.Errorf("unable to replicate block's bloom=%x", rbloom)
return
}
// The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
// can be used by light clients to make sure they've received the correct Txs
txSha := types.DeriveSha(txs)
if txSha != header.TxHash {
err = fmt.Errorf("invalid transaction root hash. received=%x calculated=%x", header.TxHash, txSha)
return
}
// The receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, R1]]))
receiptSha := types.DeriveSha(receipts)
if receiptSha != header.ReceiptHash {
err = fmt.Errorf("invalid receipt root hash. received=%x calculated=%x", header.ReceiptHash, receiptSha)
return
}
// Verify UncleHash before running other uncle validations
unclesSha := types.CalcUncleHash(uncles)
if unclesSha != header.UncleHash {
err = fmt.Errorf("invalid uncles root hash. received=%x calculated=%x", header.UncleHash, unclesSha)
return
}
// Verify uncles
if err = sm.VerifyUncles(state, block, parent); err != nil {
return
}
// Accumulate static rewards: block reward, uncle rewards and uncle inclusion rewards.
AccumulateRewards(state, header, uncles)
// Commit state objects/accounts to a database batch and calculate
// the state root. The database is not modified if the root
// doesn't match.
root, batch := state.CommitBatch()
if header.Root != root {
return nil, nil, fmt.Errorf("invalid merkle root: header=%x computed=%x", header.Root, root)
}
// Execute the database writes.
batch.Write()
return state.Logs(), receipts, nil
}
var (
big8 = big.NewInt(8)
big32 = big.NewInt(32)
)
// AccumulateRewards credits the coinbase of the given block with the
// mining reward. The total reward consists of the static block reward
// and rewards for included uncles. The coinbase of each uncle block is
// also rewarded.
func AccumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
reward := new(big.Int).Set(BlockReward)
r := new(big.Int)
for _, uncle := range uncles {
r.Add(uncle.Number, big8)
r.Sub(r, header.Number)
r.Mul(r, BlockReward)
r.Div(r, big8)
statedb.AddBalance(uncle.Coinbase, r)
r.Div(BlockReward, big32)
reward.Add(reward, r)
}
statedb.AddBalance(header.Coinbase, reward)
}
func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
uncles := set.New()
ancestors := make(map[common.Hash]*types.Block)
for _, ancestor := range sm.bc.GetBlocksFromHash(block.ParentHash(), 7) {
ancestors[ancestor.Hash()] = ancestor
// Include ancestors' uncles in the uncle set. Uncles must be unique.
for _, uncle := range ancestor.Uncles() {
uncles.Add(uncle.Hash())
}
}
ancestors[block.Hash()] = block
uncles.Add(block.Hash())
for i, uncle := range block.Uncles() {
hash := uncle.Hash()
if uncles.Has(hash) {
// Error not unique
return UncleError("uncle[%d](%x) not unique", i, hash[:4])
}
uncles.Add(hash)
if ancestors[hash] != nil {
branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
for h := range ancestors {
branch += fmt.Sprintf(" O - %x\n |\n", h)
}
glog.Infoln(branch)
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
}
if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
}
if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
}
}
return nil
}
// GetBlockReceipts returns the receipts belonging to the block hash
func (sm *BlockProcessor) GetBlockReceipts(bhash common.Hash) types.Receipts {
if block := sm.BlockChain().GetBlock(bhash); block != nil {
return GetBlockReceipts(sm.chainDb, block.Hash())
}
return nil
}
// GetLogs returns the logs of the given block. It uses a two-step approach:
// it first tries the (updated) path that reads the logs from the receipts,
// falling back to the deprecated way of re-processing the block.
func (sm *BlockProcessor) GetLogs(block *types.Block) (logs vm.Logs, err error) {
receipts := GetBlockReceipts(sm.chainDb, block.Hash())
// coalesce logs
for _, receipt := range receipts {
logs = append(logs, receipt.Logs...)
}
return logs, nil
}
// ValidateHeader verifies the validity of a header, relying on the database and
// POW behind the block processor.
func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle bool) error {
// Short circuit if the header's already known or its parent missing
if sm.bc.HasHeader(header.Hash()) {
return nil
}
if parent := sm.bc.GetHeader(header.ParentHash); parent == nil {
return ParentError(header.ParentHash)
} else {
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
}
}
// ValidateHeaderWithParent verifies the validity of a header, relying on the database and
// POW behind the block processor.
func (sm *BlockProcessor) ValidateHeaderWithParent(header, parent *types.Header, checkPow, uncle bool) error {
if sm.bc.HasHeader(header.Hash()) {
return nil
}
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
}
// See YP section 4.3.4. "Block Header Validity"
// Validates a header. Returns an error if the header is invalid.
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
}
if uncle {
if header.Time.Cmp(common.MaxBig) == 1 {
return BlockTSTooBigErr
}
} else {
if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
return BlockFutureErr
}
}
if header.Time.Cmp(parent.Time) != 1 {
return BlockEqualTSErr
}
expd := CalcDifficulty(header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
if expd.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("Difficulty check failed for header %v, %v", header.Difficulty, expd)
}
a := new(big.Int).Set(parent.GasLimit)
a = a.Sub(a, header.GasLimit)
a.Abs(a)
b := new(big.Int).Set(parent.GasLimit)
b = b.Div(b, params.GasLimitBoundDivisor)
if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
return fmt.Errorf("GasLimit check failed for header %v (%v > %v)", header.GasLimit, a, b)
}
num := new(big.Int).Set(parent.Number)
num.Sub(header.Number, num)
if num.Cmp(big.NewInt(1)) != 0 {
return BlockNumberErr
}
if checkPow {
// Verify the nonce of the header. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(header)) {
return &BlockNonceErr{Hash: header.Hash(), Number: header.Number, Nonce: header.Nonce.Uint64()}
}
}
return nil
}

core/block_validator.go (new file, 373 lines)
View File

@ -0,0 +1,373 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow"
"gopkg.in/fatih/set.v0"
)
var (
ExpDiffPeriod = big.NewInt(100000)
big10 = big.NewInt(10)
bigMinus99 = big.NewInt(-99)
)
// BlockValidator is responsible for validating block headers, uncles and
// processed state.
//
// BlockValidator implements Validator.
type BlockValidator struct {
bc *BlockChain // Canonical block chain
Pow pow.PoW // Proof of work used for validating
}
// NewBlockValidator returns a new block validator which is safe for re-use
func NewBlockValidator(blockchain *BlockChain, pow pow.PoW) *BlockValidator {
validator := &BlockValidator{
Pow: pow,
bc: blockchain,
}
return validator
}
// ValidateBlock validates the given block's header and uncles and verifies
// the block header's transaction and uncle roots.
//
// ValidateBlock does not validate the header's PoW; the PoW is validated
// separately so blocks can be processed in parallel.
//
// ValidateBlock also makes sure that any state the block refers to is
// actually present, confirming that fast sync has done its job properly.
// This prevents the block validator from accepting false positives where a
// header is present but the state is not.
func (v *BlockValidator) ValidateBlock(block *types.Block) error {
if v.bc.HasBlock(block.Hash()) {
if _, err := state.New(block.Root(), v.bc.chainDb); err == nil {
return &KnownBlockError{block.Number(), block.Hash()}
}
}
parent := v.bc.GetBlock(block.ParentHash())
if parent == nil {
return ParentError(block.ParentHash())
}
if _, err := state.New(parent.Root(), v.bc.chainDb); err != nil {
return ParentError(block.ParentHash())
}
header := block.Header()
// validate the block header
if err := ValidateHeader(v.Pow, header, parent.Header(), false, false); err != nil {
return err
}
// verify the uncles are correctly rewarded
if err := v.VerifyUncles(block, parent); err != nil {
return err
}
// Verify UncleHash before running other uncle validations
unclesSha := types.CalcUncleHash(block.Uncles())
if unclesSha != header.UncleHash {
return fmt.Errorf("invalid uncles root hash. received=%x calculated=%x", header.UncleHash, unclesSha)
}
// The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
// can be used by light clients to make sure they've received the correct Txs
txSha := types.DeriveSha(block.Transactions())
if txSha != header.TxHash {
return fmt.Errorf("invalid transaction root hash. received=%x calculated=%x", header.TxHash, txSha)
}
return nil
}
// ValidateState validates the various changes that happen after a state
// transition, such as the amount of used gas, the receipt roots and the
// state root itself. ValidateState returns nil if the validation succeeded,
// otherwise an error is returned.
func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas *big.Int) (err error) {
header := block.Header()
if block.GasUsed().Cmp(usedGas) != 0 {
return ValidationError(fmt.Sprintf("gas used error (%v / %v)", block.GasUsed(), usedGas))
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
rbloom := types.CreateBloom(receipts)
if rbloom != header.Bloom {
return fmt.Errorf("unable to replicate block's bloom=%x vs calculated bloom=%x", header.Bloom, rbloom)
}
// The receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, R1]]))
receiptSha := types.DeriveSha(receipts)
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash. received=%x calculated=%x", header.ReceiptHash, receiptSha)
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(); header.Root != root {
return fmt.Errorf("invalid merkle root: header=%x computed=%x", header.Root, root)
}
return nil
}
// VerifyUncles verifies the given block's uncles and applies the Ethereum
// consensus rules to the various block headers included; it returns an
// error if any of the included uncle headers is invalid.
func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
// validate that there are at most 2 uncles included in this block
if len(block.Uncles()) > 2 {
return ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
}
uncles := set.New()
ancestors := make(map[common.Hash]*types.Block)
for _, ancestor := range v.bc.GetBlocksFromHash(block.ParentHash(), 7) {
ancestors[ancestor.Hash()] = ancestor
// Include the ancestors' uncles in the uncle set. Uncles must be unique.
for _, uncle := range ancestor.Uncles() {
uncles.Add(uncle.Hash())
}
}
ancestors[block.Hash()] = block
uncles.Add(block.Hash())
for i, uncle := range block.Uncles() {
hash := uncle.Hash()
if uncles.Has(hash) {
// Error not unique
return UncleError("uncle[%d](%x) not unique", i, hash[:4])
}
uncles.Add(hash)
if ancestors[hash] != nil {
branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
for h := range ancestors {
branch += fmt.Sprintf(" O - %x\n |\n", h)
}
glog.Infoln(branch)
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
}
if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
}
if err := ValidateHeader(v.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
}
}
return nil
}
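// Note (illustrative, not part of the original file): since the ancestor set
// is built from GetBlocksFromHash(block.ParentHash(), 7), a valid uncle's
// parent must be one of the seven most recent ancestors, excluding the block's
// own parent. That yields the yellow paper bound 1 <= B_n - U_n <= 6 on how
// far an uncle may trail the block including it.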
// ValidateHeader validates the given header and, depending on the checkPow
// argument, verifies the header's proof of work. It returns an error if the
// validation failed.
func (v *BlockValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
// Short circuit if the parent is missing.
if parent == nil {
return ParentError(header.ParentHash)
}
// Short circuit if the header is already known
if v.bc.HasHeader(header.Hash()) {
return nil
}
return ValidateHeader(v.Pow, header, parent, checkPow, false)
}
// Validates a header. Returns an error if the header is invalid.
//
// See YP section 4.3.4. "Block Header Validity"
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
}
if uncle {
if header.Time.Cmp(common.MaxBig) == 1 {
return BlockTSTooBigErr
}
} else {
if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
return BlockFutureErr
}
}
if header.Time.Cmp(parent.Time) != 1 {
return BlockEqualTSErr
}
expd := CalcDifficulty(header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
if expd.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("Difficulty check failed for header %v, %v", header.Difficulty, expd)
}
a := new(big.Int).Set(parent.GasLimit)
a = a.Sub(a, header.GasLimit)
a.Abs(a)
b := new(big.Int).Set(parent.GasLimit)
b = b.Div(b, params.GasLimitBoundDivisor)
if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
return fmt.Errorf("GasLimit check failed for header %v (%v > %v)", header.GasLimit, a, b)
}
num := new(big.Int).Set(parent.Number)
num.Sub(header.Number, num)
if num.Cmp(big.NewInt(1)) != 0 {
return BlockNumberErr
}
if checkPow {
// Verify the nonce of the header. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(header)) {
return &BlockNonceErr{header.Number, header.Hash(), header.Nonce.Uint64()}
}
}
return nil
}
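// gasLimitWithinBounds is an illustrative sketch, not part of the original
// file, restating the gas limit rule checked above: a child's gas limit may
// differ from its parent's by less than parentGasLimit/1024 and must stay at
// or above params.MinGasLimit.
func gasLimitWithinBounds(parentGasLimit, gasLimit *big.Int) bool {
	diff := new(big.Int).Sub(parentGasLimit, gasLimit)
	diff.Abs(diff)
	bound := new(big.Int).Div(parentGasLimit, params.GasLimitBoundDivisor)
	return diff.Cmp(bound) < 0 && gasLimit.Cmp(params.MinGasLimit) >= 0
}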
// CalcDifficulty is the difficulty adjustment algorithm. It returns the
// difficulty that a new block should have when created at the given time,
// given the parent block's time and difficulty.
func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
if params.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
} else {
return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
}
}
func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
// algorithm:
// diff = (parent_diff +
//         (parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
//        ) + 2^(periodCount - 2)
bigTime := new(big.Int).SetUint64(time)
bigParentTime := new(big.Int).SetUint64(parentTime)
// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
y := new(big.Int)
// 1 - (block_timestamp - parent_timestamp) // 10
x.Sub(bigTime, bigParentTime)
x.Div(x, big10)
x.Sub(common.Big1, x)
// max(1 - (block_timestamp - parent_timestamp) // 10, -99)
if x.Cmp(bigMinus99) < 0 {
x.Set(bigMinus99)
}
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
y.Div(parentDiff, params.DifficultyBoundDivisor)
x.Mul(y, x)
x.Add(parentDiff, x)
// minimum difficulty the block can ever have (before the exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x = params.MinimumDifficulty
}
// for the exponential factor
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount.Div(periodCount, ExpDiffPeriod)
// the exponential factor, commonly referred to as "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount.Cmp(common.Big1) > 0 {
y.Sub(periodCount, common.Big2)
y.Exp(common.Big2, y, nil)
x.Add(x, y)
}
return x
}
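// Worked example (illustrative, not part of the original file): with
// parentDiff = 131072 and a 1 second gap, x = 1 - (1 // 10) = 1, so
// diff = 131072 + 131072/2048 = 131136; with a 25 second gap,
// x = 1 - (25 // 10) = -1 and diff = 131072 - 64 = 131008. The -99 clamp only
// engages once the gap reaches 1010 seconds.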
func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
diff := new(big.Int)
adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
bigTime := new(big.Int)
bigParentTime := new(big.Int)
bigTime.SetUint64(time)
bigParentTime.SetUint64(parentTime)
if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
diff.Add(parentDiff, adjust)
} else {
diff.Sub(parentDiff, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff = params.MinimumDifficulty
}
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount.Div(periodCount, ExpDiffPeriod)
if periodCount.Cmp(common.Big1) > 0 {
// diff = diff + 2^(periodCount - 2)
expDiff := periodCount.Sub(periodCount, common.Big2)
expDiff.Exp(common.Big2, expDiff, nil)
diff.Add(diff, expDiff)
diff = common.BigMax(diff, params.MinimumDifficulty)
}
return diff
}
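// Worked example (illustrative, not part of the original file): Frontier only
// checks whether the gap is below params.DurationLimit and adjusts by a fixed
// parentDiff/2048 step in either direction. The bomb term is shared with
// Homestead: at parent number 199999 the period count is 200000/100000 = 2,
// adding 2^(2-2) = 1 to the difficulty; it only becomes noticeable many
// periods later.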
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
func CalcGasLimit(parent *types.Block) *big.Int {
// contrib = (parentGasUsed * 3 / 2) / 1024
contrib := new(big.Int).Mul(parent.GasUsed(), big.NewInt(3))
contrib = contrib.Div(contrib, big.NewInt(2))
contrib = contrib.Div(contrib, params.GasLimitBoundDivisor)
// decay = parentGasLimit / 1024 - 1
decay := new(big.Int).Div(parent.GasLimit(), params.GasLimitBoundDivisor)
decay.Sub(decay, big.NewInt(1))
/*
strategy: the gasLimit of the block-to-mine is set based on the parent's
gasUsed value. If parentGasUsed > parentGasLimit * (2/3) then we increase
it, otherwise we lower it (or leave it unchanged if it's right at that
usage). The amount increased/decreased depends on how far away parentGasUsed
is from parentGasLimit * (2/3).
*/
gl := new(big.Int).Sub(parent.GasLimit(), decay)
gl = gl.Add(gl, contrib)
gl.Set(common.BigMax(gl, params.MinGasLimit))
// however, if we're now below the target (GenesisGasLimit) we increase the
// limit as much as we can (parentGasLimit / 1024 - 1)
if gl.Cmp(params.GenesisGasLimit) < 0 {
gl.Add(parent.GasLimit(), decay)
gl.Set(common.BigMin(gl, params.GenesisGasLimit))
}
return gl
}
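// Worked example (illustrative, not part of the original file), assuming
// params.GenesisGasLimit is 4712388: with parentGasLimit = 3141592 and
// parentGasUsed = 3000000, contrib = (3000000*3/2)/1024 = 4394 and
// decay = 3141592/1024 - 1 = 3066, giving gl = 3141592 - 3066 + 4394 = 3142920.
// Since that is still below the target, the final clause raises the limit to
// parentGasLimit + decay = 3144658 instead.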

View File

@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/pow/ezp"
)
func proc() (*BlockProcessor, *BlockChain) {
func proc() (Validator, *BlockChain) {
db, _ := ethdb.NewMemDatabase()
var mux event.TypeMux
@ -39,7 +39,7 @@ func proc() (*BlockProcessor, *BlockChain) {
if err != nil {
fmt.Println(err)
}
return NewBlockProcessor(db, ezp.New(), blockchain, &mux), blockchain
return blockchain.validator, blockchain
}
func TestNumber(t *testing.T) {
@ -81,7 +81,7 @@ func TestPutReceipt(t *testing.T) {
Index: 0,
}}
PutReceipts(db, types.Receipts{receipt})
WriteReceipts(db, types.Receipts{receipt})
receipt = GetReceipt(db, common.Hash{})
if receipt == nil {
t.Error("expected to get 1 receipt, got none.")

View File

@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@ -61,17 +62,34 @@ const (
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
// BlockChainVersion must be bumped when the consensus algorithm is changed;
// this forces the upgradedb command to be run (forcing the blocks to be
// re-imported using the new algorithm)
BlockChainVersion = 3
)
// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two-stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the
// state is done in the second stage of the Validator. A failure in either
// stage aborts the import.
//
// The BlockChain also returns blocks from **any** chain included in the
// database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block, which need not be
// part of the canonical chain, whereas GetBlockByNumber always refers to the
// canonical chain.
type BlockChain struct {
chainDb ethdb.Database
processor types.BlockProcessor
eventMux *event.TypeMux
genesisBlock *types.Block
// Last known total difficulty
mu sync.RWMutex
chainmu sync.RWMutex
tsmu sync.RWMutex
procmu sync.RWMutex
checkpoint int // checkpoint counts towards the new checkpoint
currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
@ -91,10 +109,15 @@ type BlockChain struct {
procInterrupt int32 // interrupt signaler for block processing
wg sync.WaitGroup
pow pow.PoW
rand *mrand.Rand
pow pow.PoW
rand *mrand.Rand
processor Processor
validator Validator
}
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator
// and Processor.
func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
bodyCache, _ := lru.New(bodyCacheLimit)
@ -121,6 +144,8 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
return nil, err
}
bc.rand = mrand.New(mrand.NewSource(seed.Int64()))
bc.SetValidator(NewBlockValidator(bc, pow))
bc.SetProcessor(NewStateProcessor(bc))
bc.genesisBlock = bc.GetBlockByNumber(0)
if bc.genesisBlock == nil {
@ -292,6 +317,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
return nil
}
// GasLimit returns the gas limit of the current HEAD block.
func (self *BlockChain) GasLimit() *big.Int {
self.mu.RLock()
defer self.mu.RUnlock()
@ -299,6 +325,7 @@ func (self *BlockChain) GasLimit() *big.Int {
return self.currentBlock.GasLimit()
}
// LastBlockHash returns the hash of the HEAD block.
func (self *BlockChain) LastBlockHash() common.Hash {
self.mu.RLock()
defer self.mu.RUnlock()
@ -333,6 +360,8 @@ func (self *BlockChain) CurrentFastBlock() *types.Block {
return self.currentFastBlock
}
// Status returns status information about the current chain such as the HEAD Td,
// the HEAD hash and the hash of the genesis block.
func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
self.mu.RLock()
defer self.mu.RUnlock()
@ -340,10 +369,38 @@ func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesis
return self.GetTd(self.currentBlock.Hash()), self.currentBlock.Hash(), self.genesisBlock.Hash()
}
func (self *BlockChain) SetProcessor(proc types.BlockProcessor) {
self.processor = proc
// SetProcessor sets the processor required for making state modifications.
func (self *BlockChain) SetProcessor(processor Processor) {
self.procmu.Lock()
defer self.procmu.Unlock()
self.processor = processor
}
// SetValidator sets the validator which is used to validate incoming blocks.
func (self *BlockChain) SetValidator(validator Validator) {
self.procmu.Lock()
defer self.procmu.Unlock()
self.validator = validator
}
// Validator returns the current validator.
func (self *BlockChain) Validator() Validator {
self.procmu.RLock()
defer self.procmu.RUnlock()
return self.validator
}
// Processor returns the current processor.
func (self *BlockChain) Processor() Processor {
self.procmu.RLock()
defer self.procmu.RUnlock()
return self.processor
}
// AuxValidator returns the auxiliary validator (proof of work at the moment)
func (self *BlockChain) AuxValidator() pow.PoW { return self.pow }
// State returns a new mutable state based on the current HEAD block.
func (self *BlockChain) State() (*state.StateDB, error) {
return state.New(self.CurrentBlock().Root(), self.chainDb)
}
@ -534,6 +591,19 @@ func (bc *BlockChain) HasBlock(hash common.Hash) bool {
return bc.GetBlock(hash) != nil
}
// HasBlockAndState checks whether a block and its associated state trie are
// fully present in the database, caching the block if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash) bool {
// Check first that the block itself is known
block := bc.GetBlock(hash)
if block == nil {
return false
}
// Ensure the associated state is also present
_, err := state.New(block.Root(), bc.chainDb)
return err == nil
}
// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *BlockChain) GetBlock(hash common.Hash) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
@ -606,6 +676,8 @@ func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*type
return uncles
}
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
return
@ -657,14 +729,18 @@ func (self *BlockChain) writeHeader(header *types.Header) error {
if ptd == nil {
return ParentError(header.ParentHash)
}
td := new(big.Int).Add(header.Difficulty, ptd)
localTd := self.GetTd(self.currentHeader.Hash())
externTd := new(big.Int).Add(header.Difficulty, ptd)
// Make sure no inconsistent state is leaked during insertion
self.mu.Lock()
defer self.mu.Unlock()
// If the total difficulty is higher than our known, add it to the canonical chain
if td.Cmp(self.GetTd(self.currentHeader.Hash())) > 0 {
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
// Delete any canonical number assignments above the new head
for i := header.Number.Uint64() + 1; GetCanonicalHash(self.chainDb, i) != (common.Hash{}); i++ {
DeleteCanonicalHash(self.chainDb, i)
@ -685,7 +761,7 @@ func (self *BlockChain) writeHeader(header *types.Header) error {
self.currentHeader = types.CopyHeader(header)
}
// Regardless of the canonical status, write the header itself to the database
if err := WriteTd(self.chainDb, header.Hash(), td); err != nil {
if err := WriteTd(self.chainDb, header.Hash(), externTd); err != nil {
glog.Fatalf("failed to write header total difficulty: %v", err)
}
if err := WriteHeader(self.chainDb, header); err != nil {
@ -758,9 +834,9 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
var err error
if index == 0 {
err = self.processor.ValidateHeader(header, checkPow, false)
err = self.Validator().ValidateHeader(header, self.GetHeader(header.ParentHash), checkPow)
} else {
err = self.processor.ValidateHeaderWithParent(header, chain[index-1], checkPow, false)
err = self.Validator().ValidateHeader(header, chain[index-1], checkPow)
}
if err != nil {
errs[index] = err
@ -913,7 +989,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
glog.Fatal(errs[index])
return
}
if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index])
@ -980,14 +1056,18 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
if ptd == nil {
return NonStatTy, ParentError(block.ParentHash())
}
td := new(big.Int).Add(block.Difficulty(), ptd)
localTd := self.GetTd(self.currentBlock.Hash())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Make sure no inconsistent state is leaked during insertion
self.mu.Lock()
defer self.mu.Unlock()
// If the total difficulty is higher than our known, add it to the canonical chain
if td.Cmp(self.GetTd(self.currentBlock.Hash())) > 0 {
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
// Reorganize the chain if the parent is not the head block
if block.ParentHash() != self.currentBlock.Hash() {
if err := self.reorg(self.currentBlock, block); err != nil {
@ -1001,7 +1081,7 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
status = SideStatTy
}
// Regardless of the canonical status, write the block itself to the database
if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
if err := WriteTd(self.chainDb, block.Hash(), externTd); err != nil {
glog.Fatalf("failed to write block total difficulty: %v", err)
}
if err := WriteBlock(self.chainDb, block); err != nil {
@ -1025,9 +1105,10 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// faster than direct delivery and requires much less mutex
// acquiring.
var (
stats struct{ queued, processed, ignored int }
events = make([]interface{}, 0, len(chain))
tstart = time.Now()
stats struct{ queued, processed, ignored int }
events = make([]interface{}, 0, len(chain))
coalescedLogs vm.Logs
tstart = time.Now()
nonceChecked = make([]bool, len(chain))
)
@ -1057,12 +1138,12 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if BadHashes[block.Hash()] {
err := BadHashError(block.Hash())
blockErr(block, err)
reportBlock(block, err)
return i, err
}
// Call in to the block processor and check for errors. It's likely that if one block fails
// all others will fail too (unless a known block is returned).
logs, receipts, err := self.processor.Process(block)
// Stage 1 validation of the block using the chain's validator
// interface.
err := self.Validator().ValidateBlock(block)
if err != nil {
if IsKnownBlockErr(err) {
stats.ignored++
@ -1089,14 +1170,41 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
continue
}
blockErr(block, err)
go ReportBlock(block, err)
reportBlock(block, err)
return i, err
}
if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
// Create a new statedb using the parent block and report an
// error if it fails.
statedb, err := state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
if err != nil {
reportBlock(block, err)
return i, err
}
// Process block using the parent state as reference point.
receipts, logs, usedGas, err := self.processor.Process(block, statedb)
if err != nil {
reportBlock(block, err)
return i, err
}
// Validate the state using the default validator
err = self.Validator().ValidateState(block, self.GetBlock(block.ParentHash()), statedb, receipts, usedGas)
if err != nil {
reportBlock(block, err)
return i, err
}
// Write state changes to database
_, err = statedb.Commit()
if err != nil {
return i, err
}
// coalesce logs for later processing
coalescedLogs = append(coalescedLogs, logs...)
if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
return i, err
}
txcount += len(block.Transactions())
@ -1105,6 +1213,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if err != nil {
return i, err
}
switch status {
case CanonStatTy:
if glog.V(logger.Debug) {
@ -1113,11 +1222,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
events = append(events, ChainEvent{block, block.Hash(), logs})
// This puts transactions in an extra db for rpc
if err := PutTransactions(self.chainDb, block, block.Transactions()); err != nil {
if err := WriteTransactions(self.chainDb, block); err != nil {
return i, err
}
// store the receipts
if err := PutReceipts(self.chainDb, receipts); err != nil {
if err := WriteReceipts(self.chainDb, receipts); err != nil {
return i, err
}
// Write mipmap bloom filters
@ -1141,7 +1250,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
start, end := chain[0], chain[len(chain)-1]
glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
}
go self.postChainEvents(events)
go self.postChainEvents(events, coalescedLogs)
return 0, nil
}
@ -1206,12 +1315,12 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// insert the block in the canonical way, re-writing history
self.insert(block)
// write canonical receipts and transactions
if err := PutTransactions(self.chainDb, block, block.Transactions()); err != nil {
if err := WriteTransactions(self.chainDb, block); err != nil {
return err
}
receipts := GetBlockReceipts(self.chainDb, block.Hash())
// write receipts
if err := PutReceipts(self.chainDb, receipts); err != nil {
if err := WriteReceipts(self.chainDb, receipts); err != nil {
return err
}
// Write mipmap bloom filters
@ -1239,7 +1348,9 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// postChainEvents iterates over the events generated by a chain insertion and
// posts them into the event mux.
func (self *BlockChain) postChainEvents(events []interface{}) {
func (self *BlockChain) postChainEvents(events []interface{}, logs vm.Logs) {
// post event logs for further processing
self.eventMux.Post(logs)
for _, event := range events {
if event, ok := event.(ChainEvent); ok {
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
@ -1265,9 +1376,13 @@ func (self *BlockChain) update() {
}
}
func blockErr(block *types.Block, err error) {
// reportBlock reports the given block and error using the canonical block
// reporting tool. Reporting the block to the service is handled in a separate
// goroutine.
func reportBlock(block *types.Block, err error) {
if glog.V(logger.Error) {
glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex())
glog.Errorf(" %v", err)
}
go ReportBlock(block, err)
}
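// shouldReorgSketch is an illustrative sketch, not part of the original file,
// of the fork choice rule used in writeHeader and WriteBlock above: a
// competing chain wins outright on higher total difficulty and wins a fair
// coin flip on an exact tie, which weakens the advantage a selfish miner
// gains from controlling block propagation.
func shouldReorgSketch(externTd, localTd *big.Int) bool {
	return externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5)
}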

View File

@ -28,6 +28,7 @@ import (
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@ -53,31 +54,29 @@ func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain {
WriteTestNetGenesisBlock(db, 0)
blockchain, err := NewBlockChain(db, thePow(), &eventMux)
if err != nil {
t.Error("failed creating chainmanager:", err)
t.Error("failed creating blockchain:", err)
t.FailNow()
return nil
}
blockMan := NewBlockProcessor(db, nil, blockchain, &eventMux)
blockchain.SetProcessor(blockMan)
return blockchain
}
// Test fork of length N starting from block i
func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
// Copy old chain up to #i into a new db
db, processor2, err := newCanonical(i, full)
db, blockchain2, err := newCanonical(i, full)
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
// Assert the chains have the same header/block at #i
var hash1, hash2 common.Hash
if full {
hash1 = processor.bc.GetBlockByNumber(uint64(i)).Hash()
hash2 = processor2.bc.GetBlockByNumber(uint64(i)).Hash()
hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
} else {
hash1 = processor.bc.GetHeaderByNumber(uint64(i)).Hash()
hash2 = processor2.bc.GetHeaderByNumber(uint64(i)).Hash()
hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
}
if hash1 != hash2 {
t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
@ -88,13 +87,13 @@ func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comp
headerChainB []*types.Header
)
if full {
blockChainB = makeBlockChain(processor2.bc.CurrentBlock(), n, db, forkSeed)
if _, err := processor2.bc.InsertChain(blockChainB); err != nil {
blockChainB = makeBlockChain(blockchain2.CurrentBlock(), n, db, forkSeed)
if _, err := blockchain2.InsertChain(blockChainB); err != nil {
t.Fatalf("failed to insert forking chain: %v", err)
}
} else {
headerChainB = makeHeaderChain(processor2.bc.CurrentHeader(), n, db, forkSeed)
if _, err := processor2.bc.InsertHeaderChain(headerChainB, 1); err != nil {
headerChainB = makeHeaderChain(blockchain2.CurrentHeader(), n, db, forkSeed)
if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
t.Fatalf("failed to insert forking chain: %v", err)
}
}
@ -102,17 +101,17 @@ func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comp
var tdPre, tdPost *big.Int
if full {
tdPre = processor.bc.GetTd(processor.bc.CurrentBlock().Hash())
if err := testBlockChainImport(blockChainB, processor); err != nil {
tdPre = blockchain.GetTd(blockchain.CurrentBlock().Hash())
if err := testBlockChainImport(blockChainB, blockchain); err != nil {
t.Fatalf("failed to import forked block chain: %v", err)
}
tdPost = processor.bc.GetTd(blockChainB[len(blockChainB)-1].Hash())
tdPost = blockchain.GetTd(blockChainB[len(blockChainB)-1].Hash())
} else {
tdPre = processor.bc.GetTd(processor.bc.CurrentHeader().Hash())
if err := testHeaderChainImport(headerChainB, processor); err != nil {
tdPre = blockchain.GetTd(blockchain.CurrentHeader().Hash())
if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
t.Fatalf("failed to import forked header chain: %v", err)
}
tdPost = processor.bc.GetTd(headerChainB[len(headerChainB)-1].Hash())
tdPost = blockchain.GetTd(headerChainB[len(headerChainB)-1].Hash())
}
// Compare the total difficulties of the chains
comparator(tdPre, tdPost)
@ -127,37 +126,52 @@ func printChain(bc *BlockChain) {
// testBlockChainImport tries to process a chain of blocks, writing them into
// the database if successful.
func testBlockChainImport(chain []*types.Block, processor *BlockProcessor) error {
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
for _, block := range chain {
// Try and process the block
if _, _, err := processor.Process(block); err != nil {
err := blockchain.Validator().ValidateBlock(block)
if err != nil {
if IsKnownBlockErr(err) {
continue
}
return err
}
// Manually insert the block into the database, but don't reorganize (allows subsequent testing)
processor.bc.mu.Lock()
WriteTd(processor.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), processor.bc.GetTd(block.ParentHash())))
WriteBlock(processor.chainDb, block)
processor.bc.mu.Unlock()
statedb, err := state.New(blockchain.GetBlock(block.ParentHash()).Root(), blockchain.chainDb)
if err != nil {
return err
}
receipts, _, usedGas, err := blockchain.Processor().Process(block, statedb)
if err != nil {
reportBlock(block, err)
return err
}
err = blockchain.Validator().ValidateState(block, blockchain.GetBlock(block.ParentHash()), statedb, receipts, usedGas)
if err != nil {
reportBlock(block, err)
return err
}
blockchain.mu.Lock()
WriteTd(blockchain.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash())))
WriteBlock(blockchain.chainDb, block)
statedb.Commit()
blockchain.mu.Unlock()
}
return nil
}
// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, processor *BlockProcessor) error {
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
for _, header := range chain {
// Try and validate the header
if err := processor.ValidateHeader(header, false, false); err != nil {
if err := blockchain.Validator().ValidateHeader(header, blockchain.GetHeader(header.ParentHash), false); err != nil {
return err
}
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
processor.bc.mu.Lock()
WriteTd(processor.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, processor.bc.GetTd(header.ParentHash)))
WriteHeader(processor.chainDb, header)
processor.bc.mu.Unlock()
blockchain.mu.Lock()
WriteTd(blockchain.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash)))
WriteHeader(blockchain.chainDb, header)
blockchain.mu.Unlock()
}
return nil
}
@ -313,19 +327,19 @@ func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) }
func testBrokenChain(t *testing.T, full bool) {
// Make chain starting from genesis
db, processor, err := newCanonical(10, full)
db, blockchain, err := newCanonical(10, full)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
// Create a forked chain, and try to insert with a missing link
if full {
chain := makeBlockChain(processor.bc.CurrentBlock(), 5, db, forkSeed)[1:]
if err := testBlockChainImport(chain, processor); err == nil {
chain := makeBlockChain(blockchain.CurrentBlock(), 5, db, forkSeed)[1:]
if err := testBlockChainImport(chain, blockchain); err == nil {
t.Errorf("broken block chain not reported")
}
} else {
chain := makeHeaderChain(processor.bc.CurrentHeader(), 5, db, forkSeed)[1:]
if err := testHeaderChainImport(chain, processor); err == nil {
chain := makeHeaderChain(blockchain.CurrentHeader(), 5, db, forkSeed)[1:]
if err := testHeaderChainImport(chain, blockchain); err == nil {
t.Errorf("broken header chain not reported")
}
}
@ -415,9 +429,14 @@ func TestChainMultipleInsertions(t *testing.T) {
type bproc struct{}
func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil }
func (bproc) ValidateHeader(*types.Header, bool, bool) error { return nil }
func (bproc) ValidateHeaderWithParent(*types.Header, *types.Header, bool, bool) error { return nil }
func (bproc) ValidateBlock(*types.Block) error { return nil }
func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
return nil
}
func (bproc) Process(block *types.Block, statedb *state.StateDB) (types.Receipts, vm.Logs, *big.Int, error) {
return nil, nil, nil, nil
}
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
blocks := makeBlockChainWithDiff(genesis, d, seed)
@ -459,7 +478,8 @@ func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
bc.tdCache, _ = lru.New(100)
bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.processor = bproc{}
bc.SetValidator(bproc{})
bc.SetProcessor(bproc{})
bc.ResetWithGenesisBlock(genesis)
return bc
@ -612,12 +632,10 @@ func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true) }
func testInsertNonceError(t *testing.T, full bool) {
for i := 1; i < 25 && !t.Failed(); i++ {
// Create a pristine chain and database
db, processor, err := newCanonical(0, full)
db, blockchain, err := newCanonical(0, full)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
bc := processor.bc
// Create and insert a chain with a failing nonce
var (
failAt int
@ -626,34 +644,33 @@ func testInsertNonceError(t *testing.T, full bool) {
failHash common.Hash
)
if full {
blocks := makeBlockChain(processor.bc.CurrentBlock(), i, db, 0)
blocks := makeBlockChain(blockchain.CurrentBlock(), i, db, 0)
failAt = rand.Int() % len(blocks)
failNum = blocks[failAt].NumberU64()
failHash = blocks[failAt].Hash()
processor.bc.pow = failPow{failNum}
processor.Pow = failPow{failNum}
blockchain.pow = failPow{failNum}
failRes, err = processor.bc.InsertChain(blocks)
failRes, err = blockchain.InsertChain(blocks)
} else {
headers := makeHeaderChain(processor.bc.CurrentHeader(), i, db, 0)
headers := makeHeaderChain(blockchain.CurrentHeader(), i, db, 0)
failAt = rand.Int() % len(headers)
failNum = headers[failAt].Number.Uint64()
failHash = headers[failAt].Hash()
processor.bc.pow = failPow{failNum}
processor.Pow = failPow{failNum}
blockchain.pow = failPow{failNum}
blockchain.validator = NewBlockValidator(blockchain, failPow{failNum})
failRes, err = processor.bc.InsertHeaderChain(headers, 1)
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
// Check that the returned error indicates the nonce failure.
if failRes != failAt {
t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
}
if !IsBlockNonceErr(err) {
t.Fatalf("test %d: error mismatch: have %v, want nonce error", i, err)
t.Fatalf("test %d: error mismatch: have %v, want nonce error %T", i, err, err)
}
nerr := err.(*BlockNonceErr)
if nerr.Number.Uint64() != failNum {
@ -665,11 +682,11 @@ func testInsertNonceError(t *testing.T, full bool) {
// Check that no blocks after the failing block have been inserted.
for j := 0; j < i-failAt; j++ {
if full {
if block := bc.GetBlockByNumber(failNum + uint64(j)); block != nil {
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
t.Errorf("test %d: invalid block in chain: %v", i, block)
}
} else {
if header := bc.GetHeaderByNumber(failNum + uint64(j)); header != nil {
if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil {
t.Errorf("test %d: invalid header in chain: %v", i, header)
}
}
@ -711,7 +728,6 @@ func TestFastVsFullChains(t *testing.T) {
WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
@ -720,7 +736,6 @@ func TestFastVsFullChains(t *testing.T) {
fastDb, _ := ethdb.NewMemDatabase()
WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
fast.SetProcessor(NewBlockProcessor(fastDb, FakePow{}, fast, new(event.TypeMux)))
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@ -797,7 +812,6 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
@ -810,7 +824,6 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
fastDb, _ := ethdb.NewMemDatabase()
WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
fast.SetProcessor(NewBlockProcessor(fastDb, FakePow{}, fast, new(event.TypeMux)))
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@ -830,7 +843,6 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
lightDb, _ := ethdb.NewMemDatabase()
WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds})
light, _ := NewBlockChain(lightDb, FakePow{}, new(event.TypeMux))
light.SetProcessor(NewBlockProcessor(lightDb, FakePow{}, light, new(event.TypeMux)))
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
@ -895,9 +907,8 @@ func TestChainTxReorgs(t *testing.T) {
})
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
chainman, _ := NewBlockChain(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
if i, err := chainman.InsertChain(chain); err != nil {
blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@ -920,14 +931,14 @@ func TestChainTxReorgs(t *testing.T) {
gen.AddTx(futureAdd) // This transaction will be added after a full reorg
}
})
if _, err := chainman.InsertChain(chain); err != nil {
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// removed tx
for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
if GetTransaction(db, tx.Hash()) != nil {
t.Errorf("drop %d: tx found while shouldn't have been", i)
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
}
if GetReceipt(db, tx.Hash()) != nil {
t.Errorf("drop %d: receipt found while shouldn't have been", i)
@ -935,7 +946,7 @@ func TestChainTxReorgs(t *testing.T) {
}
// added tx
for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
if GetTransaction(db, tx.Hash()) == nil {
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
t.Errorf("add %d: expected tx to be found", i)
}
if GetReceipt(db, tx.Hash()) == nil {
@ -944,7 +955,7 @@ func TestChainTxReorgs(t *testing.T) {
}
// shared tx
for i, tx := range (types.Transactions{postponed, swapped}) {
if GetTransaction(db, tx.Hash()) == nil {
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
t.Errorf("share %d: expected tx to be found", i)
}
if GetReceipt(db, tx.Hash()) == nil {

View File

@ -214,7 +214,7 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, it creates either a full block chain or a
// header-only chain.
func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
// Create the new chain database
db, _ := ethdb.NewMemDatabase()
evmux := &event.TypeMux{}
@ -223,23 +223,20 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
genesis, _ := WriteTestNetGenesisBlock(db, 0)
blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
processor := NewBlockProcessor(db, FakePow{}, blockchain, evmux)
processor.bc.SetProcessor(processor)
// Create and inject the requested chain
if n == 0 {
return db, processor, nil
return db, blockchain, nil
}
if full {
// Full block-chain requested
blocks := makeBlockChain(genesis, n, db, canonicalSeed)
_, err := blockchain.InsertChain(blocks)
return db, processor, err
return db, blockchain, err
}
// Header-only chain requested
headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
_, err := blockchain.InsertHeaderChain(headers, 1)
return db, processor, err
return db, blockchain, err
}
// makeHeaderChain creates a deterministic chain of headers rooted at parent.

View File

@ -77,15 +77,14 @@ func ExampleGenerateChain() {
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
chainman, _ := NewBlockChain(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
if i, err := chainman.InsertChain(chain); err != nil {
blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
if i, err := blockchain.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", i, err)
return
}
state, _ := chainman.State()
fmt.Printf("last block: #%d\n", chainman.CurrentBlock().Number())
state, _ := blockchain.State()
fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number())
fmt.Println("balance of addr1:", state.GetBalance(addr1))
fmt.Println("balance of addr2:", state.GetBalance(addr2))
fmt.Println("balance of addr3:", state.GetBalance(addr3))

View File

@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
@ -43,80 +42,16 @@ var (
bodySuffix = []byte("-body")
tdSuffix = []byte("-td")
ExpDiffPeriod = big.NewInt(100000)
blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
txMetaSuffix = []byte{0x01}
receiptsPrefix = []byte("receipts-")
blockReceiptsPrefix = []byte("receipts-block-")
mipmapPre = []byte("mipmap-log-bloom-")
MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
blockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
)
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block b should have when created at time
// given the parent block's time and difficulty.
func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
diff := new(big.Int)
adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
bigTime := new(big.Int)
bigParentTime := new(big.Int)
bigTime.SetUint64(time)
bigParentTime.SetUint64(parentTime)
if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
diff.Add(parentDiff, adjust)
} else {
diff.Sub(parentDiff, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff = params.MinimumDifficulty
}
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount.Div(periodCount, ExpDiffPeriod)
if periodCount.Cmp(common.Big1) > 0 {
// diff = diff + 2^(periodCount - 2)
expDiff := periodCount.Sub(periodCount, common.Big2)
expDiff.Exp(common.Big2, expDiff, nil)
diff.Add(diff, expDiff)
diff = common.BigMax(diff, params.MinimumDifficulty)
}
return diff
}
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
func CalcGasLimit(parent *types.Block) *big.Int {
// contrib = (parentGasUsed * 3 / 2) / 1024
contrib := new(big.Int).Mul(parent.GasUsed(), big.NewInt(3))
contrib = contrib.Div(contrib, big.NewInt(2))
contrib = contrib.Div(contrib, params.GasLimitBoundDivisor)
// decay = parentGasLimit / 1024 -1
decay := new(big.Int).Div(parent.GasLimit(), params.GasLimitBoundDivisor)
decay.Sub(decay, big.NewInt(1))
/*
strategy: gasLimit of block-to-mine is set based on parent's
gasUsed value. if parentGasUsed > parentGasLimit * (2/3) then we
increase it, otherwise lower it (or leave it unchanged if it's right
at that usage) the amount increased/decreased depends on how far away
from parentGasLimit * (2/3) parentGasUsed is.
*/
gl := new(big.Int).Sub(parent.GasLimit(), decay)
gl = gl.Add(gl, contrib)
gl.Set(common.BigMax(gl, params.MinGasLimit))
// however, if we're now below the target (GenesisGasLimit) we increase the
// limit as much as we can (parentGasLimit / 1024 -1)
if gl.Cmp(params.GenesisGasLimit) < 0 {
gl.Add(parent.GasLimit(), decay)
gl.Set(common.BigMin(gl, params.GenesisGasLimit))
}
return gl
}
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
@ -234,6 +169,67 @@ func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
data, _ := db.Get(append(blockReceiptsPrefix, hash[:]...))
if len(data) == 0 {
return nil
}
storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
return nil
}
receipts := make(types.Receipts, len(storageReceipts))
for i, receipt := range storageReceipts {
receipts[i] = (*types.Receipt)(receipt)
}
return receipts
}
// GetTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
// Retrieve the transaction itself from the database
data, _ := db.Get(hash.Bytes())
if len(data) == 0 {
return nil, common.Hash{}, 0, 0
}
var tx types.Transaction
if err := rlp.DecodeBytes(data, &tx); err != nil {
return nil, common.Hash{}, 0, 0
}
// Retrieve the blockchain positional metadata
data, _ = db.Get(append(hash.Bytes(), txMetaSuffix...))
if len(data) == 0 {
return nil, common.Hash{}, 0, 0
}
var meta struct {
BlockHash common.Hash
BlockIndex uint64
Index uint64
}
if err := rlp.DecodeBytes(data, &meta); err != nil {
return nil, common.Hash{}, 0, 0
}
return &tx, meta.BlockHash, meta.BlockIndex, meta.Index
}
// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
data, _ := db.Get(append(receiptsPrefix, txHash[:]...))
if len(data) == 0 {
return nil
}
var receipt types.ReceiptForStorage
err := rlp.DecodeBytes(data, &receipt)
if err != nil {
glog.V(logger.Core).Infoln("GetReceipt err:", err)
}
return (*types.Receipt)(&receipt)
}
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
@ -329,6 +325,94 @@ func WriteBlock(db ethdb.Database, block *types.Block) error {
return nil
}
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
}
bytes, err := rlp.EncodeToBytes(storageReceipts)
if err != nil {
return err
}
// Store the flattened receipt slice
if err := db.Put(append(blockReceiptsPrefix, hash.Bytes()...), bytes); err != nil {
glog.Fatalf("failed to store block receipts into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4])
return nil
}
// WriteTransactions stores the transactions associated with a specific block
// into the given database. Besides writing the transactions, the function also
// stores a metadata entry for each transaction, detailing its position within
// the blockchain.
func WriteTransactions(db ethdb.Database, block *types.Block) error {
batch := db.NewBatch()
// Iterate over each transaction and encode it with its metadata
for i, tx := range block.Transactions() {
// Encode and queue up the transaction for storage
data, err := rlp.EncodeToBytes(tx)
if err != nil {
return err
}
if err := batch.Put(tx.Hash().Bytes(), data); err != nil {
return err
}
// Encode and queue up the transaction metadata for storage
meta := struct {
BlockHash common.Hash
BlockIndex uint64
Index uint64
}{
BlockHash: block.Hash(),
BlockIndex: block.NumberU64(),
Index: uint64(i),
}
data, err = rlp.EncodeToBytes(meta)
if err != nil {
return err
}
if err := batch.Put(append(tx.Hash().Bytes(), txMetaSuffix...), data); err != nil {
return err
}
}
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
glog.Fatalf("failed to store transactions into database: %v", err)
return err
}
return nil
}
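// Illustrative usage (not part of the original file): after WriteTransactions,
// a transaction and its positional metadata can be read back with
// GetTransaction, whose return values mirror the meta struct written above:
//
//	tx, blockHash, blockNumber, txIndex := GetTransaction(db, txHash)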
// WriteReceipts stores a batch of transaction receipts into the database.
func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
batch := db.NewBatch()
// Iterate over all the receipts and queue them for database injection
for _, receipt := range receipts {
storageReceipt := (*types.ReceiptForStorage)(receipt)
data, err := rlp.EncodeToBytes(storageReceipt)
if err != nil {
return err
}
if err := batch.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data); err != nil {
return err
}
}
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
glog.Fatalf("failed to store receipts into database: %v", err)
return err
}
return nil
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
@ -351,18 +435,35 @@ func DeleteTd(db ethdb.Database, hash common.Hash) {
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash) {
DeleteBlockReceipts(db, hash)
DeleteHeader(db, hash)
DeleteBody(db, hash)
DeleteTd(db, hash)
}
// [deprecated by eth/63]
// DeleteBlockReceipts removes all receipt data associated with a block hash.
func DeleteBlockReceipts(db ethdb.Database, hash common.Hash) {
db.Delete(append(blockReceiptsPrefix, hash.Bytes()...))
}
// DeleteTransaction removes all transaction data associated with a hash.
func DeleteTransaction(db ethdb.Database, hash common.Hash) {
db.Delete(hash.Bytes())
db.Delete(append(hash.Bytes(), txMetaSuffix...))
}
// DeleteReceipt removes all receipt data associated with a transaction hash.
func DeleteReceipt(db ethdb.Database, hash common.Hash) {
db.Delete(append(receiptsPrefix, hash.Bytes()...))
}
// [deprecated by the header/block split, remove eventually]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
data, _ := db.Get(append(blockHashPre, hash[:]...))
data, _ := db.Get(append(blockHashPrefix, hash[:]...))
if len(data) == 0 {
return nil
}

View File

@ -17,6 +17,7 @@
package core
import (
"bytes"
"encoding/json"
"io/ioutil"
"math/big"
@ -61,7 +62,7 @@ func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
return nil
}
func TestDifficulty(t *testing.T) {
func TestDifficultyFrontier(t *testing.T) {
file, err := os.Open("../tests/files/BasicTests/difficulty.json")
if err != nil {
t.Fatal(err)
@ -76,7 +77,7 @@ func TestDifficulty(t *testing.T) {
for name, test := range tests {
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
diff := CalcDifficulty(test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
diff := calcDifficultyFrontier(test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
if diff.Cmp(test.CurrentDifficulty) != 0 {
t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
}
@ -341,6 +342,163 @@ func TestHeadStorage(t *testing.T) {
}
}
// Tests that transactions and associated metadata can be stored and retrieved.
func TestTransactionStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), big.NewInt(1111), big.NewInt(11111), []byte{0x11, 0x11, 0x11})
tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), big.NewInt(2222), big.NewInt(22222), []byte{0x22, 0x22, 0x22})
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), big.NewInt(3333), big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil)
// Check that no transaction entries are in a pristine database
for i, tx := range txs {
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
}
}
// Insert all the transactions into the database, and verify contents
if err := WriteTransactions(db, block); err != nil {
t.Fatalf("failed to write transactions: %v", err)
}
for i, tx := range txs {
if txn, hash, number, index := GetTransaction(db, tx.Hash()); txn == nil {
t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
} else {
if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
}
if tx.String() != txn.String() {
t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
}
}
}
// Delete the transactions and check purge
for i, tx := range txs {
DeleteTransaction(db, tx.Hash())
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
}
}
}
// Tests that receipts can be stored and retrieved.
func TestReceiptStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
receipt1 := &types.Receipt{
PostState: []byte{0x01},
CumulativeGasUsed: big.NewInt(1),
Logs: vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte{0x11})},
&vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
},
TxHash: common.BytesToHash([]byte{0x11, 0x11}),
ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
GasUsed: big.NewInt(111111),
}
receipt2 := &types.Receipt{
PostState: []byte{0x02},
CumulativeGasUsed: big.NewInt(2),
Logs: vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte{0x22})},
&vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
},
TxHash: common.BytesToHash([]byte{0x22, 0x22}),
ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
GasUsed: big.NewInt(222222),
}
receipts := []*types.Receipt{receipt1, receipt2}
// Check that no receipt entries are in a pristine database
for i, receipt := range receipts {
if r := GetReceipt(db, receipt.TxHash); r != nil {
t.Fatalf("receipt #%d [%x]: non existent receipt returned: %v", i, receipt.TxHash, r)
}
}
// Insert all the receipts into the database, and verify contents
if err := WriteReceipts(db, receipts); err != nil {
t.Fatalf("failed to write receipts: %v", err)
}
for i, receipt := range receipts {
if r := GetReceipt(db, receipt.TxHash); r == nil {
t.Fatalf("receipt #%d [%x]: receipt not found", i, receipt.TxHash)
} else {
rlpHave, _ := rlp.EncodeToBytes(r)
rlpWant, _ := rlp.EncodeToBytes(receipt)
if bytes.Compare(rlpHave, rlpWant) != 0 {
t.Fatalf("receipt #%d [%x]: receipt mismatch: have %v, want %v", i, receipt.TxHash, r, receipt)
}
}
}
// Delete the receipts and check purge
for i, receipt := range receipts {
DeleteReceipt(db, receipt.TxHash)
if r := GetReceipt(db, receipt.TxHash); r != nil {
t.Fatalf("receipt #%d [%x]: deleted receipt returned: %v", i, receipt.TxHash, r)
}
}
}
// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
receipt1 := &types.Receipt{
PostState: []byte{0x01},
CumulativeGasUsed: big.NewInt(1),
Logs: vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte{0x11})},
&vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
},
TxHash: common.BytesToHash([]byte{0x11, 0x11}),
ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
GasUsed: big.NewInt(111111),
}
receipt2 := &types.Receipt{
PostState: []byte{0x02},
CumulativeGasUsed: big.NewInt(2),
Logs: vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte{0x22})},
&vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
},
TxHash: common.BytesToHash([]byte{0x22, 0x22}),
ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
GasUsed: big.NewInt(222222),
}
receipts := []*types.Receipt{receipt1, receipt2}
// Check that no receipt entries are in a pristine database
hash := common.BytesToHash([]byte{0x03, 0x14})
if rs := GetBlockReceipts(db, hash); len(rs) != 0 {
t.Fatalf("non existent receipts returned: %v", rs)
}
// Insert the receipt slice into the database and check presence
if err := WriteBlockReceipts(db, hash, receipts); err != nil {
t.Fatalf("failed to write block receipts: %v", err)
}
if rs := GetBlockReceipts(db, hash); len(rs) == 0 {
t.Fatalf("no receipts returned")
} else {
for i := 0; i < len(receipts); i++ {
rlpHave, _ := rlp.EncodeToBytes(rs[i])
rlpWant, _ := rlp.EncodeToBytes(receipts[i])
if bytes.Compare(rlpHave, rlpWant) != 0 {
t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
}
}
}
// Delete the receipt slice and check purge
DeleteBlockReceipts(db, hash)
if rs := GetBlockReceipts(db, hash); len(rs) != 0 {
t.Fatalf("deleted receipts returned: %v", rs)
}
}
func TestMipmapBloom(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
@ -425,7 +583,7 @@ func TestMipmapChain(t *testing.T) {
}
// store the receipts
err := PutReceipts(db, receipts)
err := WriteReceipts(db, receipts)
if err != nil {
t.Fatal(err)
}
@ -439,7 +597,7 @@ func TestMipmapChain(t *testing.T) {
if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
if err := WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}


@ -33,8 +33,17 @@ func Call(env vm.Environment, caller vm.ContractRef, addr common.Address, input
// CallCode executes the given address' code as the given contract address
func CallCode(env vm.Environment, caller vm.ContractRef, addr common.Address, input []byte, gas, gasPrice, value *big.Int) (ret []byte, err error) {
prev := caller.Address()
ret, _, err = exec(env, caller, &prev, &addr, input, env.Db().GetCode(addr), gas, gasPrice, value)
callerAddr := caller.Address()
ret, _, err = exec(env, caller, &callerAddr, &addr, input, env.Db().GetCode(addr), gas, gasPrice, value)
return ret, err
}
// DelegateCall is equivalent to CallCode except that sender and value propagate from the parent scope to the child scope
func DelegateCall(env vm.Environment, caller vm.ContractRef, addr common.Address, input []byte, gas, gasPrice *big.Int) (ret []byte, err error) {
callerAddr := caller.Address()
originAddr := env.Origin()
callerValue := caller.Value()
ret, _, err = execDelegateCall(env, caller, &originAddr, &callerAddr, &addr, input, env.Db().GetCode(addr), gas, gasPrice, callerValue)
return ret, err
}
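DELEGATECALL (EIP-7) differs from CALLCODE only in how the calling context propagates; a brief illustration of what "sender and value propagate" means, using hypothetical contracts A, B and C:

// If A calls B, and B executes DELEGATECALL to C:
//   - C's code runs against B's storage and balance (as with CALLCODE)
//   - the caller C observes is A, the parent's caller (with CALLCODE it would be B)
//   - the value C observes is the value A originally sent to B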
@ -52,7 +61,6 @@ func Create(env vm.Environment, caller vm.ContractRef, code []byte, gas, gasPric
func exec(env vm.Environment, caller vm.ContractRef, address, codeAddr *common.Address, input, code []byte, gas, gasPrice, value *big.Int) (ret []byte, addr common.Address, err error) {
evm := vm.NewVm(env)
// Depth check execution. Fail if we're trying to execute above the
// limit.
if env.Depth() > int(params.CallCreateDepth.Int64()) {
@ -69,17 +77,15 @@ func exec(env vm.Environment, caller vm.ContractRef, address, codeAddr *common.A
var createAccount bool
if address == nil {
// Generate a new address
// Create a new account on the state
nonce := env.Db().GetNonce(caller.Address())
env.Db().SetNonce(caller.Address(), nonce+1)
addr = crypto.CreateAddress(caller.Address(), nonce)
address = &addr
createAccount = true
}
snapshot := env.MakeSnapshot()
snapshotPreTransfer := env.MakeSnapshot()
var (
from = env.Db().GetAccount(caller.Address())
to vm.Account
@ -95,12 +101,68 @@ func exec(env vm.Environment, caller vm.ContractRef, address, codeAddr *common.A
}
env.Transfer(from, to, value)
// initialise a new contract and set the code that is to be used by the
// EVM. The contract is a scoped environment for this execution context
// only.
contract := vm.NewContract(caller, to, value, gas, gasPrice)
contract.SetCallCode(codeAddr, code)
defer contract.Finalise()
ret, err = evm.Run(contract, input)
// if the contract creation ran successfully and no errors were returned
// calculate the gas required to store the code. If the code could not
// be stored due to insufficient gas, set an error and let it be handled
// by the error checking condition below.
if err == nil && createAccount {
dataGas := big.NewInt(int64(len(ret)))
dataGas.Mul(dataGas, params.CreateDataGas)
if contract.UseGas(dataGas) {
env.Db().SetCode(*address, ret)
} else {
err = vm.CodeStoreOutOfGasError
}
}
// When the EVM returned an error or when setting the creation code
// above, we revert to the snapshot and consume any gas remaining. Additionally,
// when we're in Homestead this also counts for code storage gas errors.
if err != nil && (params.IsHomestead(env.BlockNumber()) || err != vm.CodeStoreOutOfGasError) {
contract.UseGas(contract.Gas)
env.SetSnapshot(snapshotPreTransfer)
}
return ret, addr, err
}
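The code-deposit charge above is linear in the size of the returned code. A self-contained sketch of the same arithmetic, assuming params.CreateDataGas is 200 gas per byte as in this release (codeDepositCost is an illustrative name, not part of the diff):

package main

import (
	"fmt"
	"math/big"
)

// createDataGas mirrors params.CreateDataGas: 200 gas per byte of stored code.
var createDataGas = big.NewInt(200)

// codeDepositCost is the gas charged for persisting the code a successful
// contract creation returns.
func codeDepositCost(code []byte) *big.Int {
	cost := big.NewInt(int64(len(code)))
	return cost.Mul(cost, createDataGas)
}

func main() {
	// A 1000-byte runtime costs 200000 gas to store; with less gas remaining,
	// exec above sets vm.CodeStoreOutOfGasError instead of storing it.
	fmt.Println(codeDepositCost(make([]byte, 1000)))
}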
func execDelegateCall(env vm.Environment, caller vm.ContractRef, originAddr, toAddr, codeAddr *common.Address, input, code []byte, gas, gasPrice, value *big.Int) (ret []byte, addr common.Address, err error) {
evm := vm.NewVm(env)
// Depth check execution. Fail if we're trying to execute above the
// limit.
if env.Depth() > int(params.CallCreateDepth.Int64()) {
caller.ReturnGas(gas, gasPrice)
return nil, common.Address{}, vm.DepthError
}
snapshot := env.MakeSnapshot()
var to vm.Account
if !env.Db().Exist(*toAddr) {
to = env.Db().CreateAccount(*toAddr)
} else {
to = env.Db().GetAccount(*toAddr)
}
// Initialise a new contract and set the delegate values
contract := vm.NewContract(caller, to, value, gas, gasPrice).AsDelegate()
contract.SetCallCode(codeAddr, code)
defer contract.Finalise()
ret, err = evm.Run(contract, input)
if err != nil {
contract.UseGas(contract.Gas)
env.SetSnapshot(snapshot)
}
return ret, addr, err

core/gaspool.go Normal file

@ -0,0 +1,46 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import "math/big"
// GasPool tracks the amount of gas available during
// execution of the transactions in a block.
// The zero value is a pool with zero gas available.
type GasPool big.Int
// AddGas makes gas available for execution.
func (gp *GasPool) AddGas(amount *big.Int) *GasPool {
i := (*big.Int)(gp)
i.Add(i, amount)
return gp
}
// SubGas deducts the given amount from the pool if enough gas is
// available and returns an error otherwise.
func (gp *GasPool) SubGas(amount *big.Int) error {
i := (*big.Int)(gp)
if i.Cmp(amount) < 0 {
return &GasLimitErr{Have: new(big.Int).Set(i), Want: amount}
}
i.Sub(i, amount)
return nil
}
func (gp *GasPool) String() string {
return (*big.Int)(gp).String()
}
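A minimal usage sketch, assuming the GasPool type above is in scope and fmt and math/big are imported (the numbers are illustrative; in practice Process seeds the pool from the block gas limit):

gp := new(GasPool).AddGas(big.NewInt(100000)) // pretend block gas limit
if err := gp.SubGas(big.NewInt(21000)); err != nil {
	// SubGas failed with a *GasLimitErr: the block has no gas left for this transaction
	panic(err)
}
fmt.Println("gas left:", gp) // prints 79000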


@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
if err := WriteBlock(chainDb, block); err != nil {
return nil, err
}
if err := PutBlockReceipts(chainDb, block.Hash(), nil); err != nil {
if err := WriteBlockReceipts(chainDb, block.Hash(), nil); err != nil {
return nil, err
}
if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {


@ -28,6 +28,7 @@ type Account struct {
Nonce uint64 `json:"nonce"`
Root string `json:"root"`
CodeHash string `json:"codeHash"`
Code string `json:"code"`
Storage map[string]string `json:"storage"`
}
@ -47,7 +48,7 @@ func (self *StateDB) RawDump() World {
addr := self.trie.GetKey(it.Key)
stateObject := NewStateObjectFromBytes(common.BytesToAddress(addr), it.Value, self.db)
account := Account{Balance: stateObject.balance.String(), Nonce: stateObject.nonce, Root: common.Bytes2Hex(stateObject.Root()), CodeHash: common.Bytes2Hex(stateObject.codeHash)}
account := Account{Balance: stateObject.balance.String(), Nonce: stateObject.nonce, Root: common.Bytes2Hex(stateObject.Root()), CodeHash: common.Bytes2Hex(stateObject.codeHash), Code: common.Bytes2Hex(stateObject.Code())}
account.Storage = make(map[string]string)
storageIt := stateObject.trie.Iterator()


@ -260,6 +260,13 @@ func (self *StateObject) Nonce() uint64 {
return self.nonce
}
// Never called, but must be present to allow StateObject to be used
// as a vm.Account interface that also satisfies the vm.ContractRef
// interface. Interfaces are awesome.
func (self *StateObject) Value() *big.Int {
panic("Value on StateObject should never be called")
}
func (self *StateObject) EachStorage(cb func(key, value []byte)) {
// When iterating over the storage check the cache first
for h, v := range self.storage {


@ -55,7 +55,6 @@ type StateDB struct {
func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
tr, err := trie.NewSecure(root, db)
if err != nil {
glog.Errorf("can't create state trie with root %x: %v", root[:], err)
return nil, err
}
return &StateDB{

core/state_processor.go Normal file

@ -0,0 +1,106 @@
package core
import (
"math/big"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
var (
big8 = big.NewInt(8)
big32 = big.NewInt(32)
)
type StateProcessor struct {
bc *BlockChain
}
func NewStateProcessor(bc *BlockChain) *StateProcessor {
return &StateProcessor{bc}
}
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
//
// Process returns the receipts and logs accumulated during processing and
// the total amount of gas used. If any of the transactions fail to execute
// due to insufficient gas, an error is returned.
func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB) (types.Receipts, vm.Logs, *big.Int, error) {
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
err error
header = block.Header()
allLogs vm.Logs
gp = new(GasPool).AddGas(block.GasLimit())
)
for i, tx := range block.Transactions() {
statedb.StartRecord(tx.Hash(), block.Hash(), i)
receipt, logs, _, err := ApplyTransaction(p.bc, gp, statedb, header, tx, totalUsedGas)
if err != nil {
return nil, nil, totalUsedGas, err
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, logs...)
}
AccumulateRewards(statedb, header, block.Uncles())
return receipts, allLogs, totalUsedGas, err
}
// ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment.
//
// ApplyTransaction returns the generated receipt and the vm logs emitted during
// the execution of the state transition phase.
func ApplyTransaction(bc *BlockChain, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int) (*types.Receipt, vm.Logs, *big.Int, error) {
_, gas, err := ApplyMessage(NewEnv(statedb, bc, tx, header), tx, gp)
if err != nil {
return nil, nil, nil, err
}
// Update the state with pending changes
usedGas.Add(usedGas, gas)
receipt := types.NewReceipt(statedb.IntermediateRoot().Bytes(), usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = new(big.Int).Set(gas)
if MessageCreatesContract(tx) {
from, _ := tx.From()
receipt.ContractAddress = crypto.CreateAddress(from, tx.Nonce())
}
logs := statedb.GetLogs(tx.Hash())
receipt.Logs = logs
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
glog.V(logger.Debug).Infoln(receipt)
return receipt, logs, gas, err
}
// AccumulateRewards credits the coinbase of the given block with the
// mining reward. The total reward consists of the static block reward
// and rewards for included uncles. The coinbase of each uncle block is
// also rewarded.
func AccumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
reward := new(big.Int).Set(BlockReward)
r := new(big.Int)
for _, uncle := range uncles {
r.Add(uncle.Number, big8)
r.Sub(r, header.Number)
r.Mul(r, BlockReward)
r.Div(r, big8)
statedb.AddBalance(uncle.Coinbase, r)
r.Div(BlockReward, big32)
reward.Add(reward, r)
}
statedb.AddBalance(header.Coinbase, reward)
}
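The uncle arithmetic above is easy to misread, so here it is worked numerically (a sketch assuming BlockReward is 5 ether, its value in this codebase; all other names are illustrative):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	blockReward := big.NewInt(5e18) // BlockReward: 5 ether in this release
	big8, big32 := big.NewInt(8), big.NewInt(32)

	blockNum, uncleNum := big.NewInt(100), big.NewInt(99) // uncle one block back

	// uncleReward = (uncleNum + 8 - blockNum) * BlockReward / 8
	r := new(big.Int).Add(uncleNum, big8)
	r.Sub(r, blockNum)
	r.Mul(r, blockReward)
	r.Div(r, big8)
	fmt.Println("uncle coinbase gets:", r) // 4375000000000000000 (4.375 ether)

	// the including miner additionally earns BlockReward / 32 per uncle
	fmt.Println("miner bonus:", new(big.Int).Div(blockReward, big32)) // 156250000000000000
}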


@ -27,6 +27,10 @@ import (
"github.com/ethereum/go-ethereum/params"
)
var (
Big0 = big.NewInt(0)
)
/*
The State Transitioning Model
@ -59,6 +63,7 @@ type StateTransition struct {
// Message represents a message sent to a contract.
type Message interface {
From() (common.Address, error)
FromFrontier() (common.Address, error)
To() *common.Address
GasPrice() *big.Int
@ -75,8 +80,13 @@ func MessageCreatesContract(msg Message) bool {
// IntrinsicGas computes the 'intrinsic gas' for a message
// with the given data.
func IntrinsicGas(data []byte) *big.Int {
igas := new(big.Int).Set(params.TxGas)
func IntrinsicGas(data []byte, contractCreation, homestead bool) *big.Int {
igas := new(big.Int)
if contractCreation && homestead {
igas.Set(params.TxGasContractCreation)
} else {
igas.Set(params.TxGas)
}
if len(data) > 0 {
var nz int64
for _, byt := range data {
@ -110,7 +120,15 @@ func ApplyMessage(env vm.Environment, msg Message, gp *GasPool) ([]byte, *big.In
}
func (self *StateTransition) from() (vm.Account, error) {
f, err := self.msg.From()
var (
f common.Address
err error
)
if params.IsHomestead(self.env.BlockNumber()) {
f, err = self.msg.From()
} else {
f, err = self.msg.FromFrontier()
}
if err != nil {
return nil, err
}
@ -195,35 +213,35 @@ func (self *StateTransition) transitionDb() (ret []byte, usedGas *big.Int, err e
if err = self.preCheck(); err != nil {
return
}
msg := self.msg
sender, _ := self.from() // err checked in preCheck
homestead := params.IsHomestead(self.env.BlockNumber())
contractCreation := MessageCreatesContract(msg)
// Pay intrinsic gas
if err = self.useGas(IntrinsicGas(self.data)); err != nil {
if err = self.useGas(IntrinsicGas(self.data, contractCreation, homestead)); err != nil {
return nil, nil, InvalidTxError(err)
}
vmenv := self.env
var addr common.Address
if MessageCreatesContract(msg) {
ret, addr, err = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value)
if err == nil {
dataGas := big.NewInt(int64(len(ret)))
dataGas.Mul(dataGas, params.CreateDataGas)
if err := self.useGas(dataGas); err == nil {
self.state.SetCode(addr, ret)
} else {
ret = nil // does not affect consensus but useful for StateTests validations
glog.V(logger.Core).Infoln("Insufficient gas for creating code. Require", dataGas, "and have", self.gas)
}
//var addr common.Address
if contractCreation {
ret, _, err = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value)
if homestead && err == vm.CodeStoreOutOfGasError {
self.gas = Big0
}
if err != nil {
ret = nil
glog.V(logger.Core).Infoln("VM create err:", err)
}
glog.V(logger.Core).Infoln("VM create err:", err)
} else {
// Increment the nonce for the next transaction
self.state.SetNonce(sender.Address(), self.state.GetNonce(sender.Address())+1)
ret, err = vmenv.Call(sender, self.to().Address(), self.data, self.gas, self.gasPrice, self.value)
glog.V(logger.Core).Infoln("VM call err:", err)
if err != nil {
glog.V(logger.Core).Infoln("VM call err:", err)
}
}
if err != nil && IsValueTransferErr(err) {

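To make the fork-dependent intrinsic gas change above concrete, here are the expected results for a few inputs, assuming this release's parameter values (TxGas = 21000, TxGasContractCreation = 53000, and 68/4 gas per non-zero/zero data byte):

// IntrinsicGas(nil, false, anyFork)          == 21000  (plain transfer)
// IntrinsicGas(nil, true,  false)            == 21000  (creation, pre-Homestead)
// IntrinsicGas(nil, true,  true)             == 53000  (creation, post-Homestead)
// IntrinsicGas([]byte{0, 1}, false, anyFork) == 21000 + 4 + 68 == 21072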

@ -1,263 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"crypto/ecdsa"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
tx, _ := types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil).SignECDSA(key)
return tx
}
func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
var m event.TypeMux
key, _ := crypto.GenerateKey()
newPool := NewTxPool(&m, func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
newPool.resetState()
return newPool, key
}
func TestInvalidTransactions(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(100), key)
if err := pool.Add(tx); err != ErrNonExistentAccount {
t.Error("expected", ErrNonExistentAccount)
}
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
if err := pool.Add(tx); err != ErrInsufficientFunds {
t.Error("expected", ErrInsufficientFunds)
}
balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(tx.Gas(), tx.GasPrice()))
currentState.AddBalance(from, balance)
if err := pool.Add(tx); err != ErrIntrinsicGas {
t.Error("expected", ErrIntrinsicGas, "got", err)
}
currentState.SetNonce(from, 1)
currentState.AddBalance(from, big.NewInt(0xffffffffffffff))
tx = transaction(0, big.NewInt(100000), key)
if err := pool.Add(tx); err != ErrNonce {
t.Error("expected", ErrNonce)
}
}
func TestTransactionQueue(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(100), key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
pool.queueTx(tx.Hash(), tx)
pool.checkQueue()
if len(pool.pending) != 1 {
t.Error("expected valid txs to be 1 is", len(pool.pending))
}
tx = transaction(1, big.NewInt(100), key)
from, _ = tx.From()
currentState.SetNonce(from, 2)
pool.queueTx(tx.Hash(), tx)
pool.checkQueue()
if _, ok := pool.pending[tx.Hash()]; ok {
t.Error("expected transaction to be in tx pool")
}
if len(pool.queue[from]) > 0 {
t.Error("expected transaction queue to be empty. is", len(pool.queue[from]))
}
pool, key = setupTxPool()
tx1 := transaction(0, big.NewInt(100), key)
tx2 := transaction(10, big.NewInt(100), key)
tx3 := transaction(11, big.NewInt(100), key)
pool.queueTx(tx1.Hash(), tx1)
pool.queueTx(tx2.Hash(), tx2)
pool.queueTx(tx3.Hash(), tx3)
from, _ = tx1.From()
pool.checkQueue()
if len(pool.pending) != 1 {
t.Error("expected tx pool to be 1 =")
}
if len(pool.queue[from]) != 2 {
t.Error("expected len(queue) == 2, got", len(pool.queue[from]))
}
}
func TestRemoveTx(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(100), key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
pool.queueTx(tx.Hash(), tx)
pool.addTx(tx.Hash(), from, tx)
if len(pool.queue) != 1 {
t.Error("expected queue to be 1, got", len(pool.queue))
}
if len(pool.pending) != 1 {
t.Error("expected txs to be 1, got", len(pool.pending))
}
pool.RemoveTx(tx.Hash())
if len(pool.queue) > 0 {
t.Error("expected queue to be 0, got", len(pool.queue))
}
if len(pool.pending) > 0 {
t.Error("expected txs to be 0, got", len(pool.pending))
}
}
func TestNegativeValue(t *testing.T) {
pool, key := setupTxPool()
tx, _ := types.NewTransaction(0, common.Address{}, big.NewInt(-1), big.NewInt(100), big.NewInt(1), nil).SignECDSA(key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
if err := pool.Add(tx); err != ErrNegativeValue {
t.Error("expected", ErrNegativeValue, "got", err)
}
}
func TestTransactionChainFork(t *testing.T) {
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool.currentState = func() (*state.StateDB, error) { return statedb, nil }
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
}
resetState()
tx := transaction(0, big.NewInt(100000), key)
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
pool.RemoveTransactions([]*types.Transaction{tx})
// reset the pool's internal state
resetState()
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
}
func TestTransactionDoubleNonce(t *testing.T) {
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool.currentState = func() (*state.StateDB, error) { return statedb, nil }
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
}
resetState()
tx := transaction(0, big.NewInt(100000), key)
tx2 := transaction(0, big.NewInt(1000000), key)
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
if err := pool.add(tx2); err != nil {
t.Error("didn't expect error", err)
}
pool.checkQueue()
if len(pool.pending) != 2 {
t.Error("expected 2 pending txs. Got", len(pool.pending))
}
}
func TestMissingNonce(t *testing.T) {
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
tx := transaction(1, big.NewInt(100000), key)
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
if len(pool.pending) != 0 {
t.Error("expected 0 pending transactions, got", len(pool.pending))
}
if len(pool.queue[addr]) != 1 {
t.Error("expected 1 queued transaction, got", len(pool.queue[addr]))
}
}
func TestNonceRecovery(t *testing.T) {
const n = 10
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
currentState, _ := pool.currentState()
currentState.SetNonce(addr, n)
currentState.AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
tx := transaction(n, big.NewInt(100000), key)
if err := pool.Add(tx); err != nil {
t.Error(err)
}
// simulate some weird re-order of transactions and missing nonce(s)
currentState.SetNonce(addr, n-1)
pool.resetState()
if fn := pool.pendingState.GetNonce(addr); fn != n+1 {
t.Errorf("expected nonce to be %d, got %d", n+1, fn)
}
}
func TestRemovedTxEvent(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(1000000), key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1000000000000))
pool.eventMux.Post(RemovedTransactionEvent{types.Transactions{tx}})
pool.eventMux.Post(ChainHeadEvent{nil})
if len(pool.pending) != 1 {
t.Error("expected 1 pending tx, got", len(pool.pending))
}
}


@ -1,171 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
)
var (
receiptsPre = []byte("receipts-")
blockReceiptsPre = []byte("receipts-block-")
)
// PutTransactions stores the transactions in the given database
func PutTransactions(db ethdb.Database, block *types.Block, txs types.Transactions) error {
batch := db.NewBatch()
for i, tx := range block.Transactions() {
rlpEnc, err := rlp.EncodeToBytes(tx)
if err != nil {
return fmt.Errorf("failed encoding tx: %v", err)
}
batch.Put(tx.Hash().Bytes(), rlpEnc)
var txExtra struct {
BlockHash common.Hash
BlockIndex uint64
Index uint64
}
txExtra.BlockHash = block.Hash()
txExtra.BlockIndex = block.NumberU64()
txExtra.Index = uint64(i)
rlpMeta, err := rlp.EncodeToBytes(txExtra)
if err != nil {
return fmt.Errorf("failed encoding tx meta data: %v", err)
}
batch.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta)
}
if err := batch.Write(); err != nil {
return fmt.Errorf("failed writing tx to db: %v", err)
}
return nil
}
func DeleteTransaction(db ethdb.Database, txHash common.Hash) {
db.Delete(txHash[:])
}
func GetTransaction(db ethdb.Database, txhash common.Hash) *types.Transaction {
data, _ := db.Get(txhash[:])
if len(data) != 0 {
var tx types.Transaction
if err := rlp.DecodeBytes(data, &tx); err != nil {
return nil
}
return &tx
}
return nil
}
// PutReceipts stores the receipts in the current database
func PutReceipts(db ethdb.Database, receipts types.Receipts) error {
batch := new(leveldb.Batch)
_, batchWrite := db.(*ethdb.LDBDatabase)
for _, receipt := range receipts {
storageReceipt := (*types.ReceiptForStorage)(receipt)
bytes, err := rlp.EncodeToBytes(storageReceipt)
if err != nil {
return err
}
if batchWrite {
batch.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
} else {
err = db.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
if err != nil {
return err
}
}
}
if db, ok := db.(*ethdb.LDBDatabase); ok {
if err := db.LDB().Write(batch, nil); err != nil {
return err
}
}
return nil
}
// DeleteReceipt removes a receipt from the database
func DeleteReceipt(db ethdb.Database, txHash common.Hash) {
db.Delete(append(receiptsPre, txHash[:]...))
}
// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
data, _ := db.Get(append(receiptsPre, txHash[:]...))
if len(data) == 0 {
return nil
}
var receipt types.ReceiptForStorage
err := rlp.DecodeBytes(data, &receipt)
if err != nil {
glog.V(logger.Core).Infoln("GetReceipt err:", err)
}
return (*types.Receipt)(&receipt)
}
// GetBlockReceipts returns the receipts generated by the transactions
// included in the block with the given hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
data, _ := db.Get(append(blockReceiptsPre, hash[:]...))
if len(data) == 0 {
return nil
}
rs := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &rs); err != nil {
glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
return nil
}
receipts := make(types.Receipts, len(rs))
for i, receipt := range rs {
receipts[i] = (*types.Receipt)(receipt)
}
return receipts
}
// PutBlockReceipts stores the receipts associated with the block's
// transactions, keyed by block hash, in a single slice. This is required
// for forks and chain reorgs
func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
rs := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
rs[i] = (*types.ReceiptForStorage)(receipt)
}
bytes, err := rlp.EncodeToBytes(rs)
if err != nil {
return err
}
err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
if err != nil {
return err
}
return nil
}


@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
)
var (
@ -65,10 +66,11 @@ type TxPool struct {
minGasPrice *big.Int
eventMux *event.TypeMux
events event.Subscription
mu sync.RWMutex
pending map[common.Hash]*types.Transaction // processable transactions
queue map[common.Address]map[common.Hash]*types.Transaction
mu sync.RWMutex
pending map[common.Hash]*types.Transaction // processable transactions
queue map[common.Address]map[common.Hash]*types.Transaction
homestead bool
}
func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
@ -83,6 +85,7 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func(
pendingState: nil,
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
}
go pool.eventLoop()
return pool
@ -96,6 +99,10 @@ func (pool *TxPool) eventLoop() {
switch ev := ev.Data.(type) {
case ChainHeadEvent:
pool.mu.Lock()
if ev.Block != nil && params.IsHomestead(ev.Block.Number()) {
pool.homestead = true
}
pool.resetState()
pool.mu.Unlock()
case GasPriceChanged:
@ -138,7 +145,6 @@ func (pool *TxPool) resetState() {
}
}
}
// Check the queue and move transactions over to the pending if possible
// or remove those that have become invalid
pool.checkQueue()
@ -171,29 +177,23 @@ func (pool *TxPool) Stats() (pending int, queued int) {
// validateTx checks whether a transaction is valid according
// to the consensus rules.
func (pool *TxPool) validateTx(tx *types.Transaction) error {
// Validate sender
var (
from common.Address
err error
)
// Drop transactions under our own minimal accepted gas price
if pool.minGasPrice.Cmp(tx.GasPrice()) > 0 {
return ErrCheap
}
// Validate the transaction sender and its sig. Throw
// if the from field is invalid.
if from, err = tx.From(); err != nil {
currentState, err := pool.currentState()
if err != nil {
return err
}
from, err := tx.From()
if err != nil {
return ErrInvalidSender
}
// Make sure the account exists. Non-existent accounts
// have no funds and will therefore never pass.
currentState, err := pool.currentState()
if err != nil {
return err
}
if !currentState.HasAccount(from) {
return ErrNonExistentAccount
}
@ -222,8 +222,8 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
return ErrInsufficientFunds
}
// Should supply enough intrinsic gas
if tx.Gas().Cmp(IntrinsicGas(tx.Data())) < 0 {
intrGas := IntrinsicGas(tx.Data(), MessageCreatesContract(tx), pool.homestead)
if tx.Gas().Cmp(intrGas) < 0 {
return ErrIntrinsicGas
}
@ -290,17 +290,15 @@ func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Trans
}
// Add queues a single transaction in the pool if it is valid.
func (self *TxPool) Add(tx *types.Transaction) (err error) {
func (self *TxPool) Add(tx *types.Transaction) error {
self.mu.Lock()
defer self.mu.Unlock()
err = self.add(tx)
if err == nil {
// check and validate the queue
self.checkQueue()
if err := self.add(tx); err != nil {
return err
}
return
self.checkQueue()
return nil
}
// AddTransactions attempts to queue all valid transactions in txs.
@ -368,7 +366,7 @@ func (self *TxPool) GetQueuedTransactions() types.Transactions {
ret = append(ret, tx)
}
}
sort.Sort(types.TxByNonce{ret})
sort.Sort(types.TxByNonce(ret))
return ret
}
@ -406,51 +404,55 @@ func (pool *TxPool) checkQueue() {
pool.resetState()
}
var addq txQueue
var promote txQueue
for address, txs := range pool.queue {
// guessed nonce is the nonce currently kept by the tx pool (pending state)
guessedNonce := pool.pendingState.GetNonce(address)
// true nonce is the nonce known by the last state
currentState, err := pool.currentState()
if err != nil {
glog.Errorf("could not get current state: %v", err)
return
}
trueNonce := currentState.GetNonce(address)
addq := addq[:0]
balance := currentState.GetBalance(address)
var (
guessedNonce = pool.pendingState.GetNonce(address) // nonce currently kept by the tx pool (pending state)
trueNonce = currentState.GetNonce(address) // nonce known by the last state
)
promote = promote[:0]
for hash, tx := range txs {
if tx.Nonce() < trueNonce {
// Drop queued transactions whose nonce is lower than
// the account nonce because they have been processed.
// Drop processed or out of fund transactions
if tx.Nonce() < trueNonce || balance.Cmp(tx.Cost()) < 0 {
if glog.V(logger.Core) {
glog.Infof("removed tx (%v) from pool queue: low tx nonce or out of funds\n", tx)
}
delete(txs, hash)
} else {
// Collect the remaining transactions for the next pass.
addq = append(addq, txQueueEntry{hash, address, tx})
}
}
// Find the next consecutive nonce range starting at the
// current account nonce.
sort.Sort(addq)
for i, e := range addq {
// start deleting the transactions from the queue if they exceed the limit
if i > maxQueued {
delete(pool.queue[address], e.hash)
continue
}
if e.Nonce() > guessedNonce {
if len(addq)-i > maxQueued {
// Collect the remaining transactions for the next pass.
promote = append(promote, txQueueEntry{hash, address, tx})
}
// Find the next consecutive nonce range starting at the current account nonce,
// pushing the guessed nonce forward if we add consecutive transactions.
sort.Sort(promote)
for i, entry := range promote {
// If we reached a gap in the nonces, enforce transaction limit and stop
if entry.Nonce() > guessedNonce {
if len(promote)-i > maxQueued {
if glog.V(logger.Debug) {
glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(e.hash[:]))
glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(entry.hash[:]))
}
for j := i + maxQueued; j < len(addq); j++ {
delete(txs, addq[j].hash)
for _, drop := range promote[i+maxQueued:] {
delete(txs, drop.hash)
}
}
break
}
delete(txs, e.hash)
pool.addTx(e.hash, address, e.Transaction)
// Otherwise promote the transaction and move the guessed nonce if needed
pool.addTx(entry.hash, address, entry.Transaction)
delete(txs, entry.hash)
if entry.Nonce() == guessedNonce {
guessedNonce++
}
}
// Delete the entire queue entry if it became empty.
if len(txs) == 0 {
@ -460,20 +462,56 @@ func (pool *TxPool) checkQueue() {
}
// validatePool removes invalid and processed transactions from the main pool.
// If a transaction is removed for being invalid (e.g. out of funds), all
// subsequent (still valid) transactions are moved back into the future queue.
// This is important to prevent a drained account from DOSing the network with
// non-executable transactions.
func (pool *TxPool) validatePool() {
state, err := pool.currentState()
if err != nil {
glog.V(logger.Info).Infof("failed to get current state: %v", err)
return
}
balanceCache := make(map[common.Address]*big.Int)
// Clean up the pending pool, accumulating invalid nonces
gaps := make(map[common.Address]uint64)
for hash, tx := range pool.pending {
from, _ := tx.From() // err already checked
// perform light nonce validation
if state.GetNonce(from) > tx.Nonce() {
sender, _ := tx.From() // err already checked
// Perform light nonce and balance validation
balance := balanceCache[sender]
if balance == nil {
balance = state.GetBalance(sender)
balanceCache[sender] = balance
}
if past := state.GetNonce(sender) > tx.Nonce(); past || balance.Cmp(tx.Cost()) < 0 {
// Remove the transaction once it has become invalid (past nonce or out of funds)
if glog.V(logger.Core) {
glog.Infof("removed tx (%x) from pool: low tx nonce\n", hash[:4])
glog.Infof("removed tx (%v) from pool: low tx nonce or out of funds\n", tx)
}
delete(pool.pending, hash)
// Track the smallest invalid nonce to postpone subsequent transactions
if !past {
if prev, ok := gaps[sender]; !ok || tx.Nonce() < prev {
gaps[sender] = tx.Nonce()
}
}
}
}
// Move all transactions after a gap back to the future queue
if len(gaps) > 0 {
for hash, tx := range pool.pending {
sender, _ := tx.From()
if gap, ok := gaps[sender]; ok && tx.Nonce() >= gap {
if glog.V(logger.Core) {
glog.Infof("postponed tx (%v) due to introduced gap\n", tx)
}
pool.queueTx(hash, tx)
delete(pool.pending, hash)
}
}
}
}
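The gap-tracking idea in validatePool generalizes well; this standalone sketch (hypothetical names, not from the diff) shows the same two-pass strategy of recording the smallest invalidated nonce per sender and demoting everything at or above it:

package main

import "fmt"

type fakeTx struct {
	sender string
	nonce  uint64
	funded bool // false once the sender can no longer afford it
}

func main() {
	pending := []fakeTx{{"a", 5, true}, {"a", 6, false}, {"a", 7, true}}

	// Pass 1: drop unfunded txs, remembering the smallest dropped nonce per sender.
	gaps := map[string]uint64{}
	var kept []fakeTx
	for _, tx := range pending {
		if !tx.funded {
			if g, ok := gaps[tx.sender]; !ok || tx.nonce < g {
				gaps[tx.sender] = tx.nonce
			}
			continue
		}
		kept = append(kept, tx)
	}
	// Pass 2: postpone everything at or above a sender's gap, since those
	// transactions are still valid but no longer executable in order.
	for _, tx := range kept {
		if g, ok := gaps[tx.sender]; ok && tx.nonce >= g {
			fmt.Println("postpone", tx.sender, tx.nonce) // prints: postpone a 7
		}
	}
}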

core/tx_pool_test.go Normal file

@ -0,0 +1,526 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"crypto/ecdsa"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
tx, _ := types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil).SignECDSA(key)
return tx
}
func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
var m event.TypeMux
key, _ := crypto.GenerateKey()
newPool := NewTxPool(&m, func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
newPool.resetState()
return newPool, key
}
func TestInvalidTransactions(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(100), key)
if err := pool.Add(tx); err != ErrNonExistentAccount {
t.Error("expected", ErrNonExistentAccount)
}
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
if err := pool.Add(tx); err != ErrInsufficientFunds {
t.Error("expected", ErrInsufficientFunds)
}
balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(tx.Gas(), tx.GasPrice()))
currentState.AddBalance(from, balance)
if err := pool.Add(tx); err != ErrIntrinsicGas {
t.Error("expected", ErrIntrinsicGas, "got", err)
}
currentState.SetNonce(from, 1)
currentState.AddBalance(from, big.NewInt(0xffffffffffffff))
tx = transaction(0, big.NewInt(100000), key)
if err := pool.Add(tx); err != ErrNonce {
t.Error("expected", ErrNonce)
}
}
func TestTransactionQueue(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(100), key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1000))
pool.queueTx(tx.Hash(), tx)
pool.checkQueue()
if len(pool.pending) != 1 {
t.Error("expected valid txs to be 1 is", len(pool.pending))
}
tx = transaction(1, big.NewInt(100), key)
from, _ = tx.From()
currentState.SetNonce(from, 2)
pool.queueTx(tx.Hash(), tx)
pool.checkQueue()
if _, ok := pool.pending[tx.Hash()]; ok {
t.Error("expected transaction to be in tx pool")
}
if len(pool.queue[from]) > 0 {
t.Error("expected transaction queue to be empty. is", len(pool.queue[from]))
}
pool, key = setupTxPool()
tx1 := transaction(0, big.NewInt(100), key)
tx2 := transaction(10, big.NewInt(100), key)
tx3 := transaction(11, big.NewInt(100), key)
from, _ = tx1.From()
currentState, _ = pool.currentState()
currentState.AddBalance(from, big.NewInt(1000))
pool.queueTx(tx1.Hash(), tx1)
pool.queueTx(tx2.Hash(), tx2)
pool.queueTx(tx3.Hash(), tx3)
pool.checkQueue()
if len(pool.pending) != 1 {
t.Error("expected tx pool to be 1, got", len(pool.pending))
}
if len(pool.queue[from]) != 2 {
t.Error("expected len(queue) == 2, got", len(pool.queue[from]))
}
}
func TestRemoveTx(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(100), key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
pool.queueTx(tx.Hash(), tx)
pool.addTx(tx.Hash(), from, tx)
if len(pool.queue) != 1 {
t.Error("expected queue to be 1, got", len(pool.queue))
}
if len(pool.pending) != 1 {
t.Error("expected txs to be 1, got", len(pool.pending))
}
pool.RemoveTx(tx.Hash())
if len(pool.queue) > 0 {
t.Error("expected queue to be 0, got", len(pool.queue))
}
if len(pool.pending) > 0 {
t.Error("expected txs to be 0, got", len(pool.pending))
}
}
func TestNegativeValue(t *testing.T) {
pool, key := setupTxPool()
tx, _ := types.NewTransaction(0, common.Address{}, big.NewInt(-1), big.NewInt(100), big.NewInt(1), nil).SignECDSA(key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
if err := pool.Add(tx); err != ErrNegativeValue {
t.Error("expected", ErrNegativeValue, "got", err)
}
}
func TestTransactionChainFork(t *testing.T) {
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool.currentState = func() (*state.StateDB, error) { return statedb, nil }
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
}
resetState()
tx := transaction(0, big.NewInt(100000), key)
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
pool.RemoveTransactions([]*types.Transaction{tx})
// reset the pool's internal state
resetState()
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
}
func TestTransactionDoubleNonce(t *testing.T) {
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool.currentState = func() (*state.StateDB, error) { return statedb, nil }
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
}
resetState()
tx := transaction(0, big.NewInt(100000), key)
tx2 := transaction(0, big.NewInt(1000000), key)
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
if err := pool.add(tx2); err != nil {
t.Error("didn't expect error", err)
}
pool.checkQueue()
if len(pool.pending) != 2 {
t.Error("expected 2 pending txs. Got", len(pool.pending))
}
}
func TestMissingNonce(t *testing.T) {
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
tx := transaction(1, big.NewInt(100000), key)
if err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
if len(pool.pending) != 0 {
t.Error("expected 0 pending transactions, got", len(pool.pending))
}
if len(pool.queue[addr]) != 1 {
t.Error("expected 1 queued transaction, got", len(pool.queue[addr]))
}
}
func TestNonceRecovery(t *testing.T) {
const n = 10
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
currentState, _ := pool.currentState()
currentState.SetNonce(addr, n)
currentState.AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
tx := transaction(n, big.NewInt(100000), key)
if err := pool.Add(tx); err != nil {
t.Error(err)
}
// simulate some weird re-order of transactions and missing nonce(s)
currentState.SetNonce(addr, n-1)
pool.resetState()
if fn := pool.pendingState.GetNonce(addr); fn != n+1 {
t.Errorf("expected nonce to be %d, got %d", n+1, fn)
}
}
func TestRemovedTxEvent(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(1000000), key)
from, _ := tx.From()
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1000000000000))
pool.eventMux.Post(RemovedTransactionEvent{types.Transactions{tx}})
pool.eventMux.Post(ChainHeadEvent{nil})
if len(pool.pending) != 1 {
t.Error("expected 1 pending tx, got", len(pool.pending))
}
}
// Tests that if an account runs out of funds, any pending and queued transactions
// are dropped.
func TestTransactionDropping(t *testing.T) {
// Create a test account and fund it
pool, key := setupTxPool()
account, _ := transaction(0, big.NewInt(0), key).From()
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000))
// Add some pending and some queued transactions
var (
tx0 = transaction(0, big.NewInt(100), key)
tx1 = transaction(1, big.NewInt(200), key)
tx10 = transaction(10, big.NewInt(100), key)
tx11 = transaction(11, big.NewInt(200), key)
)
pool.addTx(tx0.Hash(), account, tx0)
pool.addTx(tx1.Hash(), account, tx1)
pool.queueTx(tx10.Hash(), tx10)
pool.queueTx(tx11.Hash(), tx11)
// Check that pre and post validations leave the pool as is
if len(pool.pending) != 2 {
t.Errorf("pending transaction mismatch: have %d, want %d", len(pool.pending), 2)
}
if len(pool.queue[account]) != 2 {
t.Errorf("queued transaction mismatch: have %d, want %d", len(pool.queue), 2)
}
pool.resetState()
if len(pool.pending) != 2 {
t.Errorf("pending transaction mismatch: have %d, want %d", len(pool.pending), 2)
}
if len(pool.queue[account]) != 2 {
t.Errorf("queued transaction mismatch: have %d, want %d", len(pool.queue), 2)
}
// Reduce the balance of the account, and check that invalidated transactions are dropped
state.AddBalance(account, big.NewInt(-750))
pool.resetState()
if _, ok := pool.pending[tx0.Hash()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
if _, ok := pool.pending[tx1.Hash()]; ok {
t.Errorf("out-of-fund pending transaction present: %v", tx1)
}
if _, ok := pool.queue[account][tx10.Hash()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
if _, ok := pool.queue[account][tx11.Hash()]; ok {
t.Errorf("out-of-fund queued transaction present: %v", tx11)
}
}
// Tests that if a transaction is dropped from the current pending pool (e.g. out
// of funds), all consecutive (still valid, but not executable) transactions are
// postponed back into the future queue to prevent broadcasting them.
func TestTransactionPostponing(t *testing.T) {
// Create a test account and fund it
pool, key := setupTxPool()
account, _ := transaction(0, big.NewInt(0), key).From()
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000))
// Add a batch of consecutive pending transactions for validation
txns := []*types.Transaction{}
for i := 0; i < 100; i++ {
var tx *types.Transaction
if i%2 == 0 {
tx = transaction(uint64(i), big.NewInt(100), key)
} else {
tx = transaction(uint64(i), big.NewInt(500), key)
}
pool.addTx(tx.Hash(), account, tx)
txns = append(txns, tx)
}
// Check that pre and post validations leave the pool as is
if len(pool.pending) != len(txns) {
t.Errorf("pending transaction mismatch: have %d, want %d", len(pool.pending), len(txns))
}
if len(pool.queue[account]) != 0 {
t.Errorf("queued transaction mismatch: have %d, want %d", len(pool.queue), 0)
}
pool.resetState()
if len(pool.pending) != len(txns) {
t.Errorf("pending transaction mismatch: have %d, want %d", len(pool.pending), len(txns))
}
if len(pool.queue[account]) != 0 {
t.Errorf("queued transaction mismatch: have %d, want %d", len(pool.queue), 0)
}
// Reduce the balance of the account, and check that transactions are reorganized
state.AddBalance(account, big.NewInt(-750))
pool.resetState()
if _, ok := pool.pending[txns[0].Hash()]; !ok {
t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txns[0])
}
if _, ok := pool.queue[account][txns[0].Hash()]; ok {
t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txns[0])
}
for i, tx := range txns[1:] {
if i%2 == 1 {
if _, ok := pool.pending[tx.Hash()]; ok {
t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx)
}
if _, ok := pool.queue[account][tx.Hash()]; !ok {
t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx)
}
} else {
if _, ok := pool.pending[tx.Hash()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx)
}
if _, ok := pool.queue[account][tx.Hash()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx)
}
}
}
}
// Tests that if the transaction count belonging to a single account goes above
// some threshold, the higher-nonce transactions are dropped to prevent DOS attacks.
func TestTransactionQueueLimiting(t *testing.T) {
// Create a test account and fund it
pool, key := setupTxPool()
account, _ := transaction(0, big.NewInt(0), key).From()
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000000))
// Keep queuing up transactions and make sure all above a limit are dropped
for i := uint64(1); i <= maxQueued+5; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
if len(pool.pending) != 0 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
}
if i <= maxQueued {
if len(pool.queue[account]) != int(i) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, len(pool.queue[account]), i)
}
} else {
if len(pool.queue[account]) != maxQueued {
t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, len(pool.queue[account]), maxQueued)
}
}
}
}
// Tests that even if the transaction count belonging to a single account goes
// above some threshold, as long as the transactions are executable, they are
// accepted.
func TestTransactionPendingLimiting(t *testing.T) {
// Create a test account and fund it
pool, key := setupTxPool()
account, _ := transaction(0, big.NewInt(0), key).From()
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000000))
// Keep queuing up transactions and make sure all above a limit are dropped
for i := uint64(0); i < maxQueued+5; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
if len(pool.pending) != int(i)+1 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), i+1)
}
if len(pool.queue[account]) != 0 {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, len(pool.queue[account]), 0)
}
}
}
// Tests that the transaction limits are enforced the same way regardless of
// whether the transactions are added one by one or in batches.
func TestTransactionQueueLimitingEquivalency(t *testing.T) { testTransactionLimitingEquivalency(t, 1) }
func TestTransactionPendingLimitingEquivalency(t *testing.T) { testTransactionLimitingEquivalency(t, 0) }
func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
// Add a batch of transactions to a pool one by one
pool1, key1 := setupTxPool()
account1, _ := transaction(0, big.NewInt(0), key1).From()
state1, _ := pool1.currentState()
state1.AddBalance(account1, big.NewInt(1000000))
for i := uint64(0); i < maxQueued+5; i++ {
if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
}
// Add a batch of transactions to a pool in one big batch
pool2, key2 := setupTxPool()
account2, _ := transaction(0, big.NewInt(0), key2).From()
state2, _ := pool2.currentState()
state2.AddBalance(account2, big.NewInt(1000000))
txns := []*types.Transaction{}
for i := uint64(0); i < maxQueued+5; i++ {
txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
}
pool2.AddTransactions(txns)
// Ensure the batch optimization honors the same pool mechanics
if len(pool1.pending) != len(pool2.pending) {
t.Errorf("pending transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.pending), len(pool2.pending))
}
if len(pool1.queue[account1]) != len(pool2.queue[account2]) {
t.Errorf("queued transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.queue[account1]), len(pool2.queue[account2]))
}
}
// Benchmarks the speed of validating the contents of the pending queue of the
// transaction pool.
func BenchmarkValidatePool100(b *testing.B) { benchmarkValidatePool(b, 100) }
func BenchmarkValidatePool1000(b *testing.B) { benchmarkValidatePool(b, 1000) }
func BenchmarkValidatePool10000(b *testing.B) { benchmarkValidatePool(b, 10000) }
func benchmarkValidatePool(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
pool, key := setupTxPool()
account, _ := transaction(0, big.NewInt(0), key).From()
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000000))
for i := 0; i < size; i++ {
tx := transaction(uint64(i), big.NewInt(100000), key)
pool.addTx(tx.Hash(), account, tx)
}
// Benchmark the speed of pool validation
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.validatePool()
}
}
// Benchmarks the speed of scheduling the contents of the future queue of the
// transaction pool.
func BenchmarkCheckQueue100(b *testing.B) { benchmarkCheckQueue(b, 100) }
func BenchmarkCheckQueue1000(b *testing.B) { benchmarkCheckQueue(b, 1000) }
func BenchmarkCheckQueue10000(b *testing.B) { benchmarkCheckQueue(b, 10000) }
func benchmarkCheckQueue(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
pool, key := setupTxPool()
account, _ := transaction(0, big.NewInt(0), key).From()
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000000))
for i := 0; i < size; i++ {
tx := transaction(uint64(1+i), big.NewInt(100000), key)
pool.queueTx(tx.Hash(), tx)
}
// Benchmark the speed of queue scheduling
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.checkQueue()
}
}

core/types.go Normal file

@ -0,0 +1,70 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
// Validator is an interface which defines the standard for block validation.
//
// The validator is responsible for validating an incoming block or, if
// desired, only its header for fast validation.
//
// ValidateBlock validates the given block and should return an error if it
// failed to do so and should be used for "full" validation.
//
// ValidateHeader validates the given header and parent and returns an error
// if it failed to do so.
//
// ValidateState validates the given statedb and optionally the receipts and
// gas used. The implementor should decide what to do with the given input.
type Validator interface {
ValidateBlock(block *types.Block) error
ValidateHeader(header, parent *types.Header, checkPow bool) error
ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
}
// Processor is an interface for processing blocks using a given initial state.
//
// Process takes the block to be processed and the statedb upon which the
// initial state is based. It should return the receipts generated, amount
// of gas used in the process and return an error if any of the internal rules
// failed.
type Processor interface {
Process(block *types.Block, statedb *state.StateDB) (types.Receipts, vm.Logs, *big.Int, error)
}
// Backend is an interface defining the basic functionality of an operable node
// with everything required to be a functional, valid Ethereum node.
//
// TODO Remove this
type Backend interface {
AccountManager() *accounts.Manager
BlockChain() *BlockChain
TxPool() *TxPool
ChainDb() ethdb.Database
DappDb() ethdb.Database
EventMux() *event.TypeMux
}
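As an illustration of the Processor contract (a hedged sketch, not part of this changeset; the real implementation is the StateProcessor added above):

type nopProcessor struct{}

// Process accepts every block without executing anything, returning no
// receipts, no logs and zero gas used.
func (nopProcessor) Process(block *types.Block, statedb *state.StateDB) (types.Receipts, vm.Logs, *big.Int, error) {
	return nil, nil, new(big.Int), nil
}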


@ -17,11 +17,13 @@
package types
import (
"container/heap"
"crypto/ecdsa"
"errors"
"fmt"
"io"
"math/big"
"sort"
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
@ -155,11 +157,47 @@ func (tx *Transaction) Size() common.StorageSize {
return common.StorageSize(c)
}
// From returns the address derived from the signature (V, R, S) using the
// secp256k1 elliptic curve and an error if it failed deriving or upon an
// incorrect signature.
//
// From uses the homestead consensus rules to determine whether the signature is
// valid.
//
// From caches the address, allowing it to be used regardless of
// Frontier / Homestead. However, the first time it is called it runs
// signature validations, so we need two versions. This makes it
// easier to ensure backwards compatibility of things like package rpc
// where eth_getblockbynumber uses tx.From() and needs to work for
// both txs before and after the first homestead block. Signatures
// valid in homestead are a subset of valid ones in Frontier.
func (tx *Transaction) From() (common.Address, error) {
return doFrom(tx, true)
}
// FromFrontier returns the address derived from the signature (V, R, S) using
// the secp256k1 elliptic curve and an error if it failed deriving or upon an
// incorrect signature.
//
// FromFrontier uses the frontier consensus rules to determine whether the
// signature is valid.
//
// FromFrontier caches the address, allowing it to be used regardless of
// Frontier / Homestead. However, the first time it is called it runs
// signature validations, so we need two versions. This makes it
// easier to ensure backwards compatibility of things like package rpc
// where eth_getblockbynumber uses tx.From() and needs to work for
// both txs before and after the first homestead block. Signatures
// valid in homestead are a subset of valid ones in Frontier.
func (tx *Transaction) FromFrontier() (common.Address, error) {
return doFrom(tx, false)
}
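To make the split concrete, a hedged sketch of how a caller could pick between the two; the sender helper and the homesteadBlock parameter are illustrative, not part of this diff:

// sender picks the recovery rules from the block holding the transaction:
// blocks at or after the homestead transition use the stricter From,
// earlier blocks use FromFrontier.
func sender(tx *Transaction, blockNumber, homesteadBlock *big.Int) (common.Address, error) {
	if blockNumber.Cmp(homesteadBlock) >= 0 {
		return tx.From() // homestead: s must lie in the lower half of the curve order
	}
	return tx.FromFrontier() // frontier: the full s range is accepted
}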
func doFrom(tx *Transaction, homestead bool) (common.Address, error) {
if from := tx.from.Load(); from != nil {
return from.(common.Address), nil
}
pubkey, err := tx.publicKey()
pubkey, err := tx.publicKey(homestead)
if err != nil {
return common.Address{}, err
}
@ -180,8 +218,8 @@ func (tx *Transaction) SignatureValues() (v byte, r *big.Int, s *big.Int) {
return tx.data.V, new(big.Int).Set(tx.data.R), new(big.Int).Set(tx.data.S)
}
func (tx *Transaction) publicKey() ([]byte, error) {
if !crypto.ValidateSignatureValues(tx.data.V, tx.data.R, tx.data.S) {
func (tx *Transaction) publicKey(homestead bool) ([]byte, error) {
if !crypto.ValidateSignatureValues(tx.data.V, tx.data.R, tx.data.S, homestead) {
return nil, ErrInvalidSig
}
@ -302,27 +340,78 @@ func TxDifference(a, b Transactions) (keep Transactions) {
return keep
}
type TxByNonce struct{ Transactions }
// TxByNonce implements the sort interface to allow sorting a list of transactions
// by their nonces. This is usually only useful for sorting transactions from a
// single account, otherwise a nonce comparison doesn't make much sense.
type TxByNonce Transactions
func (s TxByNonce) Less(i, j int) bool {
return s.Transactions[i].data.AccountNonce < s.Transactions[j].data.AccountNonce
func (s TxByNonce) Len() int { return len(s) }
func (s TxByNonce) Less(i, j int) bool { return s[i].data.AccountNonce < s[j].data.AccountNonce }
func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// TxByPrice implements both the sort and the heap interface, making it useful
// for all-at-once sorting as well as individually adding and removing elements.
type TxByPrice Transactions
func (s TxByPrice) Len() int { return len(s) }
func (s TxByPrice) Less(i, j int) bool { return s[i].data.Price.Cmp(s[j].data.Price) > 0 }
func (s TxByPrice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *TxByPrice) Push(x interface{}) {
*s = append(*s, x.(*Transaction))
}
type TxByPrice struct{ Transactions }
func (s TxByPrice) Less(i, j int) bool {
return s.Transactions[i].data.Price.Cmp(s.Transactions[j].data.Price) > 0
func (s *TxByPrice) Pop() interface{} {
old := *s
n := len(old)
x := old[n-1]
*s = old[0 : n-1]
return x
}
type TxByPriceAndNonce struct{ Transactions }
func (s TxByPriceAndNonce) Less(i, j int) bool {
// we can ignore the error here. Sorting shouldn't care about validness
ifrom, _ := s.Transactions[i].From()
jfrom, _ := s.Transactions[j].From()
// favour nonce if they are from the same recipient
if ifrom == jfrom {
return s.Transactions[i].data.AccountNonce < s.Transactions[j].data.AccountNonce
// SortByPriceAndNonce sorts the transactions by price in such a way that the
// nonce orderings within a single account are maintained.
//
// Note, this is not as trivial as it seems at first glance as there are three
// different criteria that need to be taken into account (price, nonce, account
// match), which cannot be done with any plain sorting method, as certain items
// cannot be compared without context.
//
// This method first separates the list of transactions into individual
// sender accounts and sorts them by nonce. After the account nonce ordering is
// satisfied, the results are merged back together by price, always comparing only
// the head transaction from each account. This is done via a heap to keep it fast.
func SortByPriceAndNonce(txs []*Transaction) {
// Separate the transactions by account and sort by nonce
byNonce := make(map[common.Address][]*Transaction)
for _, tx := range txs {
acc, _ := tx.From() // we only sort valid txs so this cannot fail
byNonce[acc] = append(byNonce[acc], tx)
}
for _, accTxs := range byNonce {
sort.Sort(TxByNonce(accTxs))
}
// Initialize a price based heap with the head transactions
byPrice := make(TxByPrice, 0, len(byNonce))
for acc, accTxs := range byNonce {
byPrice = append(byPrice, accTxs[0])
byNonce[acc] = accTxs[1:]
}
heap.Init(&byPrice)
// Merge by replacing the best with the next from the same account
txs = txs[:0]
for len(byPrice) > 0 {
// Retrieve the next best transaction by price
best := heap.Pop(&byPrice).(*Transaction)
// Push in its place the next transaction from the same account
acc, _ := best.From() // we only sort valid txs so this cannot fail
if accTxs, ok := byNonce[acc]; ok && len(accTxs) > 0 {
heap.Push(&byPrice, accTxs[0])
byNonce[acc] = accTxs[1:]
}
// Accumulate the best priced transaction
txs = append(txs, best)
}
return s.Transactions[i].data.Price.Cmp(s.Transactions[j].data.Price) > 0
}
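A small standalone usage sketch against this revision's API; the three-transaction setup is illustrative:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	txs := []*types.Transaction{}
	// Three transactions from one account with descending prices; the sort
	// must keep their nonces increasing regardless of price.
	for nonce := uint64(0); nonce < 3; nonce++ {
		price := big.NewInt(int64(300 - 100*nonce))
		tx, _ := types.NewTransaction(nonce, common.Address{}, big.NewInt(1), big.NewInt(21000), price, nil).SignECDSA(key)
		txs = append(txs, tx)
	}
	types.SortByPriceAndNonce(txs)
	for _, tx := range txs {
		fmt.Println(tx.Nonce(), tx.GasPrice()) // nonces come out ascending: 0, 1, 2
	}
}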


@ -117,3 +117,60 @@ func TestRecipientNormal(t *testing.T) {
t.Error("derived address doesn't match")
}
}
// Tests that transactions can be correctly sorted according to their price in
// decreasing order, but at the same time with increasing nonces when issued by
// the same account.
func TestTransactionPriceNonceSort(t *testing.T) {
// Generate a batch of accounts to start with
keys := make([]*ecdsa.PrivateKey, 25)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
}
// Generate a batch of transactions with overlapping values, but shifted nonces
txs := []*Transaction{}
for start, key := range keys {
for i := 0; i < 25; i++ {
tx, _ := NewTransaction(uint64(start+i), common.Address{}, big.NewInt(100), big.NewInt(100), big.NewInt(int64(start+i)), nil).SignECDSA(key)
txs = append(txs, tx)
}
}
// Sort the transactions and cross check the nonce ordering
SortByPriceAndNonce(txs)
for i, txi := range txs {
fromi, _ := txi.From()
// Make sure the nonce order is valid
for j, txj := range txs[i+1:] {
fromj, _ := txj.From()
if fromi == fromj && txi.Nonce() > txj.Nonce() {
t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
}
}
// Find the previous and next nonce of this account
prev, next := i-1, i+1
for j := i - 1; j >= 0; j-- {
if fromj, _ := txs[j].From(); fromi == fromj {
prev = j
break
}
}
for j := i + 1; j < len(txs); j++ {
if fromj, _ := txs[j].From(); fromi == fromj {
next = j
break
}
}
// Make sure that in between the neighboring nonces, the transaction is correctly positioned price-wise
for j := prev + 1; j < next; j++ {
fromj, _ := txs[j].From()
if j < i && txs[j].GasPrice().Cmp(txi.GasPrice()) < 0 {
t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
}
if j > i && txs[j].GasPrice().Cmp(txi.GasPrice()) > 0 {
t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) > tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
}
}
}
}


@ -26,14 +26,19 @@ import (
type ContractRef interface {
ReturnGas(*big.Int, *big.Int)
Address() common.Address
Value() *big.Int
SetCode([]byte)
}
// Contract represents an ethereum contract in the state database. It contains
// the contract code, calling arguments. Contract implements ContractReg
// the contract code, calling arguments. Contract implements ContractRef
type Contract struct {
caller ContractRef
self ContractRef
// CallerAddress is the address of the caller that initialised this
// contract. However, when the "call method" is delegated this value
// needs to be initialised to that of the caller's caller.
CallerAddress common.Address
caller ContractRef
self ContractRef
jumpdests destinations // result of JUMPDEST analysis.
@ -44,11 +49,13 @@ type Contract struct {
value, Gas, UsedGas, Price *big.Int
Args []byte
DelegateCall bool
}
// Create a new context for the given data items.
// NewContract returns a new contract environment for the execution of the EVM.
func NewContract(caller ContractRef, object ContractRef, value, gas, price *big.Int) *Contract {
c := &Contract{caller: caller, self: object, Args: nil}
c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object, Args: nil}
if parent, ok := caller.(*Contract); ok {
// Reuse JUMPDEST analysis from parent context if available.
@ -69,6 +76,16 @@ func NewContract(caller ContractRef, object ContractRef, value, gas, price *big.
return c
}
// AsDelegate sets the contract to be a delegate call and returns the current
// contract (for chaining calls)
func (c *Contract) AsDelegate() *Contract {
c.DelegateCall = true
// NOTE: caller must, at all times, be a contract. It should never happen
// that caller is something other than a Contract.
c.CallerAddress = c.caller.(*Contract).CallerAddress
return c
}
// GetOp returns the n'th element in the contract's byte array
func (c *Contract) GetOp(n uint64) OpCode {
return OpCode(c.GetByte(n))
@ -83,13 +100,19 @@ func (c *Contract) GetByte(n uint64) byte {
return 0
}
// Return returns the given ret argument and returns any remaining gas to the
// caller
func (c *Contract) Return(ret []byte) []byte {
// Caller returns the caller of the contract.
//
// When the contract is a delegate call, Caller resolves recursively to the
// caller's caller.
func (c *Contract) Caller() common.Address {
return c.CallerAddress
}
// Finalise finalises the contract, returning any remaining gas to the original
// caller.
func (c *Contract) Finalise() {
// Return the remaining gas to the caller
c.caller.ReturnGas(c.Gas, c.Price)
return ret
}
// UseGas attempts to use the given gas, subtracting it and returning true on success
@ -113,6 +136,11 @@ func (c *Contract) Address() common.Address {
return c.self.Address()
}
// Value returns the contract's value (sent to it from its caller)
func (c *Contract) Value() *big.Int {
return c.value
}
// SetCode sets the contract's code
func (self *Contract) SetCode(code []byte) {
self.Code = code


@ -93,7 +93,8 @@ func ecrecoverFunc(in []byte) []byte {
vbig := common.Bytes2Big(in[32:64])
v := byte(vbig.Uint64())
if !crypto.ValidateSignatureValues(v, r, s) {
// tighter sig s values in homestead only apply to tx sigs
if !crypto.ValidateSignatureValues(v, r, s, false) {
glog.V(logger.Debug).Infof("EC RECOVER FAIL: v, r or s value invalid")
return nil
}


@ -70,6 +70,8 @@ type Environment interface {
Call(me ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error)
// Take another's contract code and execute within our own context
CallCode(me ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error)
// Same as CallCode except sender and value are propagated from parent to child scope
DelegateCall(me ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error)
// Create a new contract
Create(me ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error)
}
@ -121,4 +123,6 @@ type Account interface {
Address() common.Address
ReturnGas(*big.Int, *big.Int)
SetCode([]byte)
EachStorage(cb func(key, value []byte))
Value() *big.Int
}


@ -24,4 +24,5 @@ import (
)
var OutOfGasError = errors.New("Out of gas")
var CodeStoreOutOfGasError = errors.New("Contract creation code storage out of gas")
var DepthError = fmt.Errorf("Max call depth exceeded (%d)", params.CallCreateDepth)


@ -136,6 +136,7 @@ var _baseCheck = map[OpCode]req{
CREATE: {3, params.CreateGas, 1},
CALL: {7, params.CallGas, 1},
CALLCODE: {7, params.CallGas, 1},
DELEGATECALL: {6, params.CallGas, 1},
JUMPDEST: {0, params.JumpdestGas, 0},
SUICIDE: {1, Zero, 0},
RETURN: {2, Zero, 0},


@ -337,7 +337,7 @@ func opOrigin(instr instruction, pc *uint64, env Environment, contract *Contract
}
func opCaller(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {
stack.push(common.Bytes2Big(contract.caller.Address().Bytes()))
stack.push(contract.Caller().Big())
}
func opCallValue(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {
@ -485,7 +485,6 @@ func opSload(instr instruction, pc *uint64, env Environment, contract *Contract,
func opSstore(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {
loc := common.BigToHash(stack.pop())
val := stack.pop()
env.Db().SetState(contract.Address(), loc, common.BigToHash(val))
}
@ -514,25 +513,19 @@ func opCreate(instr instruction, pc *uint64, env Environment, contract *Contract
offset, size = stack.pop(), stack.pop()
input = memory.Get(offset.Int64(), size.Int64())
gas = new(big.Int).Set(contract.Gas)
addr common.Address
ret []byte
suberr error
)
contract.UseGas(contract.Gas)
ret, addr, suberr = env.Create(contract, input, gas, contract.Price, value)
if suberr != nil {
_, addr, suberr := env.Create(contract, input, gas, contract.Price, value)
// Push item on the stack based on the returned error. If the ruleset is
// homestead we must check for CodeStoreOutOfGasError (homestead only
// rule) and treat it as an error; if the ruleset is frontier we must
// ignore this error and pretend the operation was successful.
if params.IsHomestead(env.BlockNumber()) && suberr == CodeStoreOutOfGasError {
stack.push(new(big.Int))
} else if suberr != nil && suberr != CodeStoreOutOfGasError {
stack.push(new(big.Int))
} else {
// gas < len(ret) * Createinstr.dataGas == NO_CODE
dataGas := big.NewInt(int64(len(ret)))
dataGas.Mul(dataGas, params.CreateDataGas)
if contract.UseGas(dataGas) {
env.Db().SetCode(addr, ret)
}
stack.push(addr.Big())
}
}
@ -598,6 +591,21 @@ func opCallCode(instr instruction, pc *uint64, env Environment, contract *Contra
}
}
func opDelegateCall(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {
gas, to, inOffset, inSize, outOffset, outSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.BigToAddress(to)
args := memory.Get(inOffset.Int64(), inSize.Int64())
ret, err := env.DelegateCall(contract, toAddr, args, gas, contract.Price)
if err != nil {
stack.push(new(big.Int))
} else {
stack.push(big.NewInt(1))
memory.Set(outOffset.Uint64(), outSize.Uint64(), ret)
}
}
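Note the arity: DELEGATECALL pops six stack words (there is no value argument), where CALL and CALLCODE pop seven. A hedged, runnable sketch of caller-side bytecode built with the runtime package introduced later in this diff; pinning params.HomesteadBlock mirrors the jump table test below, and the zero target address and fixed inner gas are illustrative:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Assumption: force the homestead rules on so DELEGATECALL is live.
	params.HomesteadBlock = big.NewInt(1)

	// Push in the reverse of opDelegateCall's pop order:
	// outSize, outOffset, inSize, inOffset, to, gas, then the opcode itself.
	code := []byte{
		byte(vm.PUSH1), 0x00, // outSize
		byte(vm.PUSH1), 0x00, // outOffset
		byte(vm.PUSH1), 0x00, // inSize
		byte(vm.PUSH1), 0x00, // inOffset
		byte(vm.PUSH1), 0x00, // to (the empty address)
		byte(vm.PUSH2), 0xff, 0xff, // gas for the inner call (illustrative)
		byte(vm.DELEGATECALL),
		byte(vm.STOP),
	}
	ret, _, err := runtime.Execute(code, nil, &runtime.Config{BlockNumber: big.NewInt(2)})
	fmt.Println(ret, err)
}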
func opReturn(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {
}
func opStop(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {


@ -275,6 +275,11 @@ func CompileProgram(program *Program) (err error) {
program.addInstr(op, pc, opGas, nil)
case CREATE:
program.addInstr(op, pc, opCreate, nil)
case DELEGATECALL:
// Instruction added regardless of homestead phase.
// Homestead (and execution of the opcode) is checked during
// runtime.
program.addInstr(op, pc, opDelegateCall, nil)
case CALL:
program.addInstr(op, pc, opCall, nil)
case CALLCODE:
@ -317,10 +322,14 @@ func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env
}()
}
homestead := params.IsHomestead(env.BlockNumber())
for pc < uint64(len(program.instructions)) {
instrCount++
instr := program.instructions[pc]
if instr.Op() == DELEGATECALL && !homestead {
return nil, fmt.Errorf("Invalid opcode 0x%x", instr.Op())
}
ret, err := instr.do(program, &pc, env, contract, mem, stack)
if err != nil {
@ -328,13 +337,13 @@ func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env
}
if instr.halts() {
return contract.Return(ret), nil
return ret, nil
}
}
contract.Input = nil
return contract.Return(nil), nil
return nil, nil
}
// validDest checks if the given destination is a valid one given the
@ -457,7 +466,6 @@ func jitCalculateGasAndSize(env Environment, contract *Contract, instr instructi
gas.Add(gas, stack.data[stack.len()-1])
if op == CALL {
//if env.Db().GetStateObject(common.BigToAddress(stack.data[stack.len()-2])) == nil {
if !env.Db().Exist(common.BigToAddress(stack.data[stack.len()-2])) {
gas.Add(gas, params.CallNewAccountGas)
}
@ -470,6 +478,13 @@ func jitCalculateGasAndSize(env Environment, contract *Contract, instr instructi
x := calcMemSize(stack.data[stack.len()-6], stack.data[stack.len()-7])
y := calcMemSize(stack.data[stack.len()-4], stack.data[stack.len()-5])
newMemSize = common.BigMax(x, y)
case DELEGATECALL:
gas.Add(gas, stack.data[stack.len()-1])
x := calcMemSize(stack.data[stack.len()-5], stack.data[stack.len()-6])
y := calcMemSize(stack.data[stack.len()-3], stack.data[stack.len()-4])
newMemSize = common.BigMax(x, y)
}
quadMemGas(mem, newMemSize, gas)


@ -125,14 +125,17 @@ type vmBench struct {
type account struct{}
func (account) SubBalance(amount *big.Int) {}
func (account) AddBalance(amount *big.Int) {}
func (account) SetBalance(*big.Int) {}
func (account) SetNonce(uint64) {}
func (account) Balance() *big.Int { return nil }
func (account) Address() common.Address { return common.Address{} }
func (account) ReturnGas(*big.Int, *big.Int) {}
func (account) SetCode([]byte) {}
func (account) SubBalance(amount *big.Int) {}
func (account) AddBalance(amount *big.Int) {}
func (account) SetAddress(common.Address) {}
func (account) Value() *big.Int { return nil }
func (account) SetBalance(*big.Int) {}
func (account) SetNonce(uint64) {}
func (account) Balance() *big.Int { return nil }
func (account) Address() common.Address { return common.Address{} }
func (account) ReturnGas(*big.Int, *big.Int) {}
func (account) SetCode([]byte) {}
func (account) EachStorage(cb func(key, value []byte)) {}
func runVmBench(test vmBench, b *testing.B) {
var sender account
@ -205,3 +208,6 @@ func (self *Env) CallCode(caller ContractRef, addr common.Address, data []byte,
func (self *Env) Create(caller ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) {
return nil, common.Address{}, nil
}
func (self *Env) DelegateCall(me ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) {
return nil, nil
}


@ -1,13 +1,29 @@
package vm
import "math/big"
import (
"math/big"
"github.com/ethereum/go-ethereum/params"
)
type jumpPtr struct {
fn instrFn
valid bool
}
var jumpTable [256]jumpPtr
type vmJumpTable [256]jumpPtr
func (jt vmJumpTable) init(blockNumber *big.Int) {
// when initialising a new VM execution we must first check the homestead
// changes.
if params.IsHomestead(blockNumber) {
jumpTable[DELEGATECALL] = jumpPtr{opDelegateCall, true}
} else {
jumpTable[DELEGATECALL] = jumpPtr{nil, false}
}
}
var jumpTable vmJumpTable
func init() {
jumpTable[ADD] = jumpPtr{opAdd, true}


@ -0,0 +1,24 @@
package vm
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/params"
)
func TestInit(t *testing.T) {
params.HomesteadBlock = big.NewInt(1)
jumpTable.init(big.NewInt(0))
if jumpTable[DELEGATECALL].valid {
t.Error("Expected DELEGATECALL not to be present")
}
for _, n := range []int64{1, 2, 100} {
jumpTable.init(big.NewInt(n))
if !jumpTable[DELEGATECALL].valid {
t.Error("Expected DELEGATECALL to be present for block", n)
}
}
}


@ -200,6 +200,7 @@ const (
CALL
CALLCODE
RETURN
DELEGATECALL
SUICIDE = 0xff
)
@ -349,11 +350,12 @@ var opCodeToString = map[OpCode]string{
LOG4: "LOG4",
// 0xf0 range
CREATE: "CREATE",
CALL: "CALL",
RETURN: "RETURN",
CALLCODE: "CALLCODE",
SUICIDE: "SUICIDE",
CREATE: "CREATE",
CALL: "CALL",
RETURN: "RETURN",
CALLCODE: "CALLCODE",
DELEGATECALL: "DELEGATECALL",
SUICIDE: "SUICIDE",
PUSH: "PUSH",
DUP: "DUP",
@ -402,6 +404,7 @@ var stringToOp = map[string]OpCode{
"CALLDATALOAD": CALLDATALOAD,
"CALLDATASIZE": CALLDATASIZE,
"CALLDATACOPY": CALLDATACOPY,
"DELEGATECALL": DELEGATECALL,
"CODESIZE": CODESIZE,
"CODECOPY": CODECOPY,
"GASPRICE": GASPRICE,


@ -14,12 +14,5 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import "github.com/ethereum/go-ethereum/core/vm"
type BlockProcessor interface {
Process(*Block) (vm.Logs, Receipts, error)
ValidateHeader(*Header, bool, bool) error
ValidateHeaderWithParent(*Header, *Header, bool, bool) error
}
// Package runtime provides a basic execution model for executing EVM code.
package runtime

core/vm/runtime/env.go Normal file (+110 lines)

@ -0,0 +1,110 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package runtime
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
)
// Env is a basic runtime environment required for running the EVM.
type Env struct {
depth int
state *state.StateDB
origin common.Address
coinbase common.Address
number *big.Int
time *big.Int
difficulty *big.Int
gasLimit *big.Int
logs []vm.StructLog
getHashFn func(uint64) common.Hash
}
// NewEnv returns a new vm.Environment
func NewEnv(cfg *Config, state *state.StateDB) vm.Environment {
return &Env{
state: state,
origin: cfg.Origin,
coinbase: cfg.Coinbase,
number: cfg.BlockNumber,
time: cfg.Time,
difficulty: cfg.Difficulty,
gasLimit: cfg.GasLimit,
}
}
func (self *Env) StructLogs() []vm.StructLog {
return self.logs
}
func (self *Env) AddStructLog(log vm.StructLog) {
self.logs = append(self.logs, log)
}
func (self *Env) Origin() common.Address { return self.origin }
func (self *Env) BlockNumber() *big.Int { return self.number }
func (self *Env) Coinbase() common.Address { return self.coinbase }
func (self *Env) Time() *big.Int { return self.time }
func (self *Env) Difficulty() *big.Int { return self.difficulty }
func (self *Env) Db() vm.Database { return self.state }
func (self *Env) GasLimit() *big.Int { return self.gasLimit }
func (self *Env) VmType() vm.Type { return vm.StdVmTy }
func (self *Env) GetHash(n uint64) common.Hash {
return self.getHashFn(n)
}
func (self *Env) AddLog(log *vm.Log) {
self.state.AddLog(log)
}
func (self *Env) Depth() int { return self.depth }
func (self *Env) SetDepth(i int) { self.depth = i }
func (self *Env) CanTransfer(from common.Address, balance *big.Int) bool {
return self.state.GetBalance(from).Cmp(balance) >= 0
}
func (self *Env) MakeSnapshot() vm.Database {
return self.state.Copy()
}
func (self *Env) SetSnapshot(copy vm.Database) {
self.state.Set(copy.(*state.StateDB))
}
func (self *Env) Transfer(from, to vm.Account, amount *big.Int) {
core.Transfer(from, to, amount)
}
func (self *Env) Call(caller vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) {
return core.Call(self, caller, addr, data, gas, price, value)
}
func (self *Env) CallCode(caller vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) {
return core.CallCode(self, caller, addr, data, gas, price, value)
}
func (self *Env) DelegateCall(me vm.ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) {
return core.DelegateCall(self, me, addr, data, gas, price)
}
func (self *Env) Create(caller vm.ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) {
return core.Create(self, caller, data, gas, price, value)
}

core/vm/runtime/runtime.go Normal file (+121 lines)

@ -0,0 +1,121 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package runtime
import (
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
)
// Config is a basic type specifying certain configuration flags for running
// the EVM.
type Config struct {
Difficulty *big.Int
Origin common.Address
Coinbase common.Address
BlockNumber *big.Int
Time *big.Int
GasLimit *big.Int
GasPrice *big.Int
Value *big.Int
DisableJit bool // "disable" so it's enabled by default
Debug bool
GetHashFn func(n uint64) common.Hash
}
// sets defaults on the config
func setDefaults(cfg *Config) {
if cfg.Difficulty == nil {
cfg.Difficulty = new(big.Int)
}
if cfg.Time == nil {
cfg.Time = big.NewInt(time.Now().Unix())
}
if cfg.GasLimit == nil {
cfg.GasLimit = new(big.Int).Set(common.MaxBig)
}
if cfg.GasPrice == nil {
cfg.GasPrice = new(big.Int)
}
if cfg.Value == nil {
cfg.Value = new(big.Int)
}
if cfg.BlockNumber == nil {
cfg.BlockNumber = new(big.Int)
}
if cfg.GetHashFn == nil {
cfg.GetHashFn = func(n uint64) common.Hash {
return common.BytesToHash(crypto.Sha3([]byte(new(big.Int).SetUint64(n).String())))
}
}
}
// Execute executes the code using the input as call data during the execution.
// It returns the EVM's return value, the new state and an error if it failed.
//
// Execute sets up an in-memory, temporary environment for the execution of
// the given code. It enables the JIT by default and makes sure that it's
// restored to its original state afterwards.
func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
if cfg == nil {
cfg = new(Config)
}
setDefaults(cfg)
// defer the call to setting back the original values
defer func(debug, forceJit, enableJit bool) {
vm.Debug = debug
vm.ForceJit = forceJit
vm.EnableJit = enableJit
}(vm.Debug, vm.ForceJit, vm.EnableJit)
vm.ForceJit = !cfg.DisableJit
vm.EnableJit = !cfg.DisableJit
vm.Debug = cfg.Debug
var (
db, _ = ethdb.NewMemDatabase()
statedb, _ = state.New(common.Hash{}, db)
vmenv = NewEnv(cfg, statedb)
sender = statedb.CreateAccount(cfg.Origin)
receiver = statedb.CreateAccount(common.StringToAddress("contract"))
)
// set the receiver's (the executing contract) code for execution.
receiver.SetCode(code)
// Call the code with the given configuration.
ret, err := vmenv.Call(
sender,
receiver.Address(),
input,
cfg.GasLimit,
cfg.GasPrice,
cfg.Value,
)
if cfg.Debug {
vm.StdErrFormat(vmenv.StructLogs())
}
return ret, statedb, err
}


@ -14,21 +14,21 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
package runtime_test
import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm/runtime"
)
// TODO move this to types?
type Backend interface {
AccountManager() *accounts.Manager
BlockProcessor() *BlockProcessor
BlockChain() *BlockChain
TxPool() *TxPool
ChainDb() ethdb.Database
DappDb() ethdb.Database
EventMux() *event.TypeMux
func ExampleExecute() {
ret, _, err := runtime.Execute(common.Hex2Bytes("6060604052600a8060106000396000f360606040526008565b00"), nil, nil)
if err != nil {
fmt.Println(err)
}
fmt.Println(ret)
// Output:
// [96 96 96 64 82 96 8 86 91 0]
}


@ -0,0 +1,120 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package runtime
import (
"strings"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
)
func TestDefaults(t *testing.T) {
cfg := new(Config)
setDefaults(cfg)
if cfg.Difficulty == nil {
t.Error("expected difficulty to be non nil")
}
if cfg.Time == nil {
t.Error("expected time to be non nil")
}
if cfg.GasLimit == nil {
t.Error("expected gas limit to be non nil")
}
if cfg.GasPrice == nil {
t.Error("expected gas price to be non nil")
}
if cfg.Value == nil {
t.Error("expected value to be non nil")
}
if cfg.GetHashFn == nil {
t.Error("expected get hash function to be non nil")
}
if cfg.BlockNumber == nil {
t.Error("expected block number to be non nil")
}
}
func TestEnvironment(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatalf("crashed with: %v", r)
}
}()
Execute([]byte{
byte(vm.DIFFICULTY),
byte(vm.TIMESTAMP),
byte(vm.GASLIMIT),
byte(vm.PUSH1),
byte(vm.ORIGIN),
byte(vm.BLOCKHASH),
byte(vm.COINBASE),
}, nil, nil)
}
func TestRestoreDefaults(t *testing.T) {
Execute(nil, nil, &Config{Debug: true})
if vm.ForceJit {
t.Error("expected force jit to be disabled")
}
if vm.Debug {
t.Error("expected debug to be disabled")
}
if vm.EnableJit {
t.Error("expected jit to be disabled")
}
}
func BenchmarkCall(b *testing.B) {
var definition = `[{"constant":true,"inputs":[],"name":"seller","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[],"name":"abort","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"value","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[],"name":"refund","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"buyer","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[],"name":"confirmReceived","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"state","outputs":[{"name":"","type":"uint8"}],"type":"function"},{"constant":false,"inputs":[],"name":"confirmPurchase","outputs":[],"type":"function"},{"inputs":[],"type":"constructor"},{"anonymous":false,"inputs":[],"name":"Aborted","type":"event"},{"anonymous":false,"inputs":[],"name":"PurchaseConfirmed","type":"event"},{"anonymous":false,"inputs":[],"name":"ItemReceived","type":"event"},{"anonymous":false,"inputs":[],"name":"Refunded","type":"event"}]`
var code = common.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
abi, err := abi.JSON(strings.NewReader(definition))
if err != nil {
b.Fatal(err)
}
cpurchase, err := abi.Pack("confirmPurchase")
if err != nil {
b.Fatal(err)
}
creceived, err := abi.Pack("confirmReceived")
if err != nil {
b.Fatal(err)
}
refund, err := abi.Pack("refund")
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < 400; j++ {
Execute(code, cpurchase, nil)
Execute(code, creceived, nil)
Execute(code, refund, nil)
}
}
}


@ -35,6 +35,9 @@ type Vm struct {
// New returns a new Vm
func New(env Environment) *Vm {
// init the jump table. Also prepares the homestead changes
jumpTable.init(env.BlockNumber())
return &Vm{env: env}
}
@ -43,16 +46,6 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
self.env.SetDepth(self.env.Depth() + 1)
defer self.env.SetDepth(self.env.Depth() - 1)
// User defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
defer func() {
if err != nil {
// In case of a VM exception (known exceptions) all gas consumed (panics NOT included).
contract.UseGas(contract.Gas)
ret = contract.Return(nil)
}
}()
if contract.CodeAddr != nil {
if p := Precompiled[contract.CodeAddr.Str()]; p != nil {
return self.RunPrecompiled(p, input, contract)
@ -61,7 +54,7 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
// Don't bother with the execution if there's no code.
if len(contract.Code) == 0 {
return contract.Return(nil), nil
return nil, nil
}
var (
@ -160,7 +153,6 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
// Get the memory location of pc
op = contract.GetOp(pc)
// calculate the new memory size and gas price for the current executing opcode
newMemSize, cost, err = calculateGasAndSize(self.env, contract, caller, op, statedb, mem, stack)
if err != nil {
@ -177,7 +169,6 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
mem.Resize(newMemSize.Uint64())
// Add a log message
self.log(pc, op, contract.Gas, cost, mem, stack, contract, nil)
if opPtr := jumpTable[op]; opPtr.valid {
if opPtr.fn != nil {
opPtr.fn(instruction{}, &pc, self.env, contract, mem, stack)
@ -205,13 +196,13 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
offset, size := stack.pop(), stack.pop()
ret := mem.GetPtr(offset.Int64(), size.Int64())
return contract.Return(ret), nil
return ret, nil
case SUICIDE:
opSuicide(instruction{}, nil, self.env, contract, mem, stack)
fallthrough
case STOP: // Stop the contract
return contract.Return(nil), nil
return nil, nil
}
}
} else {
@ -332,7 +323,6 @@ func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef
gas.Add(gas, stack.data[stack.len()-1])
if op == CALL {
//if env.Db().GetStateObject(common.BigToAddress(stack.data[stack.len()-2])) == nil {
if !env.Db().Exist(common.BigToAddress(stack.data[stack.len()-2])) {
gas.Add(gas, params.CallNewAccountGas)
}
@ -345,6 +335,13 @@ func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef
x := calcMemSize(stack.data[stack.len()-6], stack.data[stack.len()-7])
y := calcMemSize(stack.data[stack.len()-4], stack.data[stack.len()-5])
newMemSize = common.BigMax(x, y)
case DELEGATECALL:
gas.Add(gas, stack.data[stack.len()-1])
x := calcMemSize(stack.data[stack.len()-5], stack.data[stack.len()-6])
y := calcMemSize(stack.data[stack.len()-3], stack.data[stack.len()-4])
newMemSize = common.BigMax(x, y)
}
quadMemGas(mem, newMemSize, gas)
@ -358,7 +355,7 @@ func (self *Vm) RunPrecompiled(p *PrecompiledAccount, input []byte, contract *Co
if contract.UseGas(gas) {
ret = p.Call(input)
return contract.Return(ret), nil
return ret, nil
} else {
return nil, OutOfGasError
}


@ -94,6 +94,10 @@ func (self *VMEnv) CallCode(me vm.ContractRef, addr common.Address, data []byte,
return CallCode(self, me, addr, data, gas, price, value)
}
func (self *VMEnv) DelegateCall(me vm.ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) {
return DelegateCall(self, me, addr, data, gas, price)
}
func (self *VMEnv) Create(me vm.ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) {
return Create(self, me, data, gas, price, value)
}


@ -171,12 +171,21 @@ func GenerateKey() (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(S256(), rand.Reader)
}
func ValidateSignatureValues(v byte, r, s *big.Int) bool {
func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {
if r.Cmp(common.Big1) < 0 || s.Cmp(common.Big1) < 0 {
return false
}
vint := uint32(v)
if r.Cmp(secp256k1n) < 0 && s.Cmp(secp256k1n) < 0 && (vint == 27 || vint == 28) {
// reject upper range of s values (ECDSA malleability)
// see discussion in secp256k1/libsecp256k1/include/secp256k1.h
if homestead && s.Cmp(secp256k1.HalfN) > 0 {
return false
}
// Frontier: allow s to be in full N range
if s.Cmp(secp256k1.N) >= 0 {
return false
}
if r.Cmp(secp256k1.N) < 0 && (vint == 27 || vint == 28) {
return true
} else {
return false

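A runnable sketch of the boundary, assuming this revision's exported secp256k1.HalfN: an s just above half the curve order is still acceptable under frontier rules but is rejected under homestead's tighter malleability rule:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/secp256k1"
)

func main() {
	r := big.NewInt(1)
	s := new(big.Int).Add(secp256k1.HalfN, big.NewInt(1)) // s > N/2, but s < N
	fmt.Println(crypto.ValidateSignatureValues(27, r, s, false)) // true: frontier allows the full s range
	fmt.Println(crypto.ValidateSignatureValues(27, r, s, true))  // false: homestead rejects the upper half
}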

@ -174,7 +174,7 @@ func TestLoadECDSAFile(t *testing.T) {
func TestValidateSignatureValues(t *testing.T) {
check := func(expected bool, v byte, r, s *big.Int) {
if ValidateSignatureValues(v, r, s) != expected {
if ValidateSignatureValues(v, r, s, false) != expected {
t.Errorf("mismatch for v: %d r: %d s: %d want: %v", v, r, s, expected)
}
}


@ -191,11 +191,9 @@ func concatKDF(hash hash.Hash, z, s1 []byte, kdLen int) (k []byte, err error) {
// messageTag computes the MAC of a message (called the tag) as per
// SEC 1, 3.5.
func messageTag(hash func() hash.Hash, km, msg, shared []byte) []byte {
if shared == nil {
shared = make([]byte, 0)
}
mac := hmac.New(hash, km)
mac.Write(msg)
mac.Write(shared)
tag := mac.Sum(nil)
return tag
}
@ -242,9 +240,11 @@ func symDecrypt(rand io.Reader, params *ECIESParams, key, ct []byte) (m []byte,
return
}
// Encrypt encrypts a message using ECIES as specified in SEC 1, 5.1. If
// the shared information parameters aren't being used, they should be
// nil.
// Encrypt encrypts a message using ECIES as specified in SEC 1, 5.1.
//
// s1 and s2 contain shared information that is not part of the resulting
// ciphertext. s1 is fed into key derivation, s2 is fed into the MAC. If the
// shared information parameters aren't being used, they should be nil.
func Encrypt(rand io.Reader, pub *PublicKey, m, s1, s2 []byte) (ct []byte, err error) {
params := pub.Params
if params == nil {


@ -353,6 +353,36 @@ func TestEncryptDecrypt(t *testing.T) {
}
}
func TestDecryptShared2(t *testing.T) {
prv, err := GenerateKey(rand.Reader, DefaultCurve, nil)
if err != nil {
t.Fatal(err)
}
message := []byte("Hello, world.")
shared2 := []byte("shared data 2")
ct, err := Encrypt(rand.Reader, &prv.PublicKey, message, nil, shared2)
if err != nil {
t.Fatal(err)
}
// Check that decrypting with correct shared data works.
pt, err := prv.Decrypt(rand.Reader, ct, nil, shared2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(pt, message) {
t.Fatal("ecies: plaintext doesn't match message")
}
// Decrypting without shared data or incorrect shared data fails.
if _, err = prv.Decrypt(rand.Reader, ct, nil, nil); err == nil {
t.Fatal("ecies: decrypting without shared data didn't fail")
}
if _, err = prv.Decrypt(rand.Reader, ct, nil, []byte("garbage")); err == nil {
t.Fatal("ecies: decrypting with incorrect shared data didn't fail")
}
}
// TestMarshalEncryption validates the encode/decode produces a valid
// ECIES encryption key.
func TestMarshalEncryption(t *testing.T) {


@ -1,25 +0,0 @@
secp256k1-go
=======
golang secp256k1 library
Implements cryptographic operations for the secp256k1 ECDSA curve used by Bitcoin.
Installing
===
GMP library headers are required to build. On Debian-based systems, the package is called `libgmp-dev`.
```
sudo apt-get install libgmp-dev
```
Now compiles with cgo!
Test
===
To run tests do
```
go tests
```


@ -0,0 +1,33 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package secp256k1
import "C"
import "unsafe"
// Callbacks for converting libsecp256k1 internal faults into
// recoverable Go panics.
//export secp256k1GoPanicIllegal
func secp256k1GoPanicIllegal(msg *C.char, data unsafe.Pointer) {
panic("illegal argument: " + C.GoString(msg))
}
//export secp256k1GoPanicError
func secp256k1GoPanicError(msg *C.char, data unsafe.Pointer) {
panic("internal error: " + C.GoString(msg))
}


@ -20,14 +20,8 @@ package secp256k1
/*
#cgo CFLAGS: -I./libsecp256k1
#cgo darwin CFLAGS: -I/usr/local/include
#cgo freebsd CFLAGS: -I/usr/local/include
#cgo linux,arm CFLAGS: -I/usr/local/arm/include
#cgo LDFLAGS: -lgmp
#cgo darwin LDFLAGS: -L/usr/local/lib
#cgo freebsd LDFLAGS: -L/usr/local/lib
#cgo linux,arm LDFLAGS: -L/usr/local/arm/lib
#define USE_NUM_GMP
#cgo CFLAGS: -I./libsecp256k1/src/
#define USE_NUM_NONE
#define USE_FIELD_10X26
#define USE_FIELD_INV_BUILTIN
#define USE_SCALAR_8X32
@ -35,12 +29,16 @@ package secp256k1
#define NDEBUG
#include "./libsecp256k1/src/secp256k1.c"
#include "./libsecp256k1/src/modules/recovery/main_impl.h"
typedef void (*callbackFunc) (const char* msg, void* data);
extern void secp256k1GoPanicIllegal(const char* msg, void* data);
extern void secp256k1GoPanicError(const char* msg, void* data);
*/
import "C"
import (
"bytes"
"errors"
"math/big"
"unsafe"
"github.com/ethereum/go-ethereum/crypto/randentropy"
@ -57,13 +55,29 @@ import (
*/
// holds ptr to secp256k1_context_struct (see secp256k1/include/secp256k1.h)
var context *C.secp256k1_context
var (
context *C.secp256k1_context
N *big.Int
HalfN *big.Int
)
func init() {
N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
// N / 2 == 57896044618658097711785492504343953926418782139537452191302581570759080747168
HalfN, _ = new(big.Int).SetString("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0", 16)
// Creating the context takes around 20 ms on a modern CPU.
context = C.secp256k1_context_create(3) // SECP256K1_START_SIGN | SECP256K1_START_VERIFY
C.secp256k1_context_set_illegal_callback(context, C.callbackFunc(C.secp256k1GoPanicIllegal), nil)
C.secp256k1_context_set_error_callback(context, C.callbackFunc(C.secp256k1GoPanicError), nil)
}
var (
ErrInvalidMsgLen = errors.New("invalid message length for signature recovery")
ErrInvalidSignatureLen = errors.New("invalid signature length")
ErrInvalidRecoveryID = errors.New("invalid signature recovery id")
)
func GenerateKeyPair() ([]byte, []byte) {
var seckey []byte = randentropy.GetEntropyCSPRNG(32)
var seckey_ptr *C.uchar = (*C.uchar)(unsafe.Pointer(&seckey[0]))
@ -177,69 +191,20 @@ func VerifySeckeyValidity(seckey []byte) error {
return nil
}
func VerifySignatureValidity(sig []byte) bool {
//64+1
if len(sig) != 65 {
return false
}
//malleability check, highest bit must be 1
if (sig[32] & 0x80) == 0x80 {
return false
}
//recovery id check
if sig[64] >= 4 {
return false
}
return true
}
//for compressed signatures, does not need pubkey
func VerifySignature(msg []byte, sig []byte, pubkey1 []byte) error {
if msg == nil || sig == nil || pubkey1 == nil {
return errors.New("inputs must be non-nil")
}
if len(sig) != 65 {
return errors.New("invalid signature length")
}
if len(pubkey1) != 65 {
return errors.New("Invalid public key length")
}
//to enforce malleability, highest bit of S must be 0
//S starts at 32nd byte
if (sig[32] & 0x80) == 0x80 { //highest bit must be 1
return errors.New("Signature not malleable")
}
if sig[64] >= 4 {
return errors.New("Recover byte invalid")
}
// if pubkey recovered, signature valid
pubkey2, err := RecoverPubkey(msg, sig)
if err != nil {
return err
}
if len(pubkey2) != 65 {
return errors.New("Invalid recovered public key length")
}
if !bytes.Equal(pubkey1, pubkey2) {
return errors.New("Public key does not match recovered public key")
}
return nil
}
// recovers a public key from the signature
// RecoverPubkey returns the public key of the signer.
// msg must be the 32-byte hash of the message to be signed.
// sig must be a 65-byte compact ECDSA signature containing the
// recovery id as the last element.
func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) {
if len(sig) != 65 {
return nil, errors.New("Invalid signature length")
if len(msg) != 32 {
return nil, ErrInvalidMsgLen
}
if err := checkSignature(sig); err != nil {
return nil, err
}
msg_ptr := (*C.uchar)(unsafe.Pointer(&msg[0]))
sig_ptr := (*C.uchar)(unsafe.Pointer(&sig[0]))
pubkey := make([]byte, 64)
/*
this slice is used for both the recoverable signature and the
@ -248,17 +213,15 @@ func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) {
pubkey recovery is one bottleneck during load in Ethereum
*/
bytes65 := make([]byte, 65)
pubkey_ptr := (*C.secp256k1_pubkey)(unsafe.Pointer(&pubkey[0]))
recoverable_sig_ptr := (*C.secp256k1_ecdsa_recoverable_signature)(unsafe.Pointer(&bytes65[0]))
recid := C.int(sig[64])
ret := C.secp256k1_ecdsa_recoverable_signature_parse_compact(
context,
recoverable_sig_ptr,
sig_ptr,
recid)
if ret == C.int(0) {
return nil, errors.New("Failed to parse signature")
}
@ -269,20 +232,28 @@ func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) {
recoverable_sig_ptr,
msg_ptr,
)
if ret == C.int(0) {
return nil, errors.New("Failed to recover public key")
} else {
serialized_pubkey_ptr := (*C.uchar)(unsafe.Pointer(&bytes65[0]))
var output_len C.size_t
C.secp256k1_ec_pubkey_serialize( // always returns 1
context,
serialized_pubkey_ptr,
&output_len,
pubkey_ptr,
0, // SECP256K1_EC_COMPRESSED
)
return bytes65, nil
}
serialized_pubkey_ptr := (*C.uchar)(unsafe.Pointer(&bytes65[0]))
var output_len C.size_t
C.secp256k1_ec_pubkey_serialize( // always returns 1
context,
serialized_pubkey_ptr,
&output_len,
pubkey_ptr,
0, // SECP256K1_EC_COMPRESSED
)
return bytes65, nil
}
func checkSignature(sig []byte) error {
if len(sig) != 65 {
return ErrInvalidSignatureLen
}
if sig[64] >= 4 {
return ErrInvalidRecoveryID
}
return nil
}


@ -56,6 +56,17 @@ func TestSignatureValidity(t *testing.T) {
}
}
func TestInvalidRecoveryID(t *testing.T) {
_, seckey := GenerateKeyPair()
msg := randentropy.GetEntropyCSPRNG(32)
sig, _ := Sign(msg, seckey)
sig[64] = 99
_, err := RecoverPubkey(msg, sig)
if err != ErrInvalidRecoveryID {
t.Fatalf("got %q, want %q", err, ErrInvalidRecoveryID)
}
}
func TestSignAndRecover(t *testing.T) {
pubkey1, seckey := GenerateKeyPair()
msg := randentropy.GetEntropyCSPRNG(32)
@ -70,10 +81,6 @@ func TestSignAndRecover(t *testing.T) {
if !bytes.Equal(pubkey1, pubkey2) {
t.Errorf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
}
err = VerifySignature(msg, sig, pubkey1)
if err != nil {
t.Errorf("signature verification error: %s", err)
}
}
func TestRandomMessagesWithSameKey(t *testing.T) {


@ -65,7 +65,7 @@ const (
var (
jsonlogger = logger.NewJsonLogger()
datadirInUseErrNos = []uint{11, 32, 35}
datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true}
portInUseErrRE = regexp.MustCompile("address already in use")
defaultBootNodes = []*discover.Node{
@ -231,9 +231,7 @@ type Ethereum struct {
chainDb ethdb.Database // Block chain database
dappDb ethdb.Database // Dapp database
//*** SERVICES ***
// State manager for processing new blocks and managing the over all states
blockProcessor *core.BlockProcessor
// Handlers
txPool *core.TxPool
blockchain *core.BlockChain
accountManager *accounts.Manager
@ -286,15 +284,7 @@ func New(config *Config) (*Ethereum, error) {
// Open the chain database and perform any upgrades needed
chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata"))
if err != nil {
var ok bool
errno := uint(err.(syscall.Errno))
for _, no := range datadirInUseErrNos {
if errno == no {
ok = true
break
}
}
if ok {
if errno, ok := err.(syscall.Errno); ok && datadirInUseErrnos[uint(errno)] {
err = fmt.Errorf("%v (check if another instance of geth is already running with the same data directory '%s')", err, config.DataDir)
}
return nil, fmt.Errorf("blockchain db err: %v", err)
@ -311,14 +301,7 @@ func New(config *Config) (*Ethereum, error) {
dappDb, err := newdb(filepath.Join(config.DataDir, "dapp"))
if err != nil {
var ok bool
for _, no := range datadirInUseErrNos {
if uint(err.(syscall.Errno)) == no {
ok = true
break
}
}
if ok {
if errno, ok := err.(syscall.Errno); ok && datadirInUseErrnos[uint(errno)] {
err = fmt.Errorf("%v (check if another instance of geth is already running with the same data directory '%s')", err, config.DataDir)
}
return nil, fmt.Errorf("dapp db err: %v", err)
@ -422,8 +405,6 @@ func New(config *Config) (*Ethereum, error) {
newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
eth.txPool = newPool
eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux())
eth.blockchain.SetProcessor(eth.blockProcessor)
if eth.protocolManager, err = NewProtocolManager(config.FastSync, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
return nil, err
}
@ -467,62 +448,10 @@ func New(config *Config) (*Ethereum, error) {
return eth, nil
}
type NodeInfo struct {
Name string
NodeUrl string
NodeID string
IP string
DiscPort int // UDP listening port for discovery protocol
TCPPort int // TCP listening port for RLPx
Td string
ListenAddr string
}
func (s *Ethereum) NodeInfo() *NodeInfo {
node := s.net.Self()
return &NodeInfo{
Name: s.Name(),
NodeUrl: node.String(),
NodeID: node.ID.String(),
IP: node.IP.String(),
DiscPort: int(node.UDP),
TCPPort: int(node.TCP),
ListenAddr: s.net.ListenAddr,
Td: s.BlockChain().GetTd(s.BlockChain().CurrentBlock().Hash()).String(),
}
}
type PeerInfo struct {
ID string
Name string
Caps string
RemoteAddress string
LocalAddress string
}
func newPeerInfo(peer *p2p.Peer) *PeerInfo {
var caps []string
for _, cap := range peer.Caps() {
caps = append(caps, cap.String())
}
return &PeerInfo{
ID: peer.ID().String(),
Name: peer.Name(),
Caps: strings.Join(caps, ", "),
RemoteAddress: peer.RemoteAddr().String(),
LocalAddress: peer.LocalAddr().String(),
}
}
// PeersInfo returns an array of PeerInfo objects describing connected peers
func (s *Ethereum) PeersInfo() (peersinfo []*PeerInfo) {
for _, peer := range s.net.Peers() {
if peer != nil {
peersinfo = append(peersinfo, newPeerInfo(peer))
}
}
return
// Network retrieves the underlying P2P network server. This should eventually
// be moved out into a protocol independent package, but for now use an accessor.
func (s *Ethereum) Network() *p2p.Server {
return s.net
}
func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {
@ -552,24 +481,23 @@ func (s *Ethereum) IsMining() bool { return s.miner.Mining() }
func (s *Ethereum) Miner() *miner.Miner { return s.miner }
// func (s *Ethereum) Logger() logger.LogSystem { return s.logger }
func (s *Ethereum) Name() string { return s.net.Name }
func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager }
func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain }
func (s *Ethereum) BlockProcessor() *core.BlockProcessor { return s.blockProcessor }
func (s *Ethereum) TxPool() *core.TxPool { return s.txPool }
func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper }
func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) DappDb() ethdb.Database { return s.dappDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) PeerCount() int { return s.net.PeerCount() }
func (s *Ethereum) Peers() []*p2p.Peer { return s.net.Peers() }
func (s *Ethereum) MaxPeers() int { return s.net.MaxPeers }
func (s *Ethereum) ClientVersion() string { return s.clientVersion }
func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
func (s *Ethereum) NetVersion() int { return s.netVersionId }
func (s *Ethereum) ShhVersion() int { return s.shhVersionId }
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
func (s *Ethereum) Name() string { return s.net.Name }
func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager }
func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain }
func (s *Ethereum) TxPool() *core.TxPool { return s.txPool }
func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper }
func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) DappDb() ethdb.Database { return s.dappDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) PeerCount() int { return s.net.PeerCount() }
func (s *Ethereum) Peers() []*p2p.Peer { return s.net.Peers() }
func (s *Ethereum) MaxPeers() int { return s.net.MaxPeers }
func (s *Ethereum) ClientVersion() string { return s.clientVersion }
func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
func (s *Ethereum) NetVersion() int { return s.netVersionId }
func (s *Ethereum) ShhVersion() int { return s.shhVersionId }
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
// Start the ethereum
func (s *Ethereum) Start() error {


@ -32,7 +32,7 @@ func TestMipmapUpgrade(t *testing.T) {
}
// store the receipts
err := core.PutReceipts(db, receipts)
err := core.WriteReceipts(db, receipts)
if err != nil {
t.Fatal(err)
}
@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) {
if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}

View File

@ -45,16 +45,17 @@ var (
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
hashTTL = 5 * time.Second // [eth/61] Time it takes for a hash request to time out
blockSoftTTL = 3 * time.Second // [eth/61] Request completion threshold for increasing or decreasing a peer's bandwidth
blockHardTTL = 3 * blockSoftTTL // [eth/61] Maximum time allowance before a block request is considered expired
headerTTL = 5 * time.Second // [eth/62] Time it takes for a header request to time out
bodySoftTTL = 3 * time.Second // [eth/62] Request completion threshold for increasing or decreasing a peer's bandwidth
bodyHardTTL = 3 * bodySoftTTL // [eth/62] Maximum time allowance before a block body request is considered expired
receiptSoftTTL = 3 * time.Second // [eth/63] Request completion threshold for increasing or decreasing a peer's bandwidth
receiptHardTTL = 3 * receiptSoftTTL // [eth/63] Maximum time allowance before a receipt request is considered expired
stateSoftTTL = 2 * time.Second // [eth/63] Request completion threshold for increasing or decreasing a peer's bandwidth
stateHardTTL = 3 * stateSoftTTL // [eth/63] Maximum time allowance before a node data request is considered expired
hashTTL = 3 * time.Second // [eth/61] Time it takes for a hash request to time out
blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired
headerTTL = 3 * time.Second // [eth/62] Time it takes for a header request to time out
bodyTargetRTT = 3 * time.Second / 2 // [eth/62] Target time for completing a block body retrieval request
bodyTTL = 3 * bodyTargetRTT // [eth/62] Maximum time allowance before a block body request is considered expired
receiptTargetRTT = 3 * time.Second / 2 // [eth/63] Target time for completing a receipt retrieval request
receiptTTL = 3 * receiptTargetRTT // [eth/63] Maximum time allowance before a receipt request is considered expired
stateTargetRTT = 2 * time.Second / 2 // [eth/63] Target time for completing a state trie retrieval request
stateTTL = 3 * stateTargetRTT // [eth/63] Maximum time allowance before a node data request is considered expired
maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
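For orientation, a minimal sketch of the new timing model above: each request kind now has a single target round-trip time, and its expiry TTL is defined as three target round trips, replacing the former soft/hard TTL pair. The constant names mirror the diff; the program itself is purely illustrative.

package main

import (
	"fmt"
	"time"
)

const (
	bodyTargetRTT = 3 * time.Second / 2 // target time for one block body request
	bodyTTL       = 3 * bodyTargetRTT   // expiry allowance: three target round trips
)

func main() {
	fmt.Println(bodyTargetRTT, bodyTTL) // prints: 1.5s 4.5s
}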
@ -74,7 +75,6 @@ var (
errBadPeer = errors.New("action from bad peer ignored")
errStallingPeer = errors.New("peer is stalling")
errNoPeers = errors.New("no peers to keep download active")
errPendingQueue = errors.New("pending items in queue")
errTimeout = errors.New("timeout")
errEmptyHashSet = errors.New("empty hash set by peer")
errEmptyHeaderSet = errors.New("empty header set by peer")
@ -90,6 +90,7 @@ var (
errCancelBodyFetch = errors.New("block body download canceled (requested)")
errCancelReceiptFetch = errors.New("receipt download canceled (requested)")
errCancelStateFetch = errors.New("state data download canceled (requested)")
errCancelProcessing = errors.New("processing canceled (requested)")
errNoSyncActive = errors.New("no sync active")
)
@ -111,25 +112,24 @@ type Downloader struct {
syncStatsLock sync.RWMutex // Lock protecting the sync stats fields
// Callbacks
hasHeader headerCheckFn // Checks if a header is present in the chain
hasBlock blockCheckFn // Checks if a block is present in the chain
getHeader headerRetrievalFn // Retrieves a header from the chain
getBlock blockRetrievalFn // Retrieves a block from the chain
headHeader headHeaderRetrievalFn // Retrieves the head header from the chain
headBlock headBlockRetrievalFn // Retrieves the head block from the chain
headFastBlock headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
commitHeadBlock headBlockCommitterFn // Commits a manually assembled block as the chain head
getTd tdRetrievalFn // Retrieves the TD of a block from the chain
insertHeaders headerChainInsertFn // Injects a batch of headers into the chain
insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain
insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain
rollback chainRollbackFn // Removes a batch of recently added chain links
dropPeer peerDropFn // Drops a peer for misbehaving
hasHeader headerCheckFn // Checks if a header is present in the chain
hasBlockAndState blockAndStateCheckFn // Checks if a block and associated state is present in the chain
getHeader headerRetrievalFn // Retrieves a header from the chain
getBlock blockRetrievalFn // Retrieves a block from the chain
headHeader headHeaderRetrievalFn // Retrieves the head header from the chain
headBlock headBlockRetrievalFn // Retrieves the head block from the chain
headFastBlock headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
commitHeadBlock headBlockCommitterFn // Commits a manually assembled block as the chain head
getTd tdRetrievalFn // Retrieves the TD of a block from the chain
insertHeaders headerChainInsertFn // Injects a batch of headers into the chain
insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain
insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain
rollback chainRollbackFn // Removes a batch of recently added chain links
dropPeer peerDropFn // Drops a peer for misbehaving
// Status
synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
synchronising int32
processing int32
notified int32
// Channels
@ -156,41 +156,41 @@ type Downloader struct {
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn,
getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn,
commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn,
insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader {
func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn,
getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn,
headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn,
insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader {
return &Downloader{
mode: FullSync,
mux: mux,
queue: newQueue(stateDb),
peers: newPeerSet(),
hasHeader: hasHeader,
hasBlock: hasBlock,
getHeader: getHeader,
getBlock: getBlock,
headHeader: headHeader,
headBlock: headBlock,
headFastBlock: headFastBlock,
commitHeadBlock: commitHeadBlock,
getTd: getTd,
insertHeaders: insertHeaders,
insertBlocks: insertBlocks,
insertReceipts: insertReceipts,
rollback: rollback,
dropPeer: dropPeer,
newPeerCh: make(chan *peer, 1),
hashCh: make(chan dataPack, 1),
blockCh: make(chan dataPack, 1),
headerCh: make(chan dataPack, 1),
bodyCh: make(chan dataPack, 1),
receiptCh: make(chan dataPack, 1),
stateCh: make(chan dataPack, 1),
blockWakeCh: make(chan bool, 1),
bodyWakeCh: make(chan bool, 1),
receiptWakeCh: make(chan bool, 1),
stateWakeCh: make(chan bool, 1),
mode: FullSync,
mux: mux,
queue: newQueue(stateDb),
peers: newPeerSet(),
hasHeader: hasHeader,
hasBlockAndState: hasBlockAndState,
getHeader: getHeader,
getBlock: getBlock,
headHeader: headHeader,
headBlock: headBlock,
headFastBlock: headFastBlock,
commitHeadBlock: commitHeadBlock,
getTd: getTd,
insertHeaders: insertHeaders,
insertBlocks: insertBlocks,
insertReceipts: insertReceipts,
rollback: rollback,
dropPeer: dropPeer,
newPeerCh: make(chan *peer, 1),
hashCh: make(chan dataPack, 1),
blockCh: make(chan dataPack, 1),
headerCh: make(chan dataPack, 1),
bodyCh: make(chan dataPack, 1),
receiptCh: make(chan dataPack, 1),
stateCh: make(chan dataPack, 1),
blockWakeCh: make(chan bool, 1),
bodyWakeCh: make(chan bool, 1),
receiptWakeCh: make(chan bool, 1),
stateWakeCh: make(chan bool, 1),
}
}
@ -215,7 +215,7 @@ func (d *Downloader) Progress() (uint64, uint64, uint64) {
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
return atomic.LoadInt32(&d.synchronising) > 0 || atomic.LoadInt32(&d.processing) > 0
return atomic.LoadInt32(&d.synchronising) > 0
}
// RegisterPeer injects a new download peer into the set of block sources to be
@ -263,9 +263,6 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
d.dropPeer(id)
case errPendingQueue:
glog.V(logger.Debug).Infoln("Synchronisation aborted:", err)
default:
glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
}
@ -290,10 +287,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
glog.V(logger.Info).Infoln("Block synchronisation started")
}
// Abort if the queue still contains some leftover data
if d.queue.GetHeadResult() != nil {
return errPendingQueue
}
// Reset the queue, peer set and wake channels to clean any internal leftover state
d.queue.Reset()
d.peers.Reset()
@ -335,7 +328,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
defer func() {
// reset on error
if err != nil {
d.cancel()
d.mux.Post(FailedEvent{err})
} else {
d.mux.Post(DoneEvent{})
@ -365,23 +357,15 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
d.syncStatsChainHeight = latest
d.syncStatsLock.Unlock()
// Initiate the sync using a concurrent hash and block retrieval algorithm
// Initiate the sync using a concurrent hash and block retrieval algorithm
d.queue.Prepare(origin+1, d.mode, 0)
if d.syncInitHook != nil {
d.syncInitHook(origin, latest)
}
d.queue.Prepare(origin+1, d.mode, 0)
errc := make(chan error, 2)
go func() { errc <- d.fetchHashes61(p, td, origin+1) }()
go func() { errc <- d.fetchBlocks61(origin + 1) }()
// If any fetcher fails, cancel the other
if err := <-errc; err != nil {
d.cancel()
<-errc
return err
}
return <-errc
return d.spawnSync(
func() error { return d.fetchHashes61(p, td, origin+1) },
func() error { return d.fetchBlocks61(origin + 1) },
)
case p.version >= 62:
// Look up the sync boundaries: the common ancestor and the target block
@ -405,7 +389,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
switch d.mode {
case LightSync:
pivot = latest
case FastSync:
// Calculate the new fast/slow sync pivot point
pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
@ -426,34 +409,51 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
}
d.queue.Prepare(origin+1, d.mode, pivot)
if d.syncInitHook != nil {
d.syncInitHook(origin, latest)
}
errc := make(chan error, 4)
go func() { errc <- d.fetchHeaders(p, td, origin+1) }() // Headers are always retrieved
go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal and fast sync
go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync
go func() { errc <- d.fetchNodeData() }() // Node state data is retrieved during fast sync
// If any fetcher fails, cancel the others
var fail error
for i := 0; i < cap(errc); i++ {
if err := <-errc; err != nil {
if fail == nil {
fail = err
d.cancel()
}
}
}
return fail
return d.spawnSync(
func() error { return d.fetchHeaders(p, td, origin+1) }, // Headers are always retrieved
func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync
)
default:
// Something very wrong, stop right here
glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
return errBadPeer
}
return nil
}
// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers ...func() error) error {
var wg sync.WaitGroup
errc := make(chan error, len(fetchers)+1)
wg.Add(len(fetchers) + 1)
go func() { defer wg.Done(); errc <- d.process() }()
for _, fn := range fetchers {
fn := fn
go func() { defer wg.Done(); errc <- fn() }()
}
// Wait for the first error, then terminate the others.
var err error
for i := 0; i < len(fetchers)+1; i++ {
if i == len(fetchers) {
// Close the queue when all fetchers have exited.
// This will cause the block processor to end when
// it has processed the queue.
d.queue.Close()
}
if err = <-errc; err != nil {
break
}
}
d.queue.Close()
d.cancel()
wg.Wait()
return err
}
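As a standalone illustration of the pattern spawnSync uses (a hypothetical helper, not part of this diff): start every worker in its own goroutine, remember the first error while draining the remaining results, and return only after all goroutines have exited. The queue.Close and cancel steps of the real method are omitted here.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// runAll mirrors spawnSync's shape: launch all workers, keep the first
// error, and drain the rest so no goroutine blocks on its channel send.
func runAll(workers ...func() error) error {
	var wg sync.WaitGroup
	errc := make(chan error, len(workers))
	wg.Add(len(workers))
	for _, w := range workers {
		w := w // capture the loop variable for the goroutine
		go func() { defer wg.Done(); errc <- w() }()
	}
	var first error
	for range workers {
		if err := <-errc; err != nil && first == nil {
			first = err
		}
	}
	wg.Wait()
	return first
}

func main() {
	err := runAll(
		func() error { return nil },
		func() error { return errors.New("fetcher failed") },
	)
	fmt.Println(err) // fetcher failed
}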
// cancel cancels all of the operations and resets the queue. It returns true
@ -470,12 +470,10 @@ func (d *Downloader) cancel() {
}
}
d.cancelLock.Unlock()
// Reset the queue
d.queue.Reset()
}
// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
atomic.StoreInt32(&d.interrupt, 1)
d.cancel()
@ -489,21 +487,12 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
// Request the advertised remote head block and wait for the response
go p.getBlocks([]common.Hash{p.head})
timeout := time.After(blockSoftTTL)
timeout := time.After(hashTTL)
for {
select {
case <-d.cancelCh:
return 0, errCancelBlockFetch
case <-d.headerCh:
// Out of bounds eth/62 block headers received, ignore them
case <-d.bodyCh:
// Out of bounds eth/62 block bodies received, ignore them
case <-d.hashCh:
// Out of bounds hashes received, ignore them
case packet := <-d.blockCh:
// Discard anything not from the origin peer
if packet.PeerId() != p.id {
@ -521,6 +510,16 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
case <-timeout:
glog.V(logger.Debug).Infof("%v: head block timeout", p)
return 0, errTimeout
case <-d.hashCh:
// Out of bounds hashes received, ignore them
case <-d.headerCh:
case <-d.bodyCh:
case <-d.stateCh:
case <-d.receiptCh:
// Ignore eth/{62,63} packets because this is eth/61.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -565,24 +564,25 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
// Check if a common ancestor was found
finished = true
for i := len(hashes) - 1; i >= 0; i-- {
if d.hasBlock(hashes[i]) {
if d.hasBlockAndState(hashes[i]) {
number, hash = uint64(from)+uint64(i), hashes[i]
break
}
}
case <-timeout:
glog.V(logger.Debug).Infof("%v: head hash timeout", p)
return 0, errTimeout
case <-d.blockCh:
// Out of bounds blocks received, ignore them
case <-d.headerCh:
// Out of bounds eth/62 block headers received, ignore them
case <-d.bodyCh:
// Out of bounds eth/62 block bodies received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: head hash timeout", p)
return 0, errTimeout
case <-d.stateCh:
case <-d.receiptCh:
// Ignore eth/{62,63} packets because this is eth/61.
// These can arrive as a late delivery from a previous sync.
}
}
// If the head fetch already found an ancestor, return
@ -620,29 +620,30 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
arrived = true
// Modify the search interval based on the response
block := d.getBlock(hashes[0])
if block == nil {
if !d.hasBlockAndState(hashes[0]) {
end = check
break
}
block := d.getBlock(hashes[0]) // this doesn't check state, hence the above explicit check
if block.NumberU64() != check {
glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
return 0, errBadPeer
}
start = check
case <-timeout:
glog.V(logger.Debug).Infof("%v: search hash timeout", p)
return 0, errTimeout
case <-d.blockCh:
// Out of bounds blocks received, ignore them
case <-d.headerCh:
// Out of bounds eth/62 block headers received, ignore them
case <-d.bodyCh:
// Out of bounds eth/62 block bodies received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: search hash timeout", p)
return 0, errTimeout
case <-d.stateCh:
case <-d.receiptCh:
// Ignore eth/{62,63} packets because this is eth/61.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -676,12 +677,6 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
case <-d.cancelCh:
return errCancelHashFetch
case <-d.headerCh:
// Out of bounds eth/62 block headers received, ignore them
case <-d.bodyCh:
// Out of bounds eth/62 block bodies received, ignore them
case packet := <-d.hashCh:
// Make sure the active peer is giving us the hashes
if packet.PeerId() != p.id {
@ -750,6 +745,13 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
glog.V(logger.Debug).Infof("%v: hash request timed out", p)
hashTimeoutMeter.Mark(1)
return errTimeout
case <-d.headerCh:
case <-d.bodyCh:
case <-d.stateCh:
case <-d.receiptCh:
// Ignore eth/{62,63} packets because this is eth/61.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -774,59 +776,31 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
case <-d.cancelCh:
return errCancelBlockFetch
case <-d.headerCh:
// Out of bounds eth/62 block headers received, ignore them
case <-d.bodyCh:
// Out of bounds eth/62 block bodies received, ignore them
case packet := <-d.blockCh:
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of blocks, and demote in case of errors
blocks := packet.(*blockPack).blocks
err := d.queue.DeliverBlocks(peer.id, blocks)
switch err {
case nil:
// If no blocks were delivered, demote the peer (need the delivery above)
if len(blocks) == 0 {
peer.Demote()
peer.SetBlocksIdle()
glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
break
}
// All was successful, promote the peer and potentially start processing
peer.Promote()
peer.SetBlocksIdle()
glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
go d.process()
case errInvalidChain:
// The hash chain is invalid (blocks are not ordered properly), abort
// Deliver the received chunk of blocks and check chain validity
accepted, err := d.queue.DeliverBlocks(peer.id, blocks)
if err == errInvalidChain {
return err
case errNoFetchesPending:
// Peer probably timed out with its delivery but came through
// in the end, demote, but allow it to pull from this peer.
peer.Demote()
peer.SetBlocksIdle()
glog.V(logger.Detail).Infof("%s: out of bound delivery", peer)
case errStaleDelivery:
// Delivered something completely else than requested, usually
// caused by a timeout and delivery during a new sync cycle.
// Don't set it to idle as the original request should still be
// in flight.
peer.Demote()
glog.V(logger.Detail).Infof("%s: stale delivery", peer)
}
// Unless a peer delivered something completely else than requested (usually
// caused by a timed out request which came through in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
if err != errStaleDelivery {
peer.SetBlocksIdle(accepted)
}
// Issue a log to the user to see what's going on
switch {
case err == nil && len(blocks) == 0:
glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
case err == nil:
glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
default:
// Peer did something semi-useful, demote but keep it around
peer.Demote()
peer.SetBlocksIdle()
glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
go d.process()
glog.V(logger.Detail).Infof("%s: delivery failed: %v", peer, err)
}
}
// Blocks arrived, try to update the progress
@ -859,10 +833,15 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
return errNoPeers
}
// Check for block request timeouts and demote the responsible peers
for _, pid := range d.queue.ExpireBlocks(blockHardTTL) {
for pid, fails := range d.queue.ExpireBlocks(blockTTL) {
if peer := d.peers.Peer(pid); peer != nil {
peer.Demote()
glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
if fails > 1 {
glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
peer.SetBlocksIdle(0)
} else {
glog.V(logger.Debug).Infof("%s: stalling block delivery, dropping", peer)
d.dropPeer(pid)
}
}
}
// If there's nothing more to fetch, wait or terminate
@ -909,6 +888,13 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
return errPeersUnavailable
}
case <-d.headerCh:
case <-d.bodyCh:
case <-d.stateCh:
case <-d.receiptCh:
// Ignore eth/{62,63} packets because this is eth/61.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -941,18 +927,19 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
}
return headers[0].Number.Uint64(), nil
case <-d.bodyCh:
// Out of bounds block bodies received, ignore them
case <-d.hashCh:
// Out of bounds eth/61 hashes received, ignore them
case <-d.blockCh:
// Out of bounds eth/61 blocks received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: head header timeout", p)
return 0, errTimeout
case <-d.bodyCh:
case <-d.stateCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
case <-d.hashCh:
case <-d.blockCh:
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -1002,24 +989,25 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
// Check if a common ancestor was found
finished = true
for i := len(headers) - 1; i >= 0; i-- {
if (d.mode != LightSync && d.hasBlock(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
if (d.mode != LightSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
number, hash = headers[i].Number.Uint64(), headers[i].Hash()
break
}
}
case <-d.bodyCh:
// Out of bounds block bodies received, ignore them
case <-d.hashCh:
// Out of bounds eth/61 hashes received, ignore them
case <-d.blockCh:
// Out of bounds eth/61 blocks received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: head header timeout", p)
return 0, errTimeout
case <-d.bodyCh:
case <-d.stateCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
case <-d.hashCh:
case <-d.blockCh:
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
}
}
// If the head fetch already found an ancestor, return
@ -1057,7 +1045,7 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
arrived = true
// Modify the search interval based on the response
if (d.mode == FullSync && !d.hasBlock(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) {
if (d.mode == FullSync && !d.hasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) {
end = check
break
}
@ -1068,18 +1056,19 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
}
start = check
case <-d.bodyCh:
// Out of bounds block bodies received, ignore them
case <-d.hashCh:
// Out of bounds eth/61 hashes received, ignore them
case <-d.blockCh:
// Out of bounds eth/61 blocks received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: search header timeout", p)
return 0, errTimeout
case <-d.bodyCh:
case <-d.stateCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
case <-d.hashCh:
case <-d.blockCh:
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -1141,12 +1130,6 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
case <-d.cancelCh:
return errCancelHeaderFetch
case <-d.hashCh:
// Out of bounds eth/61 hashes received, ignore them
case <-d.blockCh:
// Out of bounds eth/61 blocks received, ignore them
case packet := <-d.headerCh:
// Make sure the active peer is giving us the headers
if packet.PeerId() != p.id {
@ -1233,7 +1216,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
}
}
// Notify the content fetchers of new headers, but stop if queue is full
cont := d.queue.PendingBlocks() < maxQueuedHeaders || d.queue.PendingReceipts() < maxQueuedHeaders
cont := d.queue.PendingBlocks() < maxQueuedHeaders && d.queue.PendingReceipts() < maxQueuedHeaders
for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
if cont {
// We still have headers to fetch, send continuation wake signal (potential)
@ -1247,9 +1230,11 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
case ch <- false:
case <-d.cancelCh:
}
return nil
}
}
if !cont {
return nil
}
// Queue not yet full, fetch the next batch
from += uint64(len(headers))
getHeaders(from)
@ -1268,6 +1253,11 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
}
}
return nil
case <-d.hashCh:
case <-d.blockCh:
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
}
}
}
@ -1279,14 +1269,14 @@ func (d *Downloader) fetchBodies(from uint64) error {
glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
var (
deliver = func(packet dataPack) error {
deliver = func(packet dataPack) (int, error) {
pack := packet.(*bodyPack)
return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
}
expire = func() []string { return d.queue.ExpireBodies(bodyHardTTL) }
expire = func() map[string]int { return d.queue.ExpireBodies(bodyTTL) }
fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) }
capacity = func(p *peer) int { return p.BlockCapacity() }
setIdle = func(p *peer) { p.SetBodiesIdle() }
setIdle = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) }
)
err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
@ -1303,14 +1293,14 @@ func (d *Downloader) fetchReceipts(from uint64) error {
glog.V(logger.Debug).Infof("Downloading receipts from #%d", from)
var (
deliver = func(packet dataPack) error {
deliver = func(packet dataPack) (int, error) {
pack := packet.(*receiptPack)
return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
}
expire = func() []string { return d.queue.ExpireReceipts(receiptHardTTL) }
expire = func() map[string]int { return d.queue.ExpireReceipts(receiptTTL) }
fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) }
capacity = func(p *peer) int { return p.ReceiptCapacity() }
setIdle = func(p *peer) { p.SetReceiptsIdle() }
setIdle = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) }
)
err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
@ -1327,7 +1317,7 @@ func (d *Downloader) fetchNodeData() error {
glog.V(logger.Debug).Infof("Downloading node state data")
var (
deliver = func(packet dataPack) error {
deliver = func(packet dataPack) (int, error) {
start := time.Now()
return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) {
if err != nil {
@ -1336,10 +1326,8 @@ func (d *Downloader) fetchNodeData() error {
d.cancel()
return
}
// Processing succeeded, notify state fetcher and processor of continuation
if d.queue.PendingNodeData() == 0 {
go d.process()
} else {
// Processing succeeded, notify state fetcher of continuation
if d.queue.PendingNodeData() > 0 {
select {
case d.stateWakeCh <- true:
default:
@ -1348,19 +1336,18 @@ func (d *Downloader) fetchNodeData() error {
// Log a message to the user and return
d.syncStatsLock.Lock()
defer d.syncStatsLock.Unlock()
d.syncStatsStateDone += uint64(delivered)
glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d in total", delivered, time.Since(start), d.syncStatsStateDone)
})
}
expire = func() []string { return d.queue.ExpireNodeData(stateHardTTL) }
expire = func() map[string]int { return d.queue.ExpireNodeData(stateTTL) }
throttle = func() bool { return false }
reserve = func(p *peer, count int) (*fetchRequest, bool, error) {
return d.queue.ReserveNodeData(p, count), false, nil
}
fetch = func(p *peer, req *fetchRequest) error { return p.FetchNodeData(req) }
capacity = func(p *peer) int { return p.NodeDataCapacity() }
setIdle = func(p *peer) { p.SetNodeDataIdle() }
setIdle = func(p *peer, accepted int) { p.SetNodeDataIdle(accepted) }
)
err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire,
d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
@ -1373,10 +1360,10 @@ func (d *Downloader) fetchNodeData() error {
// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(packet dataPack) error, wakeCh chan bool,
expire func() []string, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int,
idle func() ([]*peer, int), setIdle func(*peer), kind string) error {
idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error {
// Create a ticker to detect expired retrieval tasks
ticker := time.NewTicker(100 * time.Millisecond)
@ -1391,57 +1378,29 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
case <-d.cancelCh:
return errCancel
case <-d.hashCh:
// Out of bounds eth/61 hashes received, ignore them
case <-d.blockCh:
// Out of bounds eth/61 blocks received, ignore them
case packet := <-deliveryCh:
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data, and demote in case of errors
switch err := deliver(packet); err {
case nil:
// If no blocks were delivered, demote the peer (need the delivery above to clean internal queue!)
if packet.Items() == 0 {
peer.Demote()
setIdle(peer)
glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
break
}
// All was successful, promote the peer and potentially start processing
peer.Promote()
setIdle(peer)
glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
go d.process()
case errInvalidChain:
// The hash chain is invalid (blocks are not ordered properly), abort
// Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet)
if err == errInvalidChain {
return err
case errNoFetchesPending:
// Peer probably timed out with its delivery but came through
// in the end, demote, but allow it to pull from this peer.
peer.Demote()
setIdle(peer)
glog.V(logger.Detail).Infof("%s: out of bound %s delivery", peer, strings.ToLower(kind))
case errStaleDelivery:
// Delivered something completely else than requested, usually
// caused by a timeout and delivery during a new sync cycle.
// Don't set it to idle as the original request should still be
// in flight.
peer.Demote()
glog.V(logger.Detail).Infof("%s: %s stale delivery", peer, strings.ToLower(kind))
}
// Unless a peer delivered something completely else than requested (usually
// caused by a timed out request which came through in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
if err != errStaleDelivery {
setIdle(peer, accepted)
}
// Issue a log to the user to see what's going on
switch {
case err == nil && packet.Items() == 0:
glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
case err == nil:
glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
default:
// Peer did something semi-useful, demote but keep it around
peer.Demote()
setIdle(peer)
glog.V(logger.Detail).Infof("%s: %s delivery partially failed: %v", peer, strings.ToLower(kind), err)
go d.process()
glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err)
}
}
// Blocks assembled, try to update the progress
@ -1474,11 +1433,15 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
return errNoPeers
}
// Check for fetch request timeouts and demote the responsible peers
for _, pid := range expire() {
for pid, fails := range expire() {
if peer := d.peers.Peer(pid); peer != nil {
peer.Demote()
setIdle(peer)
glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
if fails > 1 {
glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
setIdle(peer, 0)
} else {
glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind))
d.dropPeer(pid)
}
}
}
// If there's nothing more to fetch, wait or terminate
@ -1508,7 +1471,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
}
if progress {
progressed = true
go d.process()
}
if request == nil {
continue
@ -1540,51 +1502,23 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
return errPeersUnavailable
}
case <-d.hashCh:
case <-d.blockCh:
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
}
}
}
// process takes fetch results from the queue and tries to import them into the
// chain. The type of import operation will depend on the result contents:
// -
//
// The algorithmic flow is as follows:
// - The `processing` flag is swapped to 1 to ensure singleton access
// - The current `cancel` channel is retrieved to detect sync abortions
// - Blocks are iteratively taken from the cache and inserted into the chain
// - When the cache becomes empty, insertion stops
// - The `processing` flag is swapped back to 0
// - A post-exit check is made whether new blocks became available
// - This step is important: it handles a potential race condition between
// checking for no more work, and releasing the processing "mutex". In
// between these state changes, a block may have arrived, but a processing
// attempt denied, so we need to re-enter to ensure the block isn't left
// to idle in the cache.
func (d *Downloader) process() {
// Make sure only one goroutine is ever allowed to process blocks at once
if !atomic.CompareAndSwapInt32(&d.processing, 0, 1) {
return
}
// If the processor just exited, but there are freshly pending items, try to
// reenter. This is needed because the goroutine spun up for processing
// the fresh results might have been rejected entry due to this present
// thread not yet releasing the `processing` state.
defer func() {
if atomic.LoadInt32(&d.interrupt) == 0 && d.queue.GetHeadResult() != nil {
d.process()
}
}()
// Release the lock upon exit (note, before checking for reentry!)
defer atomic.StoreInt32(&d.processing, 0)
// Repeat the processing as long as there are results to process
// chain. The type of import operation will depend on the result contents.
func (d *Downloader) process() error {
pivot := d.queue.FastSyncPivot()
for {
// Fetch the next batch of results
pivot := d.queue.FastSyncPivot() // Fetch pivot before results to prevent reset race
results := d.queue.TakeResults()
results := d.queue.WaitResults()
if len(results) == 0 {
return
return nil // queue empty
}
if d.chainInsertHook != nil {
d.chainInsertHook(results)
@ -1597,7 +1531,7 @@ func (d *Downloader) process() {
for len(results) != 0 {
// Check for any termination requests
if atomic.LoadInt32(&d.interrupt) == 1 {
return
return errCancelProcessing
}
// Retrieve a batch of results to import
var (
@ -1633,8 +1567,7 @@ func (d *Downloader) process() {
}
if err != nil {
glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
d.cancel()
return
return err
}
// Shift the results to the next batch
results = results[items:]
@ -1685,19 +1618,16 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i
dropMeter.Mark(int64(packet.Items()))
}
}()
// Make sure the downloader is active
if atomic.LoadInt32(&d.synchronising) == 0 {
return errNoSyncActive
}
// Deliver or abort if the sync is canceled while queuing
d.cancelLock.RLock()
cancel := d.cancelCh
d.cancelLock.RUnlock()
if cancel == nil {
return errNoSyncActive
}
select {
case destCh <- packet:
return nil
case <-cancel:
return errNoSyncActive
}
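The select above is the standard cancellable-send idiom in Go; below is a self-contained sketch of the same shape (hypothetical helper, packet type simplified to int):

package main

import (
	"errors"
	"fmt"
)

var errNoSyncActive = errors.New("no sync active")

// deliverOrAbort tries to hand a packet to the destination channel, but
// gives up cleanly if the cancel channel is closed while it is blocked.
func deliverOrAbort(dest chan int, packet int, cancel <-chan struct{}) error {
	select {
	case dest <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}

func main() {
	dest := make(chan int, 1)
	cancel := make(chan struct{})
	fmt.Println(deliverOrAbort(dest, 1, cancel)) // <nil>
	close(cancel)                                // sync cancelled; buffer is now full
	fmt.Println(deliverOrAbort(dest, 2, cancel)) // no sync active
}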

View File

@ -150,6 +150,8 @@ func newTester() *downloadTester {
peerChainTds: make(map[string]map[common.Hash]*big.Int),
}
tester.stateDb, _ = ethdb.NewMemDatabase()
tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader,
tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd,
tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer)
@ -169,17 +171,7 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
}
}
dl.lock.RUnlock()
err := dl.downloader.synchronise(id, hash, td, mode)
for {
// If the queue is empty and processing stopped, break
if dl.downloader.queue.Idle() && atomic.LoadInt32(&dl.downloader.processing) == 0 {
break
}
// Otherwise sleep a bit and retry
time.Sleep(time.Millisecond)
}
return err
return dl.downloader.synchronise(id, hash, td, mode)
}
// hasHeader checks if a header is present in the tester's canonical chain.
@ -187,9 +179,14 @@ func (dl *downloadTester) hasHeader(hash common.Hash) bool {
return dl.getHeader(hash) != nil
}
// hasBlock checks if a block is present in the tester's canonical chain.
// hasBlock checks if a block and associated state are present in the tester's canonical chain.
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
return dl.getBlock(hash) != nil
block := dl.getBlock(hash)
if block == nil {
return false
}
_, err := dl.stateDb.Get(block.Root().Bytes())
return err == nil
}
// getHeader retrieves a header from the tester's canonical chain.
@ -302,8 +299,10 @@ func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) {
defer dl.lock.Unlock()
for i, block := range blocks {
if _, ok := dl.ownBlocks[block.ParentHash()]; !ok {
if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
return i, errors.New("unknown parent")
} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
}
if _, ok := dl.ownHeaders[block.Hash()]; !ok {
dl.ownHashes = append(dl.ownHashes, block.Hash())
@ -701,6 +700,8 @@ func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronis
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -725,6 +726,8 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a long block chain to download and the tester
targetBlocks := 8 * blockCacheLimit
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -757,8 +760,8 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
for start := time.Now(); time.Since(start) < time.Second; {
time.Sleep(25 * time.Millisecond)
tester.lock.RLock()
tester.downloader.queue.lock.RLock()
tester.lock.Lock()
tester.downloader.queue.lock.Lock()
cached = len(tester.downloader.queue.blockDonePool)
if mode == FastSync {
if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
@ -769,8 +772,8 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
}
frozen = int(atomic.LoadUint32(&blocked))
retrieved = len(tester.ownBlocks)
tester.downloader.queue.lock.RUnlock()
tester.lock.RUnlock()
tester.downloader.queue.lock.Unlock()
tester.lock.Unlock()
if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 {
break
@ -810,6 +813,8 @@ func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }
func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a long enough forked chain
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
@ -833,6 +838,7 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader61(t *testing.T) {
t.Parallel()
tester := newTester()
// Check that neither hashes nor blocks are accepted
@ -847,6 +853,7 @@ func TestInactiveDownloader61(t *testing.T) {
// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
t.Parallel()
tester := newTester()
// Check that neither block headers nor bodies are accepted
@ -861,6 +868,7 @@ func TestInactiveDownloader62(t *testing.T) {
// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
t.Parallel()
tester := newTester()
// Check that neither block headers nor bodies are accepted
@ -885,6 +893,8 @@ func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
func testCancel(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small enough block chain to download and the tester
targetBlocks := blockCacheLimit - 15
if targetBlocks >= MaxHashFetch {
@ -923,6 +933,8 @@ func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t,
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create various peers with various parts of the chain
targetPeers := 8
targetBlocks := targetPeers*blockCacheLimit - 15
@ -950,6 +962,8 @@ func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t,
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -986,6 +1000,8 @@ func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, F
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a block chain to download
targetBlocks := 2*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -1037,6 +1053,8 @@ func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 6
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -1093,6 +1111,8 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
}
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
@ -1188,6 +1208,8 @@ func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttac
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
@ -1209,25 +1231,26 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
result error
drop bool
}{
{nil, false}, // Sync succeeded, all is well
{errBusy, false}, // Sync is already in progress, no problem
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
{errNoPeers, false}, // No peers to download from, soft race, no issue
{errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue
{errTimeout, true}, // No hashes received in due time, drop the peer
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{nil, false}, // Sync succeeded, all is well
{errBusy, false}, // Sync is already in progress, no problem
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
{errNoPeers, false}, // No peers to download from, soft race, no issue
{errTimeout, true}, // No hashes received in due time, drop the peer
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelReceiptFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
}
// Run the tests and check disconnection status
tester := newTester()
@ -1261,6 +1284,8 @@ func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -1331,6 +1356,8 @@ func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64,
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a forked chain to simulate origin reversal
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
@ -1404,6 +1431,8 @@ func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64,
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
@ -1478,6 +1507,8 @@ func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, F
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a small block chain
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
@ -1541,3 +1572,50 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks, targetBlocks, targetBlocks)
}
}
// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) }
func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) }
func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) }
func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil)
fakeHeads := []*types.Header{{}, {}, {}, {}}
for i := 0; i < 200; i++ {
tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
// Whenever the downloader requests headers, flood it with
// a lot of unrequested header deliveries.
tester.downloader.peers.peers["peer"].getAbsHeaders = func(from uint64, count, skip int, reverse bool) error {
deliveriesDone := make(chan struct{}, 500)
for i := 0; i < cap(deliveriesDone); i++ {
peer := fmt.Sprintf("fake-peer%d", i)
go func() {
tester.downloader.DeliverHeaders(peer, fakeHeads)
deliveriesDone <- struct{}{}
}()
}
// Deliver the actual requested headers.
impl := tester.peerGetAbsHeadersFn("peer", 0)
go impl(from, count, skip, reverse)
// None of the extra deliveries should block.
timeout := time.After(5 * time.Second)
for i := 0; i < cap(deliveriesDone); i++ {
select {
case <-deliveriesDone:
case <-timeout:
panic("blocked")
}
}
return nil
}
if err := tester.sync("peer", nil, mode); err != nil {
t.Errorf("sync failed: %v", err)
}
}
}

View File

@ -28,7 +28,11 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"gopkg.in/fatih/set.v0"
)
const (
maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items
throughputImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
)
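The throughputImpact constant implies an exponential moving average over delivery measurements; the sketch below shows what a 0.1 impact factor means in practice. The helper name and exact formula are assumptions, since the rewritten setIdle is only partially visible further down.

package main

import (
	"fmt"
	"time"
)

const throughputImpact = 0.1 // as in the const block above

// updateThroughput blends one new measurement (items delivered over the
// elapsed time) into the running items-per-second estimate: each sample
// moves the estimate by at most 10%, smoothing out one-off spikes.
func updateThroughput(current float64, delivered int, elapsed time.Duration) float64 {
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))
	return (1-throughputImpact)*current + throughputImpact*measured
}

func main() {
	est := 100.0                                    // current estimate: 100 items/s
	est = updateThroughput(est, 300, 2*time.Second) // one fast delivery at 150 items/s
	fmt.Printf("%.1f\n", est)                       // 0.9*100 + 0.1*150 = 105.0
}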
// Hash and block fetchers belonging to eth/61 and below
@ -57,17 +61,16 @@ type peer struct {
blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1)
receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1)
rep int32 // Simple peer reputation
blockCapacity int32 // Number of blocks (bodies) allowed to fetch per request
receiptCapacity int32 // Number of receipts allowed to fetch per request
stateCapacity int32 // Number of node data pieces allowed to fetch per request
blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second
receiptThroughput float64 // Number of receipts measured to be retrievable per second
stateThroughput float64 // Number of node data pieces measured to be retrievable per second
blockStarted time.Time // Time instance when the last block (body) fetch was started
receiptStarted time.Time // Time instance when the last receipt fetch was started
stateStarted time.Time // Time instance when the last node data fetch was started
ignored *set.Set // Set of hashes not to request (didn't have previously)
lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)
getRelHashes relativeHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an origin hash
getAbsHashes absoluteHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an absolute position
@ -81,6 +84,7 @@ type peer struct {
getNodeData stateFetcherFn // [eth/63] Method to retrieve a batch of state trie data
version int // Eth protocol version number to switch strategies
lock sync.RWMutex
}
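A plausible sketch of how the new lacking map is kept bounded by maxLackingHashes (the method below is assumed for illustration and operates on the peer type from this file): once the cap is reached, arbitrary entries are evicted before the new hash is recorded.

func (p *peer) markLacking(hash common.Hash) {
	p.lock.Lock()
	defer p.lock.Unlock()

	for len(p.lacking) >= maxLackingHashes {
		for drop := range p.lacking {
			delete(p.lacking, drop) // map iteration order is randomized,
			break                   // so this evicts an arbitrary entry
		}
	}
	p.lacking[hash] = struct{}{}
}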
// newPeer creates a new downloader peer, with specific hash and block retrieval
@ -90,12 +94,9 @@ func newPeer(id string, version int, head common.Hash,
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
return &peer{
id: id,
head: head,
blockCapacity: 1,
receiptCapacity: 1,
stateCapacity: 1,
ignored: set.New(),
id: id,
head: head,
lacking: make(map[common.Hash]struct{}),
getRelHashes: getRelHashes,
getAbsHashes: getAbsHashes,
@ -114,12 +115,18 @@ func newPeer(id string, version int, head common.Hash,
// Reset clears the internal state of a peer entity.
func (p *peer) Reset() {
p.lock.Lock()
defer p.lock.Unlock()
atomic.StoreInt32(&p.blockIdle, 0)
atomic.StoreInt32(&p.receiptIdle, 0)
atomic.StoreInt32(&p.blockCapacity, 1)
atomic.StoreInt32(&p.receiptCapacity, 1)
atomic.StoreInt32(&p.stateCapacity, 1)
p.ignored.Clear()
atomic.StoreInt32(&p.stateIdle, 0)
p.blockThroughput = 0
p.receiptThroughput = 0
p.stateThroughput = 0
p.lacking = make(map[common.Hash]struct{})
}
// Fetch61 sends a block retrieval request to the remote peer.
@ -210,108 +217,116 @@ func (p *peer) FetchNodeData(request *fetchRequest) error {
return nil
}
// SetBlocksIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its block retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time.
func (p *peer) SetBlocksIdle() {
p.setIdle(p.blockStarted, blockSoftTTL, blockHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle)
// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval
// requests. Its estimated block retrieval throughput is updated with that measured
// just now.
func (p *peer) SetBlocksIdle(delivered int) {
p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}
// SetBodiesIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its block body retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time.
func (p *peer) SetBodiesIdle() {
p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBodyFetch, &p.blockCapacity, &p.blockIdle)
// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
func (p *peer) SetBodiesIdle(delivered int) {
p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}
// SetReceiptsIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its receipt retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time.
func (p *peer) SetReceiptsIdle() {
p.setIdle(p.receiptStarted, receiptSoftTTL, receiptHardTTL, MaxReceiptFetch, &p.receiptCapacity, &p.receiptIdle)
// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peer) SetReceiptsIdle(delivered int) {
p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
}
// SetNodeDataIdle sets the peer to idle, allowing it to execute new retrieval
// requests. Its node data retrieval allowance will also be updated either up- or
// downwards, depending on whether the previous fetch completed in time.
func (p *peer) SetNodeDataIdle() {
p.setIdle(p.stateStarted, stateSoftTTL, stateSoftTTL, MaxStateFetch, &p.stateCapacity, &p.stateIdle)
// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peer) SetNodeDataIdle(delivered int) {
p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
}
// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its data retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time.
func (p *peer) setIdle(started time.Time, softTTL, hardTTL time.Duration, maxFetch int, capacity, idle *int32) {
// Update the peer's download allowance based on previous performance
scale := 2.0
if time.Since(started) > softTTL {
scale = 0.5
if time.Since(started) > hardTTL {
scale = 1 / float64(maxFetch) // reduces capacity to 1
}
}
for {
// Calculate the new download bandwidth allowance
prev := atomic.LoadInt32(capacity)
next := int32(math.Max(1, math.Min(float64(maxFetch), float64(prev)*scale)))
// Its estimated retrieval throughput is updated with that measured just now.
func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
// Irrespective of the scaling, make sure the peer ends up idle
defer atomic.StoreInt32(idle, 0)
// Try to update the old value
if atomic.CompareAndSwapInt32(capacity, prev, next) {
// If we're having problems at 1 capacity, try to find better peers
if next == 1 {
p.Demote()
}
break
}
p.lock.RLock()
defer p.lock.RUnlock()
// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
if delivered == 0 {
*throughput = 0
return
}
// Set the peer to idle to allow further fetch requests
atomic.StoreInt32(idle, 0)
// Otherwise update the throughput with a new measurement
measured := float64(delivered) / (float64(time.Since(started)+1) / float64(time.Second)) // +1 (ns) to ensure non-zero divisor
*throughput = (1-throughputImpact)*(*throughput) + throughputImpact*measured
}
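For reference, the throughput update above is an exponential moving average over per-request delivery rates. A minimal standalone sketch of the same rule (the constant and names are illustrative, not the downloader's API):

package main

import (
	"fmt"
	"time"
)

const throughputImpact = 0.1 // weight given to the newest measurement

// updateThroughput folds one delivery measurement into the running estimate.
func updateThroughput(old float64, delivered int, started time.Time) float64 {
	if delivered == 0 {
		return 0 // hard timeout / unavailable data: collapse the estimate
	}
	elapsed := time.Since(started) + 1 // +1ns keeps the divisor non-zero
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))
	return (1-throughputImpact)*old + throughputImpact*measured
}

func main() {
	started := time.Now().Add(-2 * time.Second)
	est := updateThroughput(100, 128, started)      // 128 items in ~2s => ~64/s measured
	fmt.Printf("new estimate: %.1f items/s\n", est) // blends 100 toward 64
}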
// BlockCapacity retrieves the peer's block download allowance based on its
// previously discovered bandwidth capacity.
// previously discovered throughput.
func (p *peer) BlockCapacity() int {
return int(atomic.LoadInt32(&p.blockCapacity))
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Max(1, math.Min(p.blockThroughput*float64(blockTargetRTT)/float64(time.Second), float64(MaxBlockFetch))))
}
// ReceiptCapacity retrieves the peer's block download allowance based on its
// previously discovered bandwidth capacity.
// ReceiptCapacity retrieves the peer's receipt download allowance based on its
// previously discovered throughput.
func (p *peer) ReceiptCapacity() int {
return int(atomic.LoadInt32(&p.receiptCapacity))
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Max(1, math.Min(p.receiptThroughput*float64(receiptTargetRTT)/float64(time.Second), float64(MaxReceiptFetch))))
}
// NodeDataCapacity retrieves the peer's block download allowance based on its
// previously discovered bandwidth capacity.
// NodeDataCapacity retrieves the peer's state download allowance based on its
// previously discovered throughput.
func (p *peer) NodeDataCapacity() int {
return int(atomic.LoadInt32(&p.stateCapacity))
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Max(1, math.Min(p.stateThroughput*float64(stateTargetRTT)/float64(time.Second), float64(MaxStateFetch))))
}
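All three capacity getters apply the same rule: ask a peer for as many items as its measured throughput should deliver within one target round trip, clamped to [1, max]. A hedged sketch (targetRTT and maxFetch stand in for the real blockTargetRTT/MaxBlockFetch-style constants):

package main

import (
	"fmt"
	"math"
	"time"
)

// capacity converts a throughput estimate (items/s) into a request size.
func capacity(throughput float64, targetRTT time.Duration, maxFetch int) int {
	items := throughput * float64(targetRTT) / float64(time.Second)
	return int(math.Max(1, math.Min(items, float64(maxFetch))))
}

func main() {
	fmt.Println(capacity(80, time.Second, 128))  // 80: one RTT's worth of items
	fmt.Println(capacity(500, time.Second, 128)) // 128: clamped to the protocol max
	fmt.Println(capacity(0, time.Second, 128))   // 1: the floor keeps slow peers probed
}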
// Promote increases the peer's reputation.
func (p *peer) Promote() {
atomic.AddInt32(&p.rep, 1)
}
// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
// that a peer is known not to have (i.e. have been requested before). If the
// set reaches its maximum allowed capacity, items are randomly dropped off.
func (p *peer) MarkLacking(hash common.Hash) {
p.lock.Lock()
defer p.lock.Unlock()
// Demote decreases the peer's reputation or leaves it at 0.
func (p *peer) Demote() {
for {
// Calculate the new reputation value
prev := atomic.LoadInt32(&p.rep)
next := prev / 2
// Try to update the old value
if atomic.CompareAndSwapInt32(&p.rep, prev, next) {
return
for len(p.lacking) >= maxLackingHashes {
for drop, _ := range p.lacking {
delete(p.lacking, drop)
break
}
}
p.lacking[hash] = struct{}{}
}
// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking
// list (i.e. whether we know that the peer does not have it).
func (p *peer) Lacks(hash common.Hash) bool {
p.lock.RLock()
defer p.lock.RUnlock()
_, ok := p.lacking[hash]
return ok
}
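MarkLacking bounds the per-peer set by evicting an arbitrary entry once the cap is reached; since Go randomizes map iteration order, ranging one step and deleting is an effectively random drop. A self-contained sketch of the idiom:

package main

import "fmt"

const maxEntries = 4 // stand-in for maxLackingHashes

// mark inserts item, evicting arbitrary entries while the set is full.
func mark(set map[string]struct{}, item string) {
	for len(set) >= maxEntries {
		for drop := range set { // iteration order is randomized
			delete(set, drop)
			break
		}
	}
	set[item] = struct{}{}
}

func main() {
	set := make(map[string]struct{})
	for _, h := range []string{"a", "b", "c", "d", "e", "f"} {
		mark(set, h)
	}
	fmt.Println(len(set)) // never exceeds maxEntries
}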
// String implements fmt.Stringer.
func (p *peer) String() string {
p.lock.RLock()
defer p.lock.RUnlock()
return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("reputation %3d, ", atomic.LoadInt32(&p.rep))+
fmt.Sprintf("block cap %3d, ", atomic.LoadInt32(&p.blockCapacity))+
fmt.Sprintf("receipt cap %3d, ", atomic.LoadInt32(&p.receiptCapacity))+
fmt.Sprintf("ignored %4d", p.ignored.Size()),
fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+
fmt.Sprintf("receipts %3.2f/s, ", p.receiptThroughput)+
fmt.Sprintf("states %3.2f/s, ", p.stateThroughput)+
fmt.Sprintf("lacking %4d", len(p.lacking)),
)
}
@ -342,6 +357,10 @@ func (ps *peerSet) Reset() {
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
@ -349,6 +368,20 @@ func (ps *peerSet) Register(p *peer) error {
if _, ok := ps.peers[p.id]; ok {
return errAlreadyRegistered
}
if len(ps.peers) > 0 {
p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0
for _, peer := range ps.peers {
peer.lock.RLock()
p.blockThroughput += peer.blockThroughput
p.receiptThroughput += peer.receiptThroughput
p.stateThroughput += peer.stateThroughput
peer.lock.RUnlock()
}
p.blockThroughput /= float64(len(ps.peers))
p.receiptThroughput /= float64(len(ps.peers))
p.stateThroughput /= float64(len(ps.peers))
}
ps.peers[p.id] = p
return nil
}
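Seeding a new peer at the mean of the existing estimates is what the Register comment above describes: a zero start would sort the peer last forever, while an inflated start would let it jump the queue. A simplified sketch of the averaging:

package main

import "fmt"

type peer struct{ blockThroughput float64 }

// register seeds the newcomer with the average throughput of its siblings.
func register(peers map[string]*peer, id string, p *peer) {
	if len(peers) > 0 {
		sum := 0.0
		for _, old := range peers {
			sum += old.blockThroughput
		}
		p.blockThroughput = sum / float64(len(peers))
	}
	peers[id] = p
}

func main() {
	peers := map[string]*peer{
		"a": {blockThroughput: 40},
		"b": {blockThroughput: 80},
	}
	register(peers, "c", &peer{})
	fmt.Println(peers["c"].blockThroughput) // 60
}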
@ -400,7 +433,12 @@ func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
idle := func(p *peer) bool {
return atomic.LoadInt32(&p.blockIdle) == 0
}
return ps.idlePeers(61, 61, idle)
throughput := func(p *peer) float64 {
p.lock.RLock()
defer p.lock.RUnlock()
return p.blockThroughput
}
return ps.idlePeers(61, 61, idle, throughput)
}
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@ -409,7 +447,12 @@ func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
idle := func(p *peer) bool {
return atomic.LoadInt32(&p.blockIdle) == 0
}
return ps.idlePeers(62, 64, idle)
throughput := func(p *peer) float64 {
p.lock.RLock()
defer p.lock.RUnlock()
return p.blockThroughput
}
return ps.idlePeers(62, 64, idle, throughput)
}
// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@ -418,7 +461,12 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) {
idle := func(p *peer) bool {
return atomic.LoadInt32(&p.receiptIdle) == 0
}
return ps.idlePeers(63, 64, idle)
throughput := func(p *peer) float64 {
p.lock.RLock()
defer p.lock.RUnlock()
return p.receiptThroughput
}
return ps.idlePeers(63, 64, idle, throughput)
}
// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@ -427,12 +475,18 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) {
idle := func(p *peer) bool {
return atomic.LoadInt32(&p.stateIdle) == 0
}
return ps.idlePeers(63, 64, idle)
throughput := func(p *peer) float64 {
p.lock.RLock()
defer p.lock.RUnlock()
return p.stateThroughput
}
return ps.idlePeers(63, 64, idle, throughput)
}
// idlePeers retrieves a flat list of all currently idle peers satisfying the
// protocol version constraints, using the provided function to check idleness.
func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool) ([]*peer, int) {
// The resulting set of peers is sorted by their measured throughput.
func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool, throughput func(*peer) float64) ([]*peer, int) {
ps.lock.RLock()
defer ps.lock.RUnlock()
@ -447,7 +501,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer)
}
for i := 0; i < len(idle); i++ {
for j := i + 1; j < len(idle); j++ {
if atomic.LoadInt32(&idle[i].rep) < atomic.LoadInt32(&idle[j].rep) {
if throughput(idle[i]) < throughput(idle[j]) {
idle[i], idle[j] = idle[j], idle[i]
}
}
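The nested loop is a simple exchange sort, ordering idle peers fastest-first so the scheduler hands the largest requests to the best peers. On Go 1.8+ the same ordering could be written with sort.Slice; an illustrative alternative, not the code in this diff:

package main

import (
	"fmt"
	"sort"
)

type idlePeer struct {
	id         string
	throughput float64
}

func main() {
	idle := []idlePeer{{"a", 12.5}, {"b", 80.0}, {"c", 33.3}}
	sort.Slice(idle, func(i, j int) bool {
		return idle[i].throughput > idle[j].throughput // descending
	})
	fmt.Println(idle) // [{b 80} {c 33.3} {a 12.5}]
}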

View File

@ -101,11 +101,14 @@ type queue struct {
resultCache []*fetchResult // Downloaded but not yet delivered fetch results
resultOffset uint64 // Offset of the first cached fetch result in the block chain
lock sync.RWMutex
lock *sync.Mutex
active *sync.Cond
closed bool
}
// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(stateDb ethdb.Database) *queue {
lock := new(sync.Mutex)
return &queue{
hashPool: make(map[common.Hash]int),
hashQueue: prque.New(),
@ -122,6 +125,8 @@ func newQueue(stateDb ethdb.Database) *queue {
statePendPool: make(map[string]*fetchRequest),
stateDatabase: stateDb,
resultCache: make([]*fetchResult, blockCacheLimit),
active: sync.NewCond(lock),
lock: lock,
}
}
@ -133,6 +138,7 @@ func (q *queue) Reset() {
q.stateSchedLock.Lock()
defer q.stateSchedLock.Unlock()
q.closed = false
q.mode = FullSync
q.fastSyncPivot = 0
@ -162,18 +168,27 @@ func (q *queue) Reset() {
q.resultOffset = 0
}
// Close marks the end of the sync, unblocking WaitResults.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
q.lock.Lock()
q.closed = true
q.lock.Unlock()
q.active.Broadcast()
}
// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
return q.hashQueue.Size() + q.blockTaskQueue.Size()
}
// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
return q.receiptTaskQueue.Size()
}
@ -192,8 +207,8 @@ func (q *queue) PendingNodeData() int {
// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
return len(q.blockPendPool) > 0
}
@ -201,8 +216,8 @@ func (q *queue) InFlightBlocks() bool {
// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
return len(q.receiptPendPool) > 0
}
@ -210,8 +225,8 @@ func (q *queue) InFlightReceipts() bool {
// InFlightNodeData retrieves whether there are node data entry fetch requests
// currently in flight.
func (q *queue) InFlightNodeData() bool {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
return len(q.statePendPool)+int(atomic.LoadInt32(&q.stateProcessors)) > 0
}
@ -219,8 +234,8 @@ func (q *queue) InFlightNodeData() bool {
// Idle returns if the queue is fully idle or has some data still inside. This
// method is used by the tester to detect termination events.
func (q *queue) Idle() bool {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
@ -237,8 +252,8 @@ func (q *queue) Idle() bool {
// FastSyncPivot retrieves the currently used fast sync pivot point.
func (q *queue) FastSyncPivot() uint64 {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
return q.fastSyncPivot
}
@ -246,8 +261,8 @@ func (q *queue) FastSyncPivot() uint64 {
// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
// Calculate the currently in-flight block (body) requests
pending := 0
@ -261,8 +276,8 @@ func (q *queue) ShouldThrottleBlocks() bool {
// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
q.lock.RLock()
defer q.lock.RUnlock()
q.lock.Lock()
defer q.lock.Unlock()
// Calculate the currently in-flight receipt requests
pending := 0
@ -351,91 +366,74 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
return inserts
}
// GetHeadResult retrieves the first fetch result from the cache, or nil if it hasn't
// been downloaded yet (or is simply non-existent).
func (q *queue) GetHeadResult() *fetchResult {
q.lock.RLock()
defer q.lock.RUnlock()
// If there are no results pending, return nil
if len(q.resultCache) == 0 || q.resultCache[0] == nil {
return nil
}
// If the next result is still incomplete, return nil
if q.resultCache[0].Pending > 0 {
return nil
}
// If the next result is the fast sync pivot...
if q.mode == FastSync && q.resultCache[0].Header.Number.Uint64() == q.fastSyncPivot {
// If the pivot state trie is still being pulled, return nil
if len(q.stateTaskPool) > 0 {
return nil
}
if q.PendingNodeData() > 0 {
return nil
}
// If the state is done, but not enough post-pivot headers were verified, stall...
for i := 0; i < fsHeaderForceVerify; i++ {
if i+1 >= len(q.resultCache) || q.resultCache[i+1] == nil {
return nil
}
}
}
return q.resultCache[0]
}
// TakeResults retrieves and permanently removes a batch of fetch results from
// the cache.
func (q *queue) TakeResults() []*fetchResult {
// WaitResults retrieves and permanently removes a batch of fetch
// results from the cache. The result slice will be empty if the queue
// has been closed.
func (q *queue) WaitResults() []*fetchResult {
q.lock.Lock()
defer q.lock.Unlock()
// Accumulate all available results
results := []*fetchResult{}
for i, result := range q.resultCache {
// Stop if no more results are ready
if result == nil || result.Pending > 0 {
break
nproc := q.countProcessableItems()
for nproc == 0 && !q.closed {
q.active.Wait()
nproc = q.countProcessableItems()
}
results := make([]*fetchResult, nproc)
copy(results, q.resultCache[:nproc])
if len(results) > 0 {
// Mark results as done before dropping them from the cache.
for _, result := range results {
hash := result.Header.Hash()
delete(q.blockDonePool, hash)
delete(q.receiptDonePool, hash)
}
// The fast sync pivot block may only be processed after state fetch completes
if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot {
if len(q.stateTaskPool) > 0 {
break
}
if q.PendingNodeData() > 0 {
break
}
// Even if state fetch is done, ensure post-pivot headers passed verification
safe := true
for j := 0; j < fsHeaderForceVerify; j++ {
if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil {
safe = false
// Delete the results from the cache and clear the tail.
copy(q.resultCache, q.resultCache[nproc:])
for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
q.resultCache[i] = nil
}
// Advance the expected block number of the first cache entry.
q.resultOffset += uint64(nproc)
}
return results
}
// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
for i, result := range q.resultCache {
// Don't process incomplete or unavailable items.
if result == nil || result.Pending > 0 {
return i
}
// Special handling for the fast-sync pivot block:
if q.mode == FastSync {
bnum := result.Header.Number.Uint64()
if bnum == q.fastSyncPivot {
// If the state of the pivot block is not
// available yet, we cannot proceed and return 0.
//
// Stop before processing the pivot block to ensure that
// resultCache has space for fsHeaderForceVerify items. Not
// doing this could leave us unable to download the required
// amount of headers.
if i > 0 || len(q.stateTaskPool) > 0 || q.PendingNodeData() > 0 {
return i
}
for j := 0; j < fsHeaderForceVerify; j++ {
if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil {
return i
}
}
}
if !safe {
break
// If we've just passed the fast sync pivot, stop as well
// because the following batch needs a different insertion.
// This simplifies handling the switchover in d.process.
if bnum == q.fastSyncPivot+1 && i > 0 {
return i
}
}
// If we've just inserted the fast sync pivot, stop as the following batch needs different insertion
if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot+1 && len(results) > 0 {
break
}
results = append(results, result)
hash := result.Header.Hash()
delete(q.blockDonePool, hash)
delete(q.receiptDonePool, hash)
}
// Delete the results from the slice and let them be garbage collected;
// without this slice trick the results would stay in memory until nil
// was assigned to them.
copy(q.resultCache, q.resultCache[len(results):])
for k, n := len(q.resultCache)-len(results), len(q.resultCache); k < n; k++ {
q.resultCache[k] = nil
}
q.resultOffset += uint64(len(results))
return results
return len(q.resultCache)
}
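WaitResults, Close and the Signal calls in the delivery paths form a classic condition-variable loop: consumers block while countProcessableItems is zero, producers Signal after mutating resultCache, and Close sets a flag and Broadcasts so all waiters drain out. A self-contained sketch of the pattern, with simplified types:

package main

import (
	"fmt"
	"sync"
	"time"
)

type queue struct {
	lock    sync.Mutex
	active  *sync.Cond
	results []int
	closed  bool
}

func newQueue() *queue {
	q := &queue{}
	q.active = sync.NewCond(&q.lock)
	return q
}

// waitResults blocks until results are ready or the queue is closed.
func (q *queue) waitResults() []int {
	q.lock.Lock()
	defer q.lock.Unlock()
	for len(q.results) == 0 && !q.closed {
		q.active.Wait()
	}
	out := q.results
	q.results = nil
	return out
}

func (q *queue) deliver(v int) {
	q.lock.Lock()
	q.results = append(q.results, v)
	q.lock.Unlock()
	q.active.Signal() // wake one waiter; Close uses Broadcast to wake all
}

func (q *queue) close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.active.Broadcast()
}

func main() {
	q := newQueue()
	go func() { time.Sleep(10 * time.Millisecond); q.deliver(42) }()
	fmt.Println(q.waitResults()) // [42]
	q.close()
	fmt.Println(q.waitResults()) // [] — a closed queue returns empty
}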
// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
@ -501,7 +499,7 @@ func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGe
for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ {
hash, priority := taskQueue.Pop()
if p.ignored.Has(hash) {
if p.Lacks(hash.(common.Hash)) {
skip[hash.(common.Hash)] = int(priority)
} else {
send[hash.(common.Hash)] = int(priority)
@ -584,6 +582,7 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ
// If we're the first to request this task, initialise the result container
index := int(header.Number.Int64() - int64(q.resultOffset))
if index >= len(q.resultCache) || index < 0 {
common.Report("index allocation went beyond available resultCache space")
return nil, false, errInvalidChain
}
if q.resultCache[index] == nil {
@ -607,7 +606,7 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ
continue
}
// Otherwise unless the peer is known not to have the data, add to the retrieve list
if p.ignored.Has(header.Hash()) {
if p.Lacks(header.Hash()) {
skip = append(skip, header)
} else {
send = append(send, header)
@ -617,6 +616,10 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ
for _, header := range skip {
taskQueue.Push(header, -float32(header.Number.Uint64()))
}
if progress {
// Wake WaitResults, resultCache was modified
q.active.Signal()
}
// Assemble and return the block download request
if len(send) == 0 {
return nil, progress, nil
@ -700,7 +703,7 @@ func (q *queue) Revoke(peerId string) {
// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBlocks(timeout time.Duration) []string {
func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
@ -709,7 +712,7 @@ func (q *queue) ExpireBlocks(timeout time.Duration) []string {
// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) []string {
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
@ -718,7 +721,7 @@ func (q *queue) ExpireBodies(timeout time.Duration) []string {
// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) []string {
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
@ -727,7 +730,7 @@ func (q *queue) ExpireReceipts(timeout time.Duration) []string {
// ExpireNodeData checks for in flight node data requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireNodeData(timeout time.Duration) []string {
func (q *queue) ExpireNodeData(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
@ -737,12 +740,12 @@ func (q *queue) ExpireNodeData(timeout time.Duration) []string {
// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held for writing. The
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) []string {
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
// Iterate over the expired requests and return each to the queue
peers := []string{}
expiries := make(map[string]int)
for id, request := range pendPool {
if time.Since(request.Time) > timeout {
// Update the metrics with the timeout
@ -755,25 +758,32 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
for _, header := range request.Headers {
taskQueue.Push(header, -float32(header.Number.Uint64()))
}
peers = append(peers, id)
// Add the peer to the expiry report along with the number of failed requests
expirations := len(request.Hashes)
if expirations < len(request.Headers) {
expirations = len(request.Headers)
}
expiries[id] = expirations
}
}
// Remove the expired requests from the pending pool
for _, id := range peers {
for id, _ := range expiries {
delete(pendPool, id)
}
return peers
return expiries
}
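Returning map[string]int instead of []string lets the caller grade the penalty by how many items each peer actually timed out on, rather than treating all expiries equally. A hedged sketch of the accounting with simplified request bookkeeping:

package main

import (
	"fmt"
	"time"
)

type request struct {
	items int
	time  time.Time
}

// expire reports how many items each peer failed to deliver in time.
func expire(pend map[string]*request, timeout time.Duration) map[string]int {
	expiries := make(map[string]int)
	for id, req := range pend {
		if time.Since(req.time) > timeout {
			expiries[id] = req.items
		}
	}
	for id := range expiries {
		delete(pend, id)
	}
	return expiries
}

func main() {
	pend := map[string]*request{
		"fast": {64, time.Now()},
		"slow": {16, time.Now().Add(-time.Minute)},
	}
	fmt.Println(expire(pend, 5*time.Second)) // map[slow:16]
}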
// DeliverBlocks injects a block retrieval response into the download queue.
func (q *queue) DeliverBlocks(id string, blocks []*types.Block) error {
// DeliverBlocks injects a block retrieval response into the download queue. The
// method returns the number of blocks accepted from the delivery and also wakes
// any threads waiting for data delivery.
func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the blocks were never requested
request := q.blockPendPool[id]
if request == nil {
return errNoFetchesPending
return 0, errNoFetchesPending
}
blockReqTimer.UpdateSince(request.Time)
delete(q.blockPendPool, id)
@ -781,11 +791,11 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) error {
// If no blocks were retrieved, mark them as unavailable for the origin peer
if len(blocks) == 0 {
for hash, _ := range request.Hashes {
request.Peer.ignored.Add(hash)
request.Peer.MarkLacking(hash)
}
}
// Iterate over the downloaded blocks and add each of them
errs := make([]error, 0)
accepted, errs := 0, make([]error, 0)
for _, block := range blocks {
// Skip any blocks that were not requested
hash := block.Hash()
@ -808,29 +818,33 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) error {
delete(request.Hashes, hash)
delete(q.hashPool, hash)
accepted++
}
// Return all failed or missing fetches to the queue
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
// Wake up WaitResults
if accepted > 0 {
q.active.Signal()
}
// If none of the blocks were good, it's a stale delivery
switch {
case len(errs) == 0:
return nil
return accepted, nil
case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBlock):
return errs[0]
return accepted, errs[0]
case len(errs) == len(blocks):
return errStaleDelivery
return accepted, errStaleDelivery
default:
return fmt.Errorf("multiple failures: %v", errs)
return accepted, fmt.Errorf("multiple failures: %v", errs)
}
}
// DeliverBodies injects a block body retrieval response into the results queue.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error {
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@ -846,7 +860,9 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
}
// DeliverReceipts injects a receipt retrieval response into the results queue.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) error {
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@ -865,26 +881,29 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) error
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, pendPool map[string]*fetchRequest,
donePool map[common.Hash]struct{}, reqTimer metrics.Timer, results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) error {
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {
// Short circuit if the data was never requested
request := pendPool[id]
if request == nil {
return errNoFetchesPending
return 0, errNoFetchesPending
}
reqTimer.UpdateSince(request.Time)
delete(pendPool, id)
// If no data items were retrieved, mark them as unavailable for the origin peer
if results == 0 {
for hash, _ := range request.Headers {
request.Peer.ignored.Add(hash)
for _, header := range request.Headers {
request.Peer.MarkLacking(header.Hash())
}
}
// Assemble each of the results with their headers and retrieved data parts
var (
failure error
useful bool
accepted int
failure error
useful bool
)
for i, header := range request.Headers {
// Short circuit assembly if no more fetch results are found
@ -904,6 +923,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ
donePool[header.Hash()] = struct{}{}
q.resultCache[index].Pending--
useful = true
accepted++
// Clean up a successful fetch
request.Headers[i] = nil
@ -915,28 +935,32 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ
taskQueue.Push(header, -float32(header.Number.Uint64()))
}
}
// Wake up WaitResults
if accepted > 0 {
q.active.Signal()
}
// If none of the data was good, it's a stale delivery
switch {
case failure == nil || failure == errInvalidChain:
return failure
return accepted, failure
case useful:
return fmt.Errorf("partial failure: %v", failure)
return accepted, fmt.Errorf("partial failure: %v", failure)
default:
return errStaleDelivery
return accepted, errStaleDelivery
}
}
// DeliverNodeData injects a node state data retrieval response into the queue.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, int)) error {
// The method returns the number of node state entries originally requested, and
// the number of them actually accepted from the delivery.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, int)) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the data was never requested
request := q.statePendPool[id]
if request == nil {
return errNoFetchesPending
return 0, errNoFetchesPending
}
stateReqTimer.UpdateSince(request.Time)
delete(q.statePendPool, id)
@ -944,14 +968,14 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, i
// If no data was retrieved, mark their hashes as unavailable for the origin peer
if len(data) == 0 {
for hash, _ := range request.Hashes {
request.Peer.ignored.Add(hash)
request.Peer.MarkLacking(hash)
}
}
// Iterate over the downloaded data and verify each of them
errs := make([]error, 0)
accepted, errs := 0, make([]error, 0)
process := []trie.SyncResult{}
for _, blob := range data {
// Skip any blocks that were not requested
// Skip any state trie entries that were not requested
hash := common.BytesToHash(crypto.Sha3(blob))
if _, ok := request.Hashes[hash]; !ok {
errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
@ -959,6 +983,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, i
}
// Inject the next state trie item into the processing queue
process = append(process, trie.SyncResult{hash, blob})
accepted++
delete(request.Hashes, hash)
delete(q.stateTaskPool, hash)
@ -976,19 +1001,21 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, i
// If none of the data items were good, it's a stale delivery
switch {
case len(errs) == 0:
return nil
return accepted, nil
case len(errs) == len(request.Hashes):
return errStaleDelivery
return accepted, errStaleDelivery
default:
return fmt.Errorf("multiple failures: %v", errs)
return accepted, fmt.Errorf("multiple failures: %v", errs)
}
}
// deliverNodeData is the asynchronous node data processor that injects a batch
// of sync results into the state scheduler.
func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error, int)) {
// Wake up WaitResults after the state has been written because it
// might be waiting for the pivot block state to get completed.
defer q.active.Signal()
// Process results one by one to permit task fetches in between
for i, result := range results {
q.stateSchedLock.Lock()

View File

@ -27,8 +27,8 @@ import (
// headerCheckFn is a callback type for verifying a header's presence in the local chain.
type headerCheckFn func(common.Hash) bool
// blockCheckFn is a callback type for verifying a block's presence in the local chain.
type blockCheckFn func(common.Hash) bool
// blockAndStateCheckFn is a callback type for verifying block and associated states' presence in the local chain.
type blockAndStateCheckFn func(common.Hash) bool
// headerRetrievalFn is a callback type for retrieving a header from the local chain.
type headerRetrievalFn func(common.Hash) *types.Header

View File

@ -64,7 +64,7 @@ func BenchmarkMipmaps(b *testing.B) {
}
// store the receipts
err := core.PutReceipts(db, receipts)
err := core.WriteReceipts(db, receipts)
if err != nil {
b.Fatal(err)
}
@ -78,7 +78,7 @@ func BenchmarkMipmaps(b *testing.B) {
if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
b.Fatalf("failed to insert block number: %v", err)
}
if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
b.Fatal("error writing block receipts:", err)
}
}
@ -163,7 +163,7 @@ func TestFilters(t *testing.T) {
}
// store the receipts
err := core.PutReceipts(db, receipts)
err := core.WriteReceipts(db, receipts)
if err != nil {
t.Fatal(err)
}
@ -180,7 +180,7 @@ func TestFilters(t *testing.T) {
if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}

View File

@ -166,7 +166,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) {
func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int {
gasUsed := big.NewInt(0)
receipts := self.eth.BlockProcessor().GetBlockReceipts(block.Hash())
receipts := core.GetBlockReceipts(self.eth.ChainDb(), block.Hash())
if len(receipts) > 0 {
if cgu := receipts[len(receipts)-1].CumulativeGasUsed; cgu != nil {
gasUsed = receipts[len(receipts)-1].CumulativeGasUsed

View File

@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
)
@ -55,6 +56,8 @@ type hashFetcherFn func(common.Hash) error
type blockFetcherFn func([]common.Hash) error
type ProtocolManager struct {
networkId int
fastSync bool
txpool txPool
blockchain *core.BlockChain
@ -91,6 +94,7 @@ func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool
}
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
fastSync: fastSync,
eventMux: mux,
txpool: txpool,
@ -111,23 +115,33 @@ func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool
// Compatible; initialise the sub-protocol
version := version // Closure for the run
manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
Name: "eth",
Name: ProtocolName,
Version: version,
Length: ProtocolLengths[i],
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(int(version), networkId, p, rw)
peer := manager.newPeer(int(version), p, rw)
manager.newPeerCh <- peer
return manager.handle(peer)
},
NodeInfo: func() interface{} {
return manager.NodeInfo()
},
PeerInfo: func(id discover.NodeID) interface{} {
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}
return nil
},
})
}
if len(manager.SubProtocols) == 0 {
return nil, errIncompatibleConfig
}
// Construct the different synchronisation mechanisms
manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock,
blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTd,
blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer)
manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeader,
blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead,
blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
manager.removePeer)
validator := func(block *types.Block, parent *types.Block) error {
return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
@ -188,8 +202,8 @@ func (pm *ProtocolManager) Stop() {
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
}
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, nv, p, newMeteredMsgWriter(rw))
func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, p, newMeteredMsgWriter(rw))
}
// handle is the callback invoked to manage the life cycle of an eth peer. When
@ -199,7 +213,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
// Execute the Ethereum handshake
td, head, genesis := pm.blockchain.Status()
if err := p.Handshake(td, head, genesis); err != nil {
if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
return err
}
@ -477,7 +491,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 {
filter := len(trasactions) > 0 || len(uncles) > 0
if filter {
trasactions, uncles = pm.fetcher.FilterBodies(trasactions, uncles, time.Now())
}
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
if err != nil {
glog.V(logger.Debug).Infoln(err)
@ -730,3 +748,22 @@ func (self *ProtocolManager) txBroadcastLoop() {
self.BroadcastTx(event.Tx.Hash(), event.Tx)
}
}
// EthNodeInfo represents a short summary of the Ethereum sub-protocol metadata known
// about the host peer.
type EthNodeInfo struct {
Network int `json:"network"` // Ethereum network ID (0=Olympic, 1=Frontier, 2=Morden)
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
Genesis string `json:"genesis"` // SHA3 hash of the host's genesis block
Head string `json:"head"` // SHA3 hash of the host's best owned block
}
// NodeInfo retrieves some protocol metadata about the running host node.
func (self *ProtocolManager) NodeInfo() *EthNodeInfo {
return &EthNodeInfo{
Network: self.networkId,
Difficulty: self.blockchain.GetTd(self.blockchain.CurrentBlock().Hash()),
Genesis: fmt.Sprintf("%x", self.blockchain.Genesis().Hash()),
Head: fmt.Sprintf("%x", self.blockchain.CurrentBlock().Hash()),
}
}

View File

@ -35,9 +35,7 @@ func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core
db, _ = ethdb.NewMemDatabase()
genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds})
blockchain, _ = core.NewBlockChain(db, pow, evmux)
blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux)
)
blockchain.SetProcessor(blockproc)
chain, _ := core.GenerateChain(genesis, db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil {
panic(err)
@ -117,7 +115,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
var id discover.NodeID
rand.Read(id[:])
peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net)
// Start the peer on a new thread
errc := make(chan error, 1)

View File

@ -44,38 +44,51 @@ const (
handshakeTimeout = 5 * time.Second
)
type peer struct {
*p2p.Peer
// PeerInfo represents a short summary of the Ethereum sub-protocol metadata known
// about a connected peer.
type PeerInfo struct {
Version int `json:"version"` // Ethereum protocol version negotiated
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain
Head string `json:"head"` // SHA3 hash of the peer's best owned block
}
type peer struct {
id string
*p2p.Peer
rw p2p.MsgReadWriter
version int // Protocol version negotiated
network int // Network ID being on
id string
head common.Hash
td *big.Int
lock sync.RWMutex
head common.Hash
td *big.Int
lock sync.RWMutex
knownTxs *set.Set // Set of transaction hashes known to be known by this peer
knownBlocks *set.Set // Set of block hashes known to be known by this peer
}
func newPeer(version, network int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
return &peer{
Peer: p,
rw: rw,
version: version,
network: network,
id: fmt.Sprintf("%x", id[:8]),
knownTxs: set.New(),
knownBlocks: set.New(),
}
}
// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *PeerInfo {
return &PeerInfo{
Version: p.version,
Difficulty: p.Td(),
Head: fmt.Sprintf("%x", p.Head()),
}
}
// Head retrieves a copy of the current head (most recent) hash of the peer.
func (p *peer) Head() (hash common.Hash) {
p.lock.RLock()
@ -268,20 +281,22 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error {
// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) error {
func (p *peer) Handshake(network int, td *big.Int, head common.Hash, genesis common.Hash) error {
// Send out own handshake in a new thread
errc := make(chan error, 2)
var status statusData // safe to read after two values have been received from errc
go func() {
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: uint32(p.network),
NetworkId: uint32(network),
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
})
}()
go func() {
errc <- p.readStatus(&status, genesis)
errc <- p.readStatus(network, &status, genesis)
}()
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
@ -299,7 +314,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) err
return nil
}
func (p *peer) readStatus(status *statusData, genesis common.Hash) (err error) {
func (p *peer) readStatus(network int, status *statusData, genesis common.Hash) (err error) {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
@ -317,8 +332,8 @@ func (p *peer) readStatus(status *statusData, genesis common.Hash) (err error) {
if status.GenesisBlock != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, genesis)
}
if int(status.NetworkId) != p.network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, p.network)
if int(status.NetworkId) != network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
}
if int(status.ProtocolVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)

View File

@ -33,6 +33,9 @@ const (
eth63 = 63
)
// Official short name of the protocol used during capability negotiation.
var ProtocolName = "eth"
// Supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{eth63, eth62, eth61}

View File

@ -175,10 +175,6 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
}
// If fast sync was enabled, and we synced up, disable it
if pm.fastSync {
// Wait until all pending imports finish processing
for pm.downloader.Synchronising() {
time.Sleep(100 * time.Millisecond)
}
// Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("fast sync complete, auto disabling")

View File

@ -40,8 +40,8 @@ func TestFastSyncDisabling(t *testing.T) {
// Sync up the two peers
io1, io2 := p2p.MsgPipe()
go pmFull.handle(pmFull.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
go pmEmpty.handle(pmEmpty.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))
go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))
time.Sleep(250 * time.Millisecond)
pmEmpty.synchronise(pmEmpty.peers.BestPeer())

View File

@ -21,35 +21,40 @@ import (
"time"
)
// Simple test to check if baseline matching/mismatching filtering works.
func TestFilters(t *testing.T) {
var success bool
var failure bool
fm := New()
fm.Start()
// Register two filters to catch posted data
first := make(chan struct{})
fm.Install(Generic{
Str1: "hello",
Fn: func(data interface{}) {
success = data.(bool)
first <- struct{}{}
},
})
second := make(chan struct{})
fm.Install(Generic{
Str1: "hello1",
Str2: "hello",
Fn: func(data interface{}) {
failure = true
second <- struct{}{}
},
})
// Post an event that should only match the first filter
fm.Notify(Generic{Str1: "hello"}, true)
fm.Stop()
time.Sleep(10 * time.Millisecond) // yield to the notifier
if !success {
t.Error("expected 'hello' to be posted")
// Ensure only the matching filters fire
select {
case <-first:
case <-time.After(100 * time.Millisecond):
t.Error("matching filter timed out")
}
if failure {
t.Error("hello1 was triggered")
select {
case <-second:
t.Error("mismatching filter fired")
case <-time.After(100 * time.Millisecond):
}
}

View File

@ -85,7 +85,7 @@ func TestNatto(t *testing.T) {
if err != nil {
t.Errorf("expected no error, got %v", err)
}
time.Sleep(time.Millisecond * 10)
time.Sleep(100 * time.Millisecond)
val, err := jsre.Run("msg")
if err != nil {
t.Errorf("expected no error, got %v", err)

View File

@ -19,7 +19,6 @@ package miner
import (
"fmt"
"math/big"
"sort"
"sync"
"sync/atomic"
"time"
@ -100,7 +99,7 @@ type worker struct {
eth core.Backend
chain *core.BlockChain
proc *core.BlockProcessor
proc core.Validator
chainDb ethdb.Database
coinbase common.Address
@ -131,7 +130,7 @@ func newWorker(coinbase common.Address, eth core.Backend) *worker {
recv: make(chan *Result, resultQueueSize),
gasPrice: new(big.Int),
chain: eth.BlockChain(),
proc: eth.BlockProcessor(),
proc: eth.BlockChain().Validator(),
possibleUncles: make(map[common.Hash]*types.Block),
coinbase: coinbase,
txQueue: make(map[common.Hash]*types.Transaction),
@ -244,7 +243,7 @@ func (self *worker) update() {
// Apply transaction to the pending state if we're not mining
if atomic.LoadInt32(&self.mining) == 0 {
self.currentMu.Lock()
self.current.commitTransactions(types.Transactions{ev.Tx}, self.gasPrice, self.proc)
self.current.commitTransactions(types.Transactions{ev.Tx}, self.gasPrice, self.chain)
self.currentMu.Unlock()
}
}
@ -290,7 +289,9 @@ func (self *worker) wait() {
glog.V(logger.Error).Infoln("Invalid block found during mining")
continue
}
if err := core.ValidateHeader(self.eth.BlockProcessor().Pow, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
auxValidator := self.eth.BlockChain().AuxValidator()
if err := core.ValidateHeader(auxValidator, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
glog.V(logger.Error).Infoln("Invalid header on mined block:", err)
continue
}
@ -300,12 +301,23 @@ func (self *worker) wait() {
glog.V(logger.Error).Infoln("error writing block to chain", err)
continue
}
// Update the block hash now that it is available, rather than when the receipts/logs of individual transactions were created
for _, r := range work.receipts {
for _, l := range r.Logs {
l.BlockHash = block.Hash()
}
}
for _, log := range work.state.Logs() {
log.BlockHash = block.Hash()
}
// check if canon block and write transactions
if stat == core.CanonStatTy {
// This puts transactions in an extra db for rpc
core.PutTransactions(self.chainDb, block, block.Transactions())
core.WriteTransactions(self.chainDb, block)
// store the receipts
core.PutReceipts(self.chainDb, work.receipts)
core.WriteReceipts(self.chainDb, work.receipts)
// Write mipmap bloom filters
core.WriteMipmapBloom(self.chainDb, block.NumberU64(), work.receipts)
}
@ -318,7 +330,7 @@ func (self *worker) wait() {
self.mux.Post(core.ChainHeadEvent{block})
self.mux.Post(logs)
}
if err := core.PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
}
}(block, work.state.Logs(), work.receipts)
@ -482,12 +494,12 @@ func (self *worker) commitNewWork() {
/* //approach 1
transactions := self.eth.TxPool().GetTransactions()
sort.Sort(types.TxByNonce{transactions})
sort.Sort(types.TxByNonce(transactions))
*/
//approach 2
transactions := self.eth.TxPool().GetTransactions()
sort.Sort(types.TxByPriceAndNonce{transactions})
types.SortByPriceAndNonce(transactions)
/* // approach 3
// commit transactions for this run.
@ -511,12 +523,12 @@ func (self *worker) commitNewWork() {
multiTxOwner = append(multiTxOwner, txs...)
}
}
sort.Sort(types.TxByPrice{singleTxOwner})
sort.Sort(types.TxByNonce{multiTxOwner})
sort.Sort(types.TxByPrice(singleTxOwner))
sort.Sort(types.TxByNonce(multiTxOwner))
transactions := append(singleTxOwner, multiTxOwner...)
*/
work.commitTransactions(transactions, self.gasPrice, self.proc)
work.commitTransactions(transactions, self.gasPrice, self.chain)
self.eth.TxPool().RemoveTransactions(work.lowGasTxs)
// compute uncles for the new block.
@ -575,11 +587,11 @@ func (self *worker) commitUncle(work *Work, uncle *types.Header) error {
return nil
}
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, proc *core.BlockProcessor) {
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, bc *core.BlockChain) {
gp := new(core.GasPool).AddGas(env.header.GasLimit)
for _, tx := range transactions {
// We can skip err. It has already been validated in the tx pool
// Error may be ignored here. The error has already been checked
// during transaction acceptance in the transaction pool.
from, _ := tx.From()
// Check if it falls within margin. Txs from owned accounts are always processed.
@ -615,7 +627,7 @@ func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *b
env.state.StartRecord(tx.Hash(), common.Hash{}, 0)
err := env.commitTransaction(tx, proc, gp)
err := env.commitTransaction(tx, bc, gp)
switch {
case core.IsGasLimitErr(err):
// ignore the transactor so no nonce errors will be thrown for this account
@ -635,9 +647,9 @@ func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *b
}
}
func (env *Work) commitTransaction(tx *types.Transaction, proc *core.BlockProcessor, gp *core.GasPool) error {
func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, gp *core.GasPool) error {
snap := env.state.Copy()
receipt, _, err := proc.ApplyTransaction(gp, env.state, env.header, tx, env.header.GasUsed, true)
receipt, _, _, err := core.ApplyTransaction(bc, gp, env.state, env.header, tx, env.header.GasUsed)
if err != nil {
env.state.Set(snap)
return err
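commitTransaction copies the state up front and restores the copy on failure, so a rejected transaction leaves no partial writes behind in the work state. A minimal sketch of that snapshot/revert idiom (the state type here is a stand-in for the real *state.StateDB):

package main

import (
	"errors"
	"fmt"
)

type state struct{ balances map[string]int }

func (s *state) copy() *state {
	c := &state{balances: make(map[string]int, len(s.balances))}
	for k, v := range s.balances {
		c.balances[k] = v
	}
	return c
}

func (s *state) set(snap *state) { s.balances = snap.balances }

// apply mutates the state, then possibly fails — the hazard the snapshot guards.
func apply(s *state, fail bool) error {
	s.balances["a"] -= 10
	s.balances["b"] += 10
	if fail {
		return errors.New("out of gas")
	}
	return nil
}

func main() {
	s := &state{balances: map[string]int{"a": 100, "b": 0}}
	snap := s.copy()
	if err := apply(s, true); err != nil {
		s.set(snap) // revert: the failed transfer leaves no trace
	}
	fmt.Println(s.balances) // map[a:100 b:0]
}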

View File

@ -67,6 +67,8 @@ type (
Version uint
From, To rpcEndpoint
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// pong is the reply to ping.
@ -78,18 +80,24 @@ type (
ReplyTok []byte // This contains the hash of the ping packet.
Expiration uint64 // Absolute timestamp at which the packet becomes invalid.
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// findnode is a query for nodes close to the given target.
findnode struct {
Target NodeID // doesn't need to be an actual public key
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// reply to findnode
neighbors struct {
Nodes []rpcNode
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
rpcNode struct {
@ -447,8 +455,11 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
return packet, nil
}
type tempError interface {
Temporary() bool
func isTemporaryError(err error) bool {
tempErr, ok := err.(interface {
Temporary() bool
})
return ok && tempErr.Temporary() || isPacketTooBig(err)
}
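The rewrite above checks for Temporary() with an anonymous interface assertion, so no named helper type is needed for the net-package error. A small self-contained sketch of the pattern:

package main

import (
	"errors"
	"fmt"
)

type tempError struct{}

func (tempError) Error() string   { return "resource temporarily unavailable" }
func (tempError) Temporary() bool { return true }

// isTemporary mirrors the assertion style used above (minus isPacketTooBig).
func isTemporary(err error) bool {
	tempErr, ok := err.(interface{ Temporary() bool })
	return ok && tempErr.Temporary()
}

func main() {
	fmt.Println(isTemporary(errors.New("fatal"))) // false
	fmt.Println(isTemporary(tempError{}))         // true
}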
// readLoop runs in its own goroutine. it handles incoming UDP packets.
@ -460,7 +471,7 @@ func (t *udp) readLoop() {
buf := make([]byte, 1280)
for {
nbytes, from, err := t.conn.ReadFromUDP(buf)
if tempErr, ok := err.(tempError); ok && tempErr.Temporary() {
if isTemporaryError(err) {
// Ignore temporary read errors.
glog.V(logger.Debug).Infof("Temporary read error: %v", err)
continue
@ -513,7 +524,8 @@ func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
default:
return nil, fromID, hash, fmt.Errorf("unknown type: %d", ptype)
}
err = rlp.DecodeBytes(sigdata[1:], req)
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(req)
return req, fromID, hash, err
}
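The `rlp:"tail"` fields and the switch to a zero-limit rlp.Stream work together for forward compatibility: the tail collects list elements an old node doesn't know about, and the stream (unlike rlp.DecodeBytes) tolerates further input after the decoded value. A hedged sketch using go-ethereum's rlp package (struct names are illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// oldPing is what a v1 node knows; Rest soaks up any future fields.
type oldPing struct {
	Version    uint
	Expiration uint64
	Rest       []rlp.RawValue `rlp:"tail"`
}

// newPing is the same packet with a field appended by a later version.
type newPing struct {
	Version    uint
	Expiration uint64
	Extra      string
}

func main() {
	blob, _ := rlp.EncodeToBytes(&newPing{Version: 5, Expiration: 42, Extra: "new"})

	var p oldPing
	s := rlp.NewStream(bytes.NewReader(blob), 0) // 0 = no input limit
	if err := s.Decode(&p); err != nil {
		panic(err)
	}
	fmt.Println(p.Version, p.Expiration, len(p.Rest)) // 5 42 1
}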

View File

@ -0,0 +1,26 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//+build !windows
package discover
// isPacketTooBig reports whether err indicates that a UDP packet didn't
// fit the receive buffer. There is no such error on
// non-Windows platforms.
func isPacketTooBig(err error) bool {
return false
}

View File

@ -20,13 +20,12 @@ import (
"bytes"
"crypto/ecdsa"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
logpkg "log"
"math/rand"
"net"
"os"
"path/filepath"
"reflect"
"runtime"
@ -34,12 +33,64 @@ import (
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/rlp"
)
func init() {
logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, logpkg.LstdFlags, logger.ErrorLevel))
spew.Config.DisableMethods = true
}
// This test checks that isPacketTooBig correctly identifies
// errors that result from receiving a UDP packet larger
// than the supplied receive buffer.
func TestIsPacketTooBig(t *testing.T) {
listener, err := net.ListenPacket("udp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer listener.Close()
sender, err := net.Dial("udp", listener.LocalAddr().String())
if err != nil {
t.Fatal(err)
}
defer sender.Close()
sendN := 1800
recvN := 300
for i := 0; i < 20; i++ {
go func() {
buf := make([]byte, sendN)
for i := range buf {
buf[i] = byte(i)
}
sender.Write(buf)
}()
buf := make([]byte, recvN)
listener.SetDeadline(time.Now().Add(1 * time.Second))
n, _, err := listener.ReadFrom(buf)
if err != nil {
if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
continue
}
if !isPacketTooBig(err) {
t.Fatal("unexpected read error:", spew.Sdump(err))
}
continue
}
if n != recvN {
t.Fatalf("short read: %d, want %d", n, recvN)
}
for i := range buf {
if buf[i] != byte(i) {
t.Fatalf("error in pattern")
break
}
}
}
}
// shared test variables
@ -385,6 +436,115 @@ func TestUDP_successfulPing(t *testing.T) {
}
}
var testPackets = []struct {
input string
wantPacket interface{}
}{
{
input: "71dbda3a79554728d4f94411e42ee1f8b0d561c10e1e5f5893367948c6a7d70bb87b235fa28a77070271b6c164a2dce8c7e13a5739b53b5e96f2e5acb0e458a02902f5965d55ecbeb2ebb6cabb8b2b232896a36b737666c55265ad0a68412f250001ea04cb847f000001820cfa8215a8d790000000000000000000000000000000018208ae820d058443b9a355",
wantPacket: &ping{
Version: 4,
From: rpcEndpoint{net.ParseIP("127.0.0.1").To4(), 3322, 5544},
To: rpcEndpoint{net.ParseIP("::1"), 2222, 3333},
Expiration: 1136239445,
Rest: []rlp.RawValue{},
},
},
{
input: "e9614ccfd9fc3e74360018522d30e1419a143407ffcce748de3e22116b7e8dc92ff74788c0b6663aaa3d67d641936511c8f8d6ad8698b820a7cf9e1be7155e9a241f556658c55428ec0563514365799a4be2be5a685a80971ddcfa80cb422cdd0101ec04cb847f000001820cfa8215a8d790000000000000000000000000000000018208ae820d058443b9a3550102",
wantPacket: &ping{
Version: 4,
From: rpcEndpoint{net.ParseIP("127.0.0.1").To4(), 3322, 5544},
To: rpcEndpoint{net.ParseIP("::1"), 2222, 3333},
Expiration: 1136239445,
Rest: []rlp.RawValue{{0x01}, {0x02}},
},
},
{
input: "577be4349c4dd26768081f58de4c6f375a7a22f3f7adda654d1428637412c3d7fe917cadc56d4e5e7ffae1dbe3efffb9849feb71b262de37977e7c7a44e677295680e9e38ab26bee2fcbae207fba3ff3d74069a50b902a82c9903ed37cc993c50001f83e82022bd79020010db83c4d001500000000abcdef12820cfa8215a8d79020010db885a308d313198a2e037073488208ae82823a8443b9a355c5010203040531b9019afde696e582a78fa8d95ea13ce3297d4afb8ba6433e4154caa5ac6431af1b80ba76023fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee1917084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c76d922dc3",
wantPacket: &ping{
Version: 555,
From: rpcEndpoint{net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), 3322, 5544},
To: rpcEndpoint{net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"), 2222, 33338},
Expiration: 1136239445,
Rest: []rlp.RawValue{{0xC5, 0x01, 0x02, 0x03, 0x04, 0x05}},
},
},
{
input: "09b2428d83348d27cdf7064ad9024f526cebc19e4958f0fdad87c15eb598dd61d08423e0bf66b2069869e1724125f820d851c136684082774f870e614d95a2855d000f05d1648b2d5945470bc187c2d2216fbe870f43ed0909009882e176a46b0102f846d79020010db885a308d313198a2e037073488208ae82823aa0fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c9548443b9a355c6010203c2040506a0c969a58f6f9095004c0177a6b47f451530cab38966a25cca5cb58f055542124e",
wantPacket: &pong{
To: rpcEndpoint{net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"), 2222, 33338},
ReplyTok: common.Hex2Bytes("fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c954"),
Expiration: 1136239445,
Rest: []rlp.RawValue{{0xC6, 0x01, 0x02, 0x03, 0xC2, 0x04, 0x05}, {0x06}},
},
},
{
input: "c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396",
wantPacket: &findnode{
Target: MustHexID("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"),
Expiration: 1136239445,
Rest: []rlp.RawValue{{0x82, 0x99, 0x99}, {0x83, 0x99, 0x99, 0x99}},
},
},
{
input: "c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db8403155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d313198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df738443b9a355010203b525a138aa34383fec3d2719a0",
wantPacket: &neighbors{
Nodes: []rpcNode{
{
ID: MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
IP: net.ParseIP("99.33.22.55").To4(),
UDP: 4444,
TCP: 4445,
},
{
ID: MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
IP: net.ParseIP("1.2.3.4").To4(),
UDP: 1,
TCP: 1,
},
{
ID: MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
IP: net.ParseIP("2001:db8:3c4d:15::abcd:ef12"),
UDP: 3333,
TCP: 3333,
},
{
ID: MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
IP: net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"),
UDP: 999,
TCP: 1000,
},
},
Expiration: 1136239445,
Rest: []rlp.RawValue{{0x01}, {0x02}, {0x03}},
},
},
}
func TestForwardCompatibility(t *testing.T) {
testkey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
wantNodeID := PubkeyID(&testkey.PublicKey)
for _, test := range testPackets {
input, err := hex.DecodeString(test.input)
if err != nil {
t.Fatalf("invalid hex: %s", test.input)
}
packet, nodeid, _, err := decodePacket(input)
if err != nil {
t.Errorf("did not accept packet %s\n%v", test.input, err)
continue
}
if !reflect.DeepEqual(packet, test.wantPacket) {
t.Errorf("got %s\nwant %s", spew.Sdump(packet), spew.Sdump(test.wantPacket))
}
if nodeid != wantNodeID {
t.Errorf("got id %v\nwant id %v", nodeid, wantNodeID)
}
}
}
// dgramPipe is a fake UDP socket. It queues all sent datagrams.
type dgramPipe struct {
mu *sync.Mutex

View File

@ -0,0 +1,40 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//+build windows
package discover
import (
"net"
"os"
"syscall"
)
const _WSAEMSGSIZE = syscall.Errno(10040)
// isPacketTooBig reports whether err indicates that a UDP packet didn't
// fit the receive buffer. On Windows, WSARecvFrom returns
// code WSAEMSGSIZE and no data if this happens.
func isPacketTooBig(err error) bool {
if opErr, ok := err.(*net.OpError); ok {
if scErr, ok := opErr.Err.(*os.SyscallError); ok {
return scErr.Err == _WSAEMSGSIZE
}
return opErr.Err == _WSAEMSGSIZE
}
return false
}

View File

@ -143,7 +143,8 @@ func TestEOFSignal(t *testing.T) {
}
func unhex(str string) []byte {
b, err := hex.DecodeString(strings.Replace(str, "\n", "", -1))
r := strings.NewReplacer("\t", "", " ", "", "\n", "")
b, err := hex.DecodeString(r.Replace(str))
if err != nil {
panic(fmt.Sprintf("invalid hex string: %q", str))
}

View File

@ -56,6 +56,9 @@ type protoHandshake struct {
Caps []Cap
ListenPort uint64
ID discover.NodeID
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// Peer represents a connected remote node.
@ -359,3 +362,49 @@ func (rw *protoRW) ReadMsg() (Msg, error) {
return Msg{}, io.EOF
}
}
// PeerInfo represents a short summary of the information known about a connected
// peer. Sub-protocol independent fields are contained and initialized here, with
// protocol specifics delegated to all connected sub-protocols.
type PeerInfo struct {
ID string `json:"id"` // Unique node identifier (also the encryption key)
Name string `json:"name"` // Name of the node, including client type, version, OS, custom data
Caps []string `json:"caps"` // Sub-protocols advertised by this particular peer
Network struct {
LocalAddress string `json:"localAddress"` // Local endpoint of the TCP data connection
RemoteAddress string `json:"remoteAddress"` // Remote endpoint of the TCP data connection
} `json:"network"`
Protocols map[string]interface{} `json:"protocols"` // Sub-protocol specific metadata fields
}
// Info gathers and returns a collection of metadata known about a peer.
func (p *Peer) Info() *PeerInfo {
// Gather the protocol capabilities
var caps []string
for _, cap := range p.Caps() {
caps = append(caps, cap.String())
}
// Assemble the generic peer metadata
info := &PeerInfo{
ID: p.ID().String(),
Name: p.Name(),
Caps: caps,
Protocols: make(map[string]interface{}),
}
info.Network.LocalAddress = p.LocalAddr().String()
info.Network.RemoteAddress = p.RemoteAddr().String()
// Gather all the running protocol infos
for _, proto := range p.running {
protoInfo := interface{}("unknown")
if query := proto.Protocol.PeerInfo; query != nil {
if metadata := query(p.ID()); metadata != nil {
protoInfo = metadata
} else {
protoInfo = "handshake"
}
}
info.Protocols[proto.Name] = protoInfo
}
return info
}

View File

@ -16,7 +16,11 @@
package p2p
import "fmt"
import (
"fmt"
"github.com/ethereum/go-ethereum/p2p/discover"
)
// Protocol represents a P2P subprotocol implementation.
type Protocol struct {
@ -39,6 +43,15 @@ type Protocol struct {
// any protocol-level error (such as an I/O error) that is
// encountered.
Run func(peer *Peer, rw MsgReadWriter) error
// NodeInfo is an optional helper method to retrieve protocol specific metadata
// about the host node.
NodeInfo func() interface{}
// PeerInfo is an optional helper method to retrieve protocol specific metadata
// about a certain peer in the network. If an info retrieval function is set,
// but returns nil, it is assumed that the protocol handshake is still running.
PeerInfo func(id discover.NodeID) interface{}
}
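A hedged sketch of how a subprotocol might fill in the two new hooks; mgr, runDemo and the map payloads are placeholders, not a real API:

proto := p2p.Protocol{
	Name:    "demo",
	Version: 1,
	Length:  8,
	Run:     runDemo, // assumed func(*p2p.Peer, p2p.MsgReadWriter) error
	NodeInfo: func() interface{} {
		// Host-wide metadata, e.g. the current chain head.
		return map[string]interface{}{"head": mgr.headHash()}
	},
	PeerInfo: func(id discover.NodeID) interface{} {
		// Per-peer metadata. Returning nil signals that the protocol
		// handshake with this peer is still running.
		if p := mgr.peer(id); p != nil {
			return map[string]interface{}{"head": p.head}
		}
		return nil
	},
}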
func (p Protocol) cap() Cap {

View File

@ -24,11 +24,14 @@ import (
"crypto/elliptic"
"crypto/hmac"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
mrand "math/rand"
"net"
"os"
"sync"
"time"
@ -51,9 +54,10 @@ const (
authMsgLen = sigLen + shaLen + pubLen + shaLen + 1
authRespLen = pubLen + shaLen + 1
eciesBytes = 65 + 16 + 32
encAuthMsgLen = authMsgLen + eciesBytes // size of the final ECIES payload sent as initiator's handshake
encAuthRespLen = authRespLen + eciesBytes // size of the final ECIES payload sent as receiver's handshake
eciesOverhead = 65 /* pubkey */ + 16 /* IV */ + 32 /* MAC */
encAuthMsgLen = authMsgLen + eciesOverhead // size of encrypted pre-EIP-8 initiator handshake
encAuthRespLen = authRespLen + eciesOverhead // size of encrypted pre-EIP-8 handshake reply
// total timeout for encryption handshake and protocol
// handshake in both directions.
@ -151,10 +155,6 @@ func readProtocolHandshake(rw MsgReader, our *protoHandshake) (*protoHandshake,
if err := msg.Decode(&hs); err != nil {
return nil, err
}
// validate handshake info
if hs.Version != our.Version {
return nil, DiscIncompatibleVersion
}
if (hs.ID == discover.NodeID{}) {
return nil, DiscInvalidIdentity
}
@ -200,6 +200,29 @@ type secrets struct {
Token []byte
}
// RLPx v4 handshake auth (defined in EIP-8).
type authMsgV4 struct {
gotPlain bool // whether read packet had plain format.
Signature [sigLen]byte
InitiatorPubkey [pubLen]byte
Nonce [shaLen]byte
Version uint
// Ignore additional fields (forward-compatibility)
Rest []rlp.RawValue `rlp:"tail"`
}
// RLPx v4 handshake response (defined in EIP-8).
type authRespV4 struct {
RandomPubkey [pubLen]byte
Nonce [shaLen]byte
Version uint
// Ignore additional fields (forward-compatibility)
Rest []rlp.RawValue `rlp:"tail"`
}
// secrets is called after the handshake is completed.
// It extracts the connection secrets from the handshake values.
func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
@ -215,7 +238,6 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
RemoteID: h.remoteID,
AES: aesSecret,
MAC: crypto.Sha3(ecdheSecret, aesSecret),
Token: crypto.Sha3(sharedSecret),
}
// setup sha3 instances for the MACs
@ -234,114 +256,89 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
return s, nil
}
func (h *encHandshake) ecdhShared(prv *ecdsa.PrivateKey) ([]byte, error) {
// staticSharedSecret returns the static shared secret, the result
// of key agreement between the local and remote static node key.
func (h *encHandshake) staticSharedSecret(prv *ecdsa.PrivateKey) ([]byte, error) {
return ecies.ImportECDSA(prv).GenerateShared(h.remotePub, sskLen, sskLen)
}
var configSendEIP = os.Getenv("RLPX_EIP8") != ""
// initiatorEncHandshake negotiates a session token on conn.
// it should be called on the dialing side of the connection.
//
// prv is the local client's private key.
// token is the token from a previous session with this node.
func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID discover.NodeID, token []byte) (s secrets, err error) {
h, err := newInitiatorHandshake(remoteID)
h := &encHandshake{initiator: true, remoteID: remoteID}
authMsg, err := h.makeAuthMsg(prv, token)
if err != nil {
return s, err
}
auth, err := h.authMsg(prv, token)
var authPacket []byte
if configSendEIP {
authPacket, err = sealEIP8(authMsg, h)
} else {
authPacket, err = authMsg.sealPlain(h)
}
if err != nil {
return s, err
}
if _, err = conn.Write(auth); err != nil {
if _, err = conn.Write(authPacket); err != nil {
return s, err
}
response := make([]byte, encAuthRespLen)
if _, err = io.ReadFull(conn, response); err != nil {
authRespMsg := new(authRespV4)
authRespPacket, err := readHandshakeMsg(authRespMsg, encAuthRespLen, prv, conn)
if err != nil {
return s, err
}
if err := h.decodeAuthResp(response, prv); err != nil {
if err := h.handleAuthResp(authRespMsg); err != nil {
return s, err
}
return h.secrets(auth, response)
return h.secrets(authPacket, authRespPacket)
}
func newInitiatorHandshake(remoteID discover.NodeID) (*encHandshake, error) {
rpub, err := remoteID.Pubkey()
// makeAuthMsg creates the initiator handshake message.
func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey, token []byte) (*authMsgV4, error) {
rpub, err := h.remoteID.Pubkey()
if err != nil {
return nil, fmt.Errorf("bad remoteID: %v", err)
}
// generate random initiator nonce
n := make([]byte, shaLen)
if _, err := rand.Read(n); err != nil {
h.remotePub = ecies.ImportECDSAPublic(rpub)
// Generate random initiator nonce.
h.initNonce = make([]byte, shaLen)
if _, err := rand.Read(h.initNonce); err != nil {
return nil, err
}
// generate random keypair to use for signing
randpriv, err := ecies.GenerateKey(rand.Reader, crypto.S256(), nil)
// Generate a random keypair to use for ECDH.
h.randomPrivKey, err = ecies.GenerateKey(rand.Reader, crypto.S256(), nil)
if err != nil {
return nil, err
}
h := &encHandshake{
initiator: true,
remoteID: remoteID,
remotePub: ecies.ImportECDSAPublic(rpub),
initNonce: n,
randomPrivKey: randpriv,
}
return h, nil
}
// authMsg creates an encrypted initiator handshake message.
func (h *encHandshake) authMsg(prv *ecdsa.PrivateKey, token []byte) ([]byte, error) {
var tokenFlag byte
if token == nil {
// no session token found means we need to generate shared secret.
// ecies shared secret is used as initial session token for new peers
// generate shared key from prv and remote pubkey
var err error
if token, err = h.ecdhShared(prv); err != nil {
return nil, err
}
} else {
// for known peers, we use stored token from the previous session
tokenFlag = 0x01
// Sign known message: static-shared-secret ^ nonce
token, err = h.staticSharedSecret(prv)
if err != nil {
return nil, err
}
// sign known message:
// ecdh-shared-secret^nonce for new peers
// token^nonce for old peers
signed := xor(token, h.initNonce)
signature, err := crypto.Sign(signed, h.randomPrivKey.ExportECDSA())
if err != nil {
return nil, err
}
// encode auth message
// signature || sha3(ecdhe-random-pubk) || pubk || nonce || token-flag
msg := make([]byte, authMsgLen)
n := copy(msg, signature)
n += copy(msg[n:], crypto.Sha3(exportPubkey(&h.randomPrivKey.PublicKey)))
n += copy(msg[n:], crypto.FromECDSAPub(&prv.PublicKey)[1:])
n += copy(msg[n:], h.initNonce)
msg[n] = tokenFlag
// encrypt auth message using remote-pubk
return ecies.Encrypt(rand.Reader, h.remotePub, msg, nil, nil)
msg := new(authMsgV4)
copy(msg.Signature[:], signature)
copy(msg.InitiatorPubkey[:], crypto.FromECDSAPub(&prv.PublicKey)[1:])
copy(msg.Nonce[:], h.initNonce)
msg.Version = 4
return msg, nil
}
// decodeAuthResp decode an encrypted authentication response message.
func (h *encHandshake) decodeAuthResp(auth []byte, prv *ecdsa.PrivateKey) error {
msg, err := crypto.Decrypt(prv, auth)
if err != nil {
return fmt.Errorf("could not decrypt auth response (%v)", err)
}
h.respNonce = msg[pubLen : pubLen+shaLen]
h.remoteRandomPub, err = importPublicKey(msg[:pubLen])
if err != nil {
return err
}
// ignore token flag for now
return nil
func (h *encHandshake) handleAuthResp(msg *authRespV4) (err error) {
h.respNonce = msg.Nonce[:]
h.remoteRandomPub, err = importPublicKey(msg.RandomPubkey[:])
return err
}
// receiverEncHandshake negotiates a session token on conn.
@ -350,99 +347,165 @@ func (h *encHandshake) decodeAuthResp(auth []byte, prv *ecdsa.PrivateKey) error
// prv is the local client's private key.
// token is the token from a previous session with this node.
func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, token []byte) (s secrets, err error) {
// read remote auth sent by initiator.
auth := make([]byte, encAuthMsgLen)
if _, err := io.ReadFull(conn, auth); err != nil {
return s, err
}
h, err := decodeAuthMsg(prv, token, auth)
authMsg := new(authMsgV4)
authPacket, err := readHandshakeMsg(authMsg, encAuthMsgLen, prv, conn)
if err != nil {
return s, err
}
h := new(encHandshake)
if err := h.handleAuthMsg(authMsg, prv); err != nil {
return s, err
}
// send auth response
resp, err := h.authResp(prv, token)
authRespMsg, err := h.makeAuthResp()
if err != nil {
return s, err
}
if _, err = conn.Write(resp); err != nil {
var authRespPacket []byte
if authMsg.gotPlain {
authRespPacket, err = authRespMsg.sealPlain(h)
} else {
authRespPacket, err = sealEIP8(authRespMsg, h)
}
if err != nil {
return s, err
}
return h.secrets(auth, resp)
if _, err = conn.Write(authRespPacket); err != nil {
return s, err
}
return h.secrets(authPacket, authRespPacket)
}
func decodeAuthMsg(prv *ecdsa.PrivateKey, token []byte, auth []byte) (*encHandshake, error) {
var err error
h := new(encHandshake)
// generate random keypair for session
h.randomPrivKey, err = ecies.GenerateKey(rand.Reader, crypto.S256(), nil)
func (h *encHandshake) handleAuthMsg(msg *authMsgV4, prv *ecdsa.PrivateKey) error {
// Import the remote identity.
h.initNonce = msg.Nonce[:]
h.remoteID = msg.InitiatorPubkey
rpub, err := h.remoteID.Pubkey()
if err != nil {
return nil, err
return fmt.Errorf("bad remoteID: %#v", err)
}
// generate random nonce
h.remotePub = ecies.ImportECDSAPublic(rpub)
// Generate random keypair for ECDH.
// If a private key is already set, use it instead of generating one (for testing).
if h.randomPrivKey == nil {
h.randomPrivKey, err = ecies.GenerateKey(rand.Reader, crypto.S256(), nil)
if err != nil {
return err
}
}
// Check the signature.
token, err := h.staticSharedSecret(prv)
if err != nil {
return err
}
signedMsg := xor(token, h.initNonce)
remoteRandomPub, err := secp256k1.RecoverPubkey(signedMsg, msg.Signature[:])
if err != nil {
return err
}
h.remoteRandomPub, _ = importPublicKey(remoteRandomPub)
return nil
}
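The sign/recover pair above and in makeAuthMsg is the core of the handshake: the initiator signs static-shared-secret XOR init-nonce with its ephemeral key, and the receiver recovers that ephemeral public key from the signature alone, so it never travels in the clear. A self-contained sketch of that symmetry (ephemeral, token and nonce are freshly generated stand-ins, not real handshake state):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/secp256k1"
)

func main() {
	ephemeral, _ := crypto.GenerateKey() // initiator's random keypair

	token := make([]byte, 32) // stand-in for the static shared secret
	nonce := make([]byte, 32) // stand-in for the initiator nonce
	rand.Read(token)
	rand.Read(nonce)

	// xor(token, nonce): the message makeAuthMsg signs.
	signed := make([]byte, 32)
	for i := range signed {
		signed[i] = token[i] ^ nonce[i]
	}

	sig, _ := crypto.Sign(signed, ephemeral)
	recovered, _ := secp256k1.RecoverPubkey(signed, sig)

	// true: the receiver has learned the ephemeral public key.
	fmt.Println(bytes.Equal(recovered, crypto.FromECDSAPub(&ephemeral.PublicKey)))
}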
func (h *encHandshake) makeAuthResp() (msg *authRespV4, err error) {
// Generate random nonce.
h.respNonce = make([]byte, shaLen)
if _, err = rand.Read(h.respNonce); err != nil {
return nil, err
}
msg, err := crypto.Decrypt(prv, auth)
if err != nil {
return nil, fmt.Errorf("could not decrypt auth message (%v)", err)
}
// decode message parameters
// signature || sha3(ecdhe-random-pubk) || pubk || nonce || token-flag
h.initNonce = msg[authMsgLen-shaLen-1 : authMsgLen-1]
copy(h.remoteID[:], msg[sigLen+shaLen:sigLen+shaLen+pubLen])
rpub, err := h.remoteID.Pubkey()
if err != nil {
return nil, fmt.Errorf("bad remoteID: %#v", err)
}
h.remotePub = ecies.ImportECDSAPublic(rpub)
// recover remote random pubkey from signed message.
if token == nil {
// TODO: it is an error if the initiator has a token and we don't. check that.
// no session token means we need to generate shared secret.
// ecies shared secret is used as initial session token for new peers.
// generate shared key from prv and remote pubkey.
if token, err = h.ecdhShared(prv); err != nil {
return nil, err
}
}
signedMsg := xor(token, h.initNonce)
remoteRandomPub, err := secp256k1.RecoverPubkey(signedMsg, msg[:sigLen])
if err != nil {
return nil, err
}
// validate the sha3 of recovered pubkey
remoteRandomPubMAC := msg[sigLen : sigLen+shaLen]
shaRemoteRandomPub := crypto.Sha3(remoteRandomPub[1:])
if !bytes.Equal(remoteRandomPubMAC, shaRemoteRandomPub) {
return nil, fmt.Errorf("sha3 of recovered ephemeral pubkey does not match checksum in auth message")
}
h.remoteRandomPub, _ = importPublicKey(remoteRandomPub)
return h, nil
msg = new(authRespV4)
copy(msg.Nonce[:], h.respNonce)
copy(msg.RandomPubkey[:], exportPubkey(&h.randomPrivKey.PublicKey))
msg.Version = 4
return msg, nil
}
// authResp generates the encrypted authentication response message.
func (h *encHandshake) authResp(prv *ecdsa.PrivateKey, token []byte) ([]byte, error) {
// responder auth message
// E(remote-pubk, ecdhe-random-pubk || nonce || 0x0)
resp := make([]byte, authRespLen)
n := copy(resp, exportPubkey(&h.randomPrivKey.PublicKey))
n += copy(resp[n:], h.respNonce)
if token == nil {
resp[n] = 0
} else {
resp[n] = 1
func (msg *authMsgV4) sealPlain(h *encHandshake) ([]byte, error) {
buf := make([]byte, authMsgLen)
n := copy(buf, msg.Signature[:])
n += copy(buf[n:], crypto.Sha3(exportPubkey(&h.randomPrivKey.PublicKey)))
n += copy(buf[n:], msg.InitiatorPubkey[:])
n += copy(buf[n:], msg.Nonce[:])
buf[n] = 0 // token-flag
return ecies.Encrypt(rand.Reader, h.remotePub, buf, nil, nil)
}
func (msg *authMsgV4) decodePlain(input []byte) {
n := copy(msg.Signature[:], input)
n += shaLen // skip sha3(initiator-ephemeral-pubk)
n += copy(msg.InitiatorPubkey[:], input[n:])
n += copy(msg.Nonce[:], input[n:])
msg.Version = 4
msg.gotPlain = true
}
func (msg *authRespV4) sealPlain(hs *encHandshake) ([]byte, error) {
buf := make([]byte, authRespLen)
n := copy(buf, msg.RandomPubkey[:])
n += copy(buf[n:], msg.Nonce[:])
return ecies.Encrypt(rand.Reader, hs.remotePub, buf, nil, nil)
}
func (msg *authRespV4) decodePlain(input []byte) {
n := copy(msg.RandomPubkey[:], input)
n += copy(msg.Nonce[:], input[n:])
msg.Version = 4
}
var padSpace = make([]byte, 300)
func sealEIP8(msg interface{}, h *encHandshake) ([]byte, error) {
buf := new(bytes.Buffer)
if err := rlp.Encode(buf, msg); err != nil {
return nil, err
}
// encrypt using remote-pubk
return ecies.Encrypt(rand.Reader, h.remotePub, resp, nil, nil)
// pad with random amount of data. the amount needs to be at least 100 bytes to make
// the message distinguishable from pre-EIP-8 handshakes.
pad := padSpace[:mrand.Intn(len(padSpace)-100)+100]
buf.Write(pad)
prefix := make([]byte, 2)
binary.BigEndian.PutUint16(prefix, uint16(buf.Len()+eciesOverhead))
enc, err := ecies.Encrypt(rand.Reader, h.remotePub, buf.Bytes(), nil, prefix)
return append(prefix, enc...), err
}
type plainDecoder interface {
decodePlain([]byte)
}
func readHandshakeMsg(msg plainDecoder, plainSize int, prv *ecdsa.PrivateKey, r io.Reader) ([]byte, error) {
buf := make([]byte, plainSize)
if _, err := io.ReadFull(r, buf); err != nil {
return buf, err
}
// Attempt decoding pre-EIP-8 "plain" format.
key := ecies.ImportECDSA(prv)
if dec, err := key.Decrypt(rand.Reader, buf, nil, nil); err == nil {
msg.decodePlain(dec)
return buf, nil
}
// Could be EIP-8 format, try that.
prefix := buf[:2]
size := binary.BigEndian.Uint16(prefix)
if size < uint16(plainSize) {
return buf, fmt.Errorf("size underflow, need at least %d bytes", plainSize)
}
buf = append(buf, make([]byte, size-uint16(plainSize)+2)...)
if _, err := io.ReadFull(r, buf[plainSize:]); err != nil {
return buf, err
}
dec, err := key.Decrypt(rand.Reader, buf[2:], nil, prefix)
if err != nil {
return buf, err
}
// Can't use rlp.DecodeBytes here because it rejects
// trailing data (forward-compatibility).
s := rlp.NewStream(bytes.NewReader(dec), 0)
return buf, s.Decode(msg)
}
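The sizes readHandshakeMsg depends on follow directly from the constants defined earlier in this file (sigLen=65, shaLen=32, pubLen=64); spelled out, this is nothing new, just the arithmetic:

const (
	eciesOverhead  = 65 + 16 + 32          // 113: pubkey + IV + MAC
	authMsgLen     = 65 + 32 + 64 + 32 + 1 // 194: sig + sha + pubk + nonce + flag
	authRespLen    = 64 + 32 + 1           // 97:  pubk + nonce + flag
	encAuthMsgLen  = authMsgLen + eciesOverhead  // 307
	encAuthRespLen = authRespLen + eciesOverhead // 210
)

The reader always consumes the fixed pre-EIP-8 size first and attempts the plain decrypt; only when that fails does it reinterpret the first two bytes as the EIP-8 big-endian size prefix and read the remaining size - plainSize + 2 bytes, which is why a declared size below the plain size is rejected as a size underflow.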
// importPublicKey unmarshals 512 bit public keys.
@ -458,7 +521,11 @@ func importPublicKey(pubKey []byte) (*ecies.PublicKey, error) {
return nil, fmt.Errorf("invalid public key length %v (expect 64/65)", len(pubKey))
}
// TODO: fewer pointless conversions
return ecies.ImportECDSAPublic(crypto.ToECDSAPub(pubKey65)), nil
pub := crypto.ToECDSAPub(pubKey65)
if pub.X == nil {
return nil, fmt.Errorf("invalid public key")
}
return ecies.ImportECDSAPublic(pub), nil
}
func exportPubkey(pub *ecies.PublicKey) []byte {

View File

@ -21,6 +21,7 @@ import (
"crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"reflect"
@ -160,6 +161,7 @@ func TestProtocolHandshake(t *testing.T) {
wg.Add(2)
go func() {
defer wg.Done()
defer fd1.Close()
rlpx := newRLPX(fd0)
remid, err := rlpx.doEncHandshake(prv0, node1)
if err != nil {
@ -176,6 +178,7 @@ func TestProtocolHandshake(t *testing.T) {
t.Errorf("dial side proto handshake error: %v", err)
return
}
phs.Rest = nil
if !reflect.DeepEqual(phs, hs1) {
t.Errorf("dial side proto handshake mismatch:\ngot: %s\nwant: %s\n", spew.Sdump(phs), spew.Sdump(hs1))
return
@ -184,6 +187,7 @@ func TestProtocolHandshake(t *testing.T) {
}()
go func() {
defer wg.Done()
defer fd1.Close()
rlpx := newRLPX(fd1)
remid, err := rlpx.doEncHandshake(prv1, nil)
if err != nil {
@ -200,6 +204,7 @@ func TestProtocolHandshake(t *testing.T) {
t.Errorf("listen side proto handshake error: %v", err)
return
}
phs.Rest = nil
if !reflect.DeepEqual(phs, hs0) {
t.Errorf("listen side proto handshake mismatch:\ngot: %s\nwant: %s\n", spew.Sdump(phs), spew.Sdump(hs0))
return
@ -214,7 +219,6 @@ func TestProtocolHandshake(t *testing.T) {
func TestProtocolHandshakeErrors(t *testing.T) {
our := &protoHandshake{Version: 3, Caps: []Cap{{"foo", 2}, {"bar", 3}}, Name: "quux"}
id := randomID()
tests := []struct {
code uint64
msg interface{}
@ -240,11 +244,6 @@ func TestProtocolHandshakeErrors(t *testing.T) {
msg: []byte{1, 2, 3},
err: newPeerError(errInvalidMsg, "(code 0) (size 4) rlp: expected input list for p2p.protoHandshake"),
},
{
code: handshakeMsg,
msg: &protoHandshake{Version: 9944, ID: id},
err: DiscIncompatibleVersion,
},
{
code: handshakeMsg,
msg: &protoHandshake{Version: 3},
@ -372,3 +371,227 @@ func TestRLPXFrameRW(t *testing.T) {
}
}
}
type handshakeAuthTest struct {
input string
isPlain bool
wantVersion uint
wantRest []rlp.RawValue
}
var eip8HandshakeAuthTests = []handshakeAuthTest{
// (Auth₁) RLPx v4 plain encoding
{
input: `
048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf
913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b22c005e9e3a49d6448610a58e98744
ba3ac0399e82692d67c1f58849050b3024e21a52c9d3b01d871ff5f210817912773e610443a9ef14
2e91cdba0bd77b5fdf0769b05671fc35f83d83e4d3b0b000c6b2a1b1bba89e0fc51bf4e460df3105
c444f14be226458940d6061c296350937ffd5e3acaceeaaefd3c6f74be8e23e0f45163cc7ebd7622
0f0128410fd05250273156d548a414444ae2f7dea4dfca2d43c057adb701a715bf59f6fb66b2d1d2
0f2c703f851cbf5ac47396d9ca65b6260bd141ac4d53e2de585a73d1750780db4c9ee4cd4d225173
a4592ee77e2bd94d0be3691f3b406f9bba9b591fc63facc016bfa8
`,
isPlain: true,
wantVersion: 4,
},
// (Auth₂) EIP-8 encoding
{
input: `
01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b
0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b1593f0d84ac74f6e475f1b8d56116b84
9634a8c458705bf83a626ea0384d4d7341aae591fae42ce6bd5c850bfe0b999a694a49bbbaf3ef6c
da61110601d3b4c02ab6c30437257a6e0117792631a4b47c1d52fc0f8f89caadeb7d02770bf999cc
147d2df3b62e1ffb2c9d8c125a3984865356266bca11ce7d3a688663a51d82defaa8aad69da39ab6
d5470e81ec5f2a7a47fb865ff7cca21516f9299a07b1bc63ba56c7a1a892112841ca44b6e0034dee
70c9adabc15d76a54f443593fafdc3b27af8059703f88928e199cb122362a4b35f62386da7caad09
c001edaeb5f8a06d2b26fb6cb93c52a9fca51853b68193916982358fe1e5369e249875bb8d0d0ec3
6f917bc5e1eafd5896d46bd61ff23f1a863a8a8dcd54c7b109b771c8e61ec9c8908c733c0263440e
2aa067241aaa433f0bb053c7b31a838504b148f570c0ad62837129e547678c5190341e4f1693956c
3bf7678318e2d5b5340c9e488eefea198576344afbdf66db5f51204a6961a63ce072c8926c
`,
wantVersion: 4,
wantRest: []rlp.RawValue{},
},
// (Auth₃) RLPx v4 EIP-8 encoding with version 56, additional list elements
{
input: `
01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7
2471f93a91b493f8e00abc4b80f682973de715d77ba3a005a242eb859f9a211d93a347fa64b597bf
280a6b88e26299cf263b01b8dfdb712278464fd1c25840b995e84d367d743f66c0e54a586725b7bb
f12acca27170ae3283c1073adda4b6d79f27656993aefccf16e0d0409fe07db2dc398a1b7e8ee93b
cd181485fd332f381d6a050fba4c7641a5112ac1b0b61168d20f01b479e19adf7fdbfa0905f63352
bfc7e23cf3357657455119d879c78d3cf8c8c06375f3f7d4861aa02a122467e069acaf513025ff19
6641f6d2810ce493f51bee9c966b15c5043505350392b57645385a18c78f14669cc4d960446c1757
1b7c5d725021babbcd786957f3d17089c084907bda22c2b2675b4378b114c601d858802a55345a15
116bc61da4193996187ed70d16730e9ae6b3bb8787ebcaea1871d850997ddc08b4f4ea668fbf3740
7ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11f5b575a4b44e36e2bfb2
f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31aa27504e2a533af4cef3b623f4791b2cca6
d490
`,
wantVersion: 56,
wantRest: []rlp.RawValue{{0x01}, {0x02}, {0xC2, 0x04, 0x05}},
},
}
type handshakeAckTest struct {
input string
wantVersion uint
wantRest []rlp.RawValue
}
var eip8HandshakeRespTests = []handshakeAckTest{
// (Ack₁) RLPx v4 plain encoding
{
input: `
049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662
b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4ede0e09a2d5f585b26513cb794d963
5a57563921c04a9090b4f14ee42be1a5461049af4ea7a7f49bf4c97a352d39c8d02ee4acc416388c
1c66cec761d2bc1c72da6ba143477f049c9d2dde846c252c111b904f630ac98e51609b3b1f58168d
dca6505b7196532e5f85b259a20c45e1979491683fee108e9660edbf38f3add489ae73e3dda2c71b
d1497113d5c755e942d1
`,
wantVersion: 4,
},
// (Ack₂) EIP-8 encoding
{
input: `
01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470
b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de
05d59279e3524ab26ef753a0095637ac88f2b499b9914b5f64e143eae548a1066e14cd2f4bd7f814
c4652f11b254f8a2d0191e2f5546fae6055694aed14d906df79ad3b407d94692694e259191cde171
ad542fc588fa2b7333313d82a9f887332f1dfc36cea03f831cb9a23fea05b33deb999e85489e645f
6aab1872475d488d7bd6c7c120caf28dbfc5d6833888155ed69d34dbdc39c1f299be1057810f34fb
e754d021bfca14dc989753d61c413d261934e1a9c67ee060a25eefb54e81a4d14baff922180c395d
3f998d70f46f6b58306f969627ae364497e73fc27f6d17ae45a413d322cb8814276be6ddd13b885b
201b943213656cde498fa0e9ddc8e0b8f8a53824fbd82254f3e2c17e8eaea009c38b4aa0a3f306e8
797db43c25d68e86f262e564086f59a2fc60511c42abfb3057c247a8a8fe4fb3ccbadde17514b7ac
8000cdb6a912778426260c47f38919a91f25f4b5ffb455d6aaaf150f7e5529c100ce62d6d92826a7
1778d809bdf60232ae21ce8a437eca8223f45ac37f6487452ce626f549b3b5fdee26afd2072e4bc7
5833c2464c805246155289f4
`,
wantVersion: 4,
wantRest: []rlp.RawValue{},
},
// (Ack₃) EIP-8 encoding with version 57, additional list elements
{
input: `
01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7
ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0
3a06c9fd5e35737e417bc28c1cbf5e5dfc666de7090f69c3b29754725f84f75382891c561040ea1d
dc0d8f381ed1b9d0d4ad2a0ec021421d847820d6fa0ba66eaf58175f1b235e851c7e2124069fbc20
2888ddb3ac4d56bcbd1b9b7eab59e78f2e2d400905050f4a92dec1c4bdf797b3fc9b2f8e84a482f3
d800386186712dae00d5c386ec9387a5e9c9a1aca5a573ca91082c7d68421f388e79127a5177d4f8
590237364fd348c9611fa39f78dcdceee3f390f07991b7b47e1daa3ebcb6ccc9607811cb17ce51f1
c8c2c5098dbdd28fca547b3f58c01a424ac05f869f49c6a34672ea2cbbc558428aa1fe48bbfd6115
8b1b735a65d99f21e70dbc020bfdface9f724a0d1fb5895db971cc81aa7608baa0920abb0a565c9c
436e2fd13323428296c86385f2384e408a31e104670df0791d93e743a3a5194ee6b076fb6323ca59
3011b7348c16cf58f66b9633906ba54a2ee803187344b394f75dd2e663a57b956cb830dd7a908d4f
39a2336a61ef9fda549180d4ccde21514d117b6c6fd07a9102b5efe710a32af4eeacae2cb3b1dec0
35b9593b48b9d3ca4c13d245d5f04169b0b1
`,
wantVersion: 57,
wantRest: []rlp.RawValue{{0x06}, {0xC2, 0x07, 0x08}, {0x81, 0xFA}},
},
}
func TestHandshakeForwardCompatibility(t *testing.T) {
var (
keyA, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
keyB, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
pubA = crypto.FromECDSAPub(&keyA.PublicKey)[1:]
pubB = crypto.FromECDSAPub(&keyB.PublicKey)[1:]
ephA, _ = crypto.HexToECDSA("869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d")
ephB, _ = crypto.HexToECDSA("e238eb8e04fee6511ab04c6dd3c89ce097b11f25d584863ac2b6d5b35b1847e4")
ephPubA = crypto.FromECDSAPub(&ephA.PublicKey)[1:]
ephPubB = crypto.FromECDSAPub(&ephB.PublicKey)[1:]
nonceA = unhex("7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6")
nonceB = unhex("559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd")
_, _, _, _ = pubA, pubB, ephPubA, ephPubB
authSignature = unhex("299ca6acfd35e3d72d8ba3d1e2b60b5561d5af5218eb5bc182045769eb4226910a301acae3b369fffc4a4899d6b02531e89fd4fe36a2cf0d93607ba470b50f7800")
_ = authSignature
)
makeAuth := func(test handshakeAuthTest) *authMsgV4 {
msg := &authMsgV4{Version: test.wantVersion, Rest: test.wantRest, gotPlain: test.isPlain}
copy(msg.Signature[:], authSignature)
copy(msg.InitiatorPubkey[:], pubA)
copy(msg.Nonce[:], nonceA)
return msg
}
makeAck := func(test handshakeAckTest) *authRespV4 {
msg := &authRespV4{Version: test.wantVersion, Rest: test.wantRest}
copy(msg.RandomPubkey[:], ephPubB)
copy(msg.Nonce[:], nonceB)
return msg
}
// check auth msg parsing
for _, test := range eip8HandshakeAuthTests {
r := bytes.NewReader(unhex(test.input))
msg := new(authMsgV4)
ciphertext, err := readHandshakeMsg(msg, encAuthMsgLen, keyB, r)
if err != nil {
t.Errorf("error for input %x:\n %v", unhex(test.input), err)
continue
}
if !bytes.Equal(ciphertext, unhex(test.input)) {
t.Errorf("wrong ciphertext for input %x:\n %x", unhex(test.input), ciphertext)
}
want := makeAuth(test)
if !reflect.DeepEqual(msg, want) {
t.Errorf("wrong msg for input %x:\ngot %s\nwant %s", unhex(test.input), spew.Sdump(msg), spew.Sdump(want))
}
}
// check auth resp parsing
for _, test := range eip8HandshakeRespTests {
input := unhex(test.input)
r := bytes.NewReader(input)
msg := new(authRespV4)
ciphertext, err := readHandshakeMsg(msg, encAuthRespLen, keyA, r)
if err != nil {
t.Errorf("error for input %x:\n %v", input, err)
continue
}
if !bytes.Equal(ciphertext, input) {
t.Errorf("wrong ciphertext for input %x:\n %x", input, err)
}
want := makeAck(test)
if !reflect.DeepEqual(msg, want) {
t.Errorf("wrong msg for input %x:\ngot %s\nwant %s", input, spew.Sdump(msg), spew.Sdump(want))
}
}
// check derivation for (Auth₂, Ack₂) on recipient side
var (
hs = &encHandshake{
initiator: false,
respNonce: nonceB,
randomPrivKey: ecies.ImportECDSA(ephB),
}
authCiphertext = unhex(eip8HandshakeAuthTests[1].input)
authRespCiphertext = unhex(eip8HandshakeRespTests[1].input)
authMsg = makeAuth(eip8HandshakeAuthTests[1])
wantAES = unhex("80e8632c05fed6fc2a13b0f8d31a3cf645366239170ea067065aba8e28bac487")
wantMAC = unhex("2ea74ec5dae199227dff1af715362700e989d889d7a493cb0639691efb8e5f98")
wantFooIngressHash = unhex("0c7ec6340062cc46f5e9f1e3cf86f8c8c403c5a0964f5df0ebd34a75ddc86db5")
)
if err := hs.handleAuthMsg(authMsg, keyB); err != nil {
t.Fatalf("handleAuthMsg: %v", err)
}
derived, err := hs.secrets(authCiphertext, authRespCiphertext)
if err != nil {
t.Fatalf("secrets: %v", err)
}
if !bytes.Equal(derived.AES, wantAES) {
t.Errorf("aes-secret mismatch:\ngot %x\nwant %x", derived.AES, wantAES)
}
if !bytes.Equal(derived.MAC, wantMAC) {
t.Errorf("mac-secret mismatch:\ngot %x\nwant %x", derived.MAC, wantMAC)
}
io.WriteString(derived.IngressMAC, "foo")
fooIngressHash := derived.IngressMAC.Sum(nil)
if !bytes.Equal(fooIngressHash, wantFooIngressHash) {
t.Errorf("ingress-mac('foo') mismatch:\ngot %x\nwant %x", fooIngressHash, wantFooIngressHash)
}
}

View File

@ -689,3 +689,66 @@ func (srv *Server) runPeer(p *Peer) {
NumConnections: srv.PeerCount(),
})
}
// NodeInfo represents a short summary of the information known about the host.
type NodeInfo struct {
ID string `json:"id"` // Unique node identifier (also the encryption key)
Name string `json:"name"` // Name of the node, including client type, version, OS, custom data
Enode string `json:"enode"` // Enode URL for adding this peer from remote peers
IP string `json:"ip"` // IP address of the node
Ports struct {
Discovery int `json:"discovery"` // UDP listening port for discovery protocol
Listener int `json:"listener"` // TCP listening port for RLPx
} `json:"ports"`
ListenAddr string `json:"listenAddr"`
Protocols map[string]interface{} `json:"protocols"`
}
// Info gathers and returns a collection of metadata known about the host.
func (srv *Server) NodeInfo() *NodeInfo {
node := srv.Self()
// Gather and assemble the generic node infos
info := &NodeInfo{
Name: srv.Name,
Enode: node.String(),
ID: node.ID.String(),
IP: node.IP.String(),
ListenAddr: srv.ListenAddr,
Protocols: make(map[string]interface{}),
}
info.Ports.Discovery = int(node.UDP)
info.Ports.Listener = int(node.TCP)
// Gather all the running protocol infos (only once per protocol type)
for _, proto := range srv.Protocols {
if _, ok := info.Protocols[proto.Name]; !ok {
nodeInfo := interface{}("unknown")
if query := proto.NodeInfo; query != nil {
nodeInfo = proto.NodeInfo()
}
info.Protocols[proto.Name] = nodeInfo
}
}
return info
}
// PeersInfo returns an array of metadata objects describing connected peers.
func (srv *Server) PeersInfo() []*PeerInfo {
// Gather all the generic and sub-protocol specific infos
infos := make([]*PeerInfo, 0, srv.PeerCount())
for _, peer := range srv.Peers() {
if peer != nil {
infos = append(infos, peer.Info())
}
}
// Sort the result array alphabetically by node identifier
for i := 0; i < len(infos); i++ {
for j := i + 1; j < len(infos); j++ {
if infos[i].ID > infos[j].ID {
infos[i], infos[j] = infos[j], infos[i]
}
}
}
return infos
}
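A hedged usage sketch: both structs are plain JSON-tagged values, so surfacing them (as the node admin API does) is a straight marshal. Assumes srv is a live *p2p.Server plus imports of encoding/json and fmt:

out, _ := json.MarshalIndent(srv.NodeInfo(), "", "  ")
fmt.Println(string(out)) // id, name, enode, ip, ports, protocols...

for _, info := range srv.PeersInfo() {
	// PeersInfo already returns the slice sorted by node ID.
	fmt.Println(info.ID, info.Name, info.Network.RemoteAddress)
}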

View File

@ -25,9 +25,10 @@ var (
MaximumExtraDataSize = big.NewInt(32) // Maximum size extra data may be after Genesis.
ExpByteGas = big.NewInt(10) // Times ceil(log256(exponent)) for the EXP instruction.
SloadGas = big.NewInt(50) // Multiplied by the number of 32-byte words that are copied (round up) for any *COPY operation and added.
CallValueTransferGas = big.NewInt(9000) // Paid for CALL when the value transfor is non-zero.
CallValueTransferGas = big.NewInt(9000) // Paid for CALL when the value transfer is non-zero.
CallNewAccountGas = big.NewInt(25000) // Paid for CALL when the destination address didn't exist prior.
TxGas = big.NewInt(21000) // Per transaction. NOTE: Not payable on data of calls between transactions.
TxGas = big.NewInt(21000) // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions.
TxGasContractCreation = big.NewInt(53000) // Per transaction that creates a contract. NOTE: Not payable on data of calls between transactions.
TxDataZeroGas = big.NewInt(4) // Per byte of data attached to a transaction that equals zero. NOTE: Not payable on data of calls between transactions.
DifficultyBoundDivisor = big.NewInt(2048) // The bound divisor of the difficulty, used in the update calculations.
QuadCoeffDiv = big.NewInt(512) // Divisor for the quadratic particle of the memory cost equation.
@ -40,7 +41,7 @@ var (
Sha256WordGas = big.NewInt(12) //
MinGasLimit = big.NewInt(5000) // Minimum the gas limit may ever be.
GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.
GenesisGasLimit = big.NewInt(4712388) // Gas limit of the Genesis block.
Sha3Gas = big.NewInt(30) // Once per SHA3 operation.
Sha256Gas = big.NewInt(60) //

params/util.go Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
import "math/big"
var (
TestNetHomesteadBlock = big.NewInt(494000) // testnet homestead block
MainNetHomesteadBlock = big.NewInt(1150000) // mainnet homestead block
HomesteadBlock = MainNetHomesteadBlock // homestead block used to check against
)
func IsHomestead(blockNumber *big.Int) bool {
// Treat a nil block number as pre-Homestead (used by unit tests). TODO: flip to true after Homestead is live.
if blockNumber == nil {
return false
}
return blockNumber.Cmp(HomesteadBlock) >= 0
}
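A minimal sketch of the check in use; the numbers follow from MainNetHomesteadBlock above, and nil is deliberately pre-Homestead (assumes imports of fmt, math/big and this params package):

for _, n := range []*big.Int{nil, big.NewInt(1149999), big.NewInt(1150000)} {
	fmt.Println(n, params.IsHomestead(n))
}
// <nil> false
// 1149999 false
// 1150000 true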

View File

@ -63,11 +63,12 @@ type Decoder interface {
// must contain an element for each decoded field. Decode returns an
// error if there are too few or too many elements.
//
// The decoding of struct fields honours one particular struct tag,
// "nil". This tag applies to pointer-typed fields and changes the
// The decoding of struct fields honours two struct tags, "tail" and
// "nil". For an explanation of "tail", see the example.
// The "nil" tag applies to pointer-typed fields and changes the
// decoding rules for the field such that input values of size zero
// decode as a nil pointer. This tag can be useful when decoding recursive
// types.
// decode as a nil pointer. This tag can be useful when decoding
// recursive types.
//
// type StructWithEmptyOK struct {
// Foo *[20]byte `rlp:"nil"`
@ -190,7 +191,7 @@ func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {
case kind == reflect.String:
return decodeString, nil
case kind == reflect.Slice || kind == reflect.Array:
return makeListDecoder(typ)
return makeListDecoder(typ, tags)
case kind == reflect.Struct:
return makeStructDecoder(typ)
case kind == reflect.Ptr:
@ -264,7 +265,7 @@ func decodeBigInt(s *Stream, val reflect.Value) error {
return nil
}
func makeListDecoder(typ reflect.Type) (decoder, error) {
func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
etype := typ.Elem()
if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) {
if typ.Kind() == reflect.Array {
@ -277,15 +278,26 @@ func makeListDecoder(typ reflect.Type) (decoder, error) {
if err != nil {
return nil, err
}
isArray := typ.Kind() == reflect.Array
return func(s *Stream, val reflect.Value) error {
if isArray {
var dec decoder
switch {
case typ.Kind() == reflect.Array:
dec = func(s *Stream, val reflect.Value) error {
return decodeListArray(s, val, etypeinfo.decoder)
} else {
}
case tag.tail:
// A slice with "tail" tag can occur as the last field
// of a struct and is supposed to swallow all remaining
// list elements. The struct decoder already called s.List,
// proceed directly to decoding the elements.
dec = func(s *Stream, val reflect.Value) error {
return decodeSliceElems(s, val, etypeinfo.decoder)
}
default:
dec = func(s *Stream, val reflect.Value) error {
return decodeListSlice(s, val, etypeinfo.decoder)
}
}, nil
}
return dec, nil
}
func decodeListSlice(s *Stream, val reflect.Value, elemdec decoder) error {
@ -297,7 +309,13 @@ func decodeListSlice(s *Stream, val reflect.Value, elemdec decoder) error {
val.Set(reflect.MakeSlice(val.Type(), 0, 0))
return s.ListEnd()
}
if err := decodeSliceElems(s, val, elemdec); err != nil {
return err
}
return s.ListEnd()
}
func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error {
i := 0
for ; ; i++ {
// grow slice if necessary
@ -323,12 +341,11 @@ func decodeListSlice(s *Stream, val reflect.Value, elemdec decoder) error {
if i < val.Len() {
val.SetLen(i)
}
return s.ListEnd()
return nil
}
func decodeListArray(s *Stream, val reflect.Value, elemdec decoder) error {
_, err := s.List()
if err != nil {
if _, err := s.List(); err != nil {
return wrapStreamError(err, val.Type())
}
vlen := val.Len()
@ -398,11 +415,11 @@ func makeStructDecoder(typ reflect.Type) (decoder, error) {
return nil, err
}
dec := func(s *Stream, val reflect.Value) (err error) {
if _, err = s.List(); err != nil {
if _, err := s.List(); err != nil {
return wrapStreamError(err, typ)
}
for _, f := range fields {
err = f.info.decoder(s, val.Field(f.index))
err := f.info.decoder(s, val.Field(f.index))
if err == EOL {
return &decodeError{msg: "too few elements", typ: typ}
} else if err != nil {

rlp/decode_tail_test.go Normal file
View File

@ -0,0 +1,33 @@
package rlp
import (
"bytes"
"fmt"
)
type structWithTail struct {
A, B uint
C []uint `rlp:"tail"`
}
func ExampleDecode_structTagTail() {
// In this example, the "tail" struct tag is used to decode lists of
// differing length into a struct.
var val structWithTail
err := Decode(bytes.NewReader([]byte{0xC4, 0x01, 0x02, 0x03, 0x04}), &val)
fmt.Printf("with 4 elements: err=%v val=%v\n", err, val)
err = Decode(bytes.NewReader([]byte{0xC6, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06}), &val)
fmt.Printf("with 6 elements: err=%v val=%v\n", err, val)
// Note that at least two list elements must be present to
// fill fields A and B:
err = Decode(bytes.NewReader([]byte{0xC1, 0x01}), &val)
fmt.Printf("with 1 element: err=%q\n", err)
// Output:
// with 4 elements: err=<nil> val={1 2 [3 4]}
// with 6 elements: err=<nil> val={1 2 [3 4 5 6]}
// with 1 element: err="rlp: too few elements for rlp.structWithTail"
}
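Encoding round-trips with this example: per the makeSliceWriter change later in this diff, a tail slice is written without its own list header, so the elements land flat in the outer list. A short sketch relying only on behaviour shown in this diff:

val := structWithTail{A: 1, B: 2, C: []uint{3, 4}}
enc, _ := rlp.EncodeToBytes(val)
fmt.Printf("%X\n", enc) // C401020304: one outer list, no nested header for C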

View File

@ -312,6 +312,26 @@ type recstruct struct {
Child *recstruct `rlp:"nil"`
}
type invalidTail1 struct {
A uint `rlp:"tail"`
B string
}
type invalidTail2 struct {
A uint
B string `rlp:"tail"`
}
type tailRaw struct {
A uint
Tail []RawValue `rlp:"tail"`
}
type tailUint struct {
A uint
Tail []uint `rlp:"tail"`
}
var (
veryBigInt = big.NewInt(0).Add(
big.NewInt(0).Lsh(big.NewInt(0xFFFFFFFFFFFFFF), 16),
@ -437,6 +457,38 @@ var decodeTests = []decodeTest{
ptr: new(recstruct),
error: "rlp: expected input string or byte for uint, decoding into (rlp.recstruct).Child.I",
},
{
input: "C0",
ptr: new(invalidTail1),
error: "rlp: invalid struct tag \"tail\" for rlp.invalidTail1.A (must be on last field)",
},
{
input: "C0",
ptr: new(invalidTail2),
error: "rlp: invalid struct tag \"tail\" for rlp.invalidTail2.B (field type is not slice)",
},
{
input: "C50102C20102",
ptr: new(tailUint),
error: "rlp: expected input string or byte for uint, decoding into (rlp.tailUint).Tail[1]",
},
// struct tag "tail"
{
input: "C3010203",
ptr: new(tailRaw),
value: tailRaw{A: 1, Tail: []RawValue{unhex("02"), unhex("03")}},
},
{
input: "C20102",
ptr: new(tailRaw),
value: tailRaw{A: 1, Tail: []RawValue{unhex("02")}},
},
{
input: "C101",
ptr: new(tailRaw),
value: tailRaw{A: 1, Tail: []RawValue{}},
},
// RawValue
{input: "01", ptr: new(RawValue), value: RawValue(unhex("01"))},

View File

@ -345,7 +345,7 @@ var (
)
// makeWriter creates a writer function for the given type.
func makeWriter(typ reflect.Type) (writer, error) {
func makeWriter(typ reflect.Type, ts tags) (writer, error) {
kind := typ.Kind()
switch {
case typ == rawValueType:
@ -371,7 +371,7 @@ func makeWriter(typ reflect.Type) (writer, error) {
case kind == reflect.Array && isByte(typ.Elem()):
return writeByteArray, nil
case kind == reflect.Slice || kind == reflect.Array:
return makeSliceWriter(typ)
return makeSliceWriter(typ, ts)
case kind == reflect.Struct:
return makeStructWriter(typ)
case kind == reflect.Ptr:
@ -507,20 +507,21 @@ func writeInterface(val reflect.Value, w *encbuf) error {
return ti.writer(eval, w)
}
func makeSliceWriter(typ reflect.Type) (writer, error) {
func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
etypeinfo, err := cachedTypeInfo1(typ.Elem(), tags{})
if err != nil {
return nil, err
}
writer := func(val reflect.Value, w *encbuf) error {
lh := w.list()
if !ts.tail {
defer w.listEnd(w.list())
}
vlen := val.Len()
for i := 0; i < vlen; i++ {
if err := etypeinfo.writer(val.Index(i), w); err != nil {
return err
}
}
w.listEnd(lh)
return nil
}
return writer, nil

View File

@ -214,6 +214,10 @@ var encTests = []encTest{
{val: simplestruct{A: 3, B: "foo"}, output: "C50383666F6F"},
{val: &recstruct{5, nil}, output: "C205C0"},
{val: &recstruct{5, &recstruct{4, &recstruct{3, nil}}}, output: "C605C404C203C0"},
{val: &tailRaw{A: 1, Tail: []RawValue{unhex("02"), unhex("03")}}, output: "C3010203"},
{val: &tailRaw{A: 1, Tail: []RawValue{unhex("02")}}, output: "C20102"},
{val: &tailRaw{A: 1, Tail: []RawValue{}}, output: "C101"},
{val: &tailRaw{A: 1, Tail: nil}, output: "C101"},
// nil
{val: (*uint)(nil), output: "80"},

View File

@ -17,7 +17,9 @@
package rlp
import (
"fmt"
"reflect"
"strings"
"sync"
)
@ -33,7 +35,13 @@ type typeinfo struct {
// represents struct tags
type tags struct {
// rlp:"nil" controls whether empty input results in a nil pointer.
nilOK bool
// rlp:"tail" controls whether this field swallows additional list
// elements. It can only be set for the last field, which must be
// of slice type.
tail bool
}
type typekey struct {
@ -89,7 +97,10 @@ type field struct {
func structFields(typ reflect.Type) (fields []field, err error) {
for i := 0; i < typ.NumField(); i++ {
if f := typ.Field(i); f.PkgPath == "" { // exported
tags := parseStructTag(f.Tag.Get("rlp"))
tags, err := parseStructTag(typ, i)
if err != nil {
return nil, err
}
info, err := cachedTypeInfo1(f.Type, tags)
if err != nil {
return nil, err
@ -100,8 +111,27 @@ func structFields(typ reflect.Type) (fields []field, err error) {
return fields, nil
}
func parseStructTag(tag string) tags {
return tags{nilOK: tag == "nil"}
func parseStructTag(typ reflect.Type, fi int) (tags, error) {
f := typ.Field(fi)
var ts tags
for _, t := range strings.Split(f.Tag.Get("rlp"), ",") {
switch t = strings.TrimSpace(t); t {
case "":
case "nil":
ts.nilOK = true
case "tail":
ts.tail = true
if fi != typ.NumField()-1 {
return ts, fmt.Errorf(`rlp: invalid struct tag "tail" for %v.%s (must be on last field)`, typ, f.Name)
}
if f.Type.Kind() != reflect.Slice {
return ts, fmt.Errorf(`rlp: invalid struct tag "tail" for %v.%s (field type is not slice)`, typ, f.Name)
}
default:
return ts, fmt.Errorf("rlp: unknown struct tag %q on %v.%s", t, typ, f.Name)
}
}
return ts, nil
}
func genTypeInfo(typ reflect.Type, tags tags) (info *typeinfo, err error) {
@ -109,7 +139,7 @@ func genTypeInfo(typ reflect.Type, tags tags) (info *typeinfo, err error) {
if info.decoder, err = makeDecoder(typ, tags); err != nil {
return nil, err
}
if info.writer, err = makeWriter(typ); err != nil {
if info.writer, err = makeWriter(typ, tags); err != nil {
return nil, err
}
return info, nil

Some files were not shown because too many files have changed in this diff.