Compare commits

...

131 Commits

Author SHA1 Message Date
6962eda19d VERSION, cmd/geth: version 1.2.3 2015-10-21 15:53:54 +02:00
619b37ca37 core, tests: get_hash fix
Make sure that we're fetching the hash from the current chain and not
the canonical chain.

Conflicts:
	core/vm/environment.go
2015-10-21 15:52:48 +02:00
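The message above hinges on the distinction between the chain currently being processed and the canonical chain. As a hedged illustration of that intent (hypothetical types and helpers, not the actual go-ethereum GetHashFn), a BLOCKHASH-style lookup can walk the ancestry of the block being processed instead of reading the canonical-chain index:

```go
package sketch

// Header is a simplified stand-in for a block header (hypothetical type).
type Header struct {
	Number     uint64
	Hash       [32]byte
	ParentHash [32]byte
}

// getHashFn returns a BLOCKHASH-style lookup rooted at the block currently
// being processed: it walks that block's ancestry instead of consulting the
// canonical-chain index, which during a reorg may describe a different fork.
func getHashFn(current *Header, byHash func([32]byte) *Header) func(uint64) [32]byte {
	return func(n uint64) [32]byte {
		for h := current; h != nil && h.Number >= n; h = byHash(h.ParentHash) {
			if h.Number == n {
				return h.Hash
			}
		}
		return [32]byte{} // unknown: not an ancestor of the current block
	}
}
```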
465e810c66 VERSION, cmd/geth: bumped version 1.2.2 2015-10-02 12:55:57 +02:00
274f86cd86 eth/downloader: match capabilities when querying idle peers 2015-10-02 12:50:26 +02:00
b527c9c718 core: deadlock in chainmanager after posting RemovedTransactionEvent
This PR solves an issue where the chain manager posts a
`RemovedTransactionEvent` while still holding its own lock; the tx pool
handler then tries to acquire that same chain-manager lock, resulting in
a deadlock in the core.
2015-10-02 12:41:10 +02:00
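The deadlock described here is a classic re-entrancy problem: an event is posted synchronously while the poster still holds its own mutex, and a subscriber calls back into the poster. A self-contained sketch of the pattern follows (hypothetical names, not the real ChainManager/TxPool types; running it simply hangs). Typical remedies are releasing the lock before posting, or delivering the event asynchronously.

```go
package main

import "sync"

type manager struct {
	mu   sync.Mutex
	subs []func() // subscribers are invoked synchronously
}

// reorg posts an event to all subscribers while still holding mu.
func (m *manager) reorg() {
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, s := range m.subs {
		s()
	}
}

// pendingCount is what a subscriber (the tx pool) might call in response.
func (m *manager) pendingCount() int {
	m.mu.Lock() // deadlock: mu is already held by reorg
	defer m.mu.Unlock()
	return 0
}

func main() {
	m := &manager{}
	m.subs = append(m.subs, func() { _ = m.pendingCount() })
	m.reorg() // never returns
}
```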
9666db2a44 VERSION, cmd/geth: bumped version 1.2.1 2015-10-01 10:38:43 +02:00
e3ac56d502 Merge pull request #1859 from fjl/fix-discover-refresh-race
p2p/discover: fix race involving the seed node iterator
2015-09-30 08:21:40 -07:00
32dda97602 p2p/discover: ignore packet version numbers
The strict matching can get in the way of protocol upgrades.
2015-09-30 16:23:03 +02:00
631bf36102 p2p/discover: remove unused lastLookup field 2015-09-30 16:23:03 +02:00
b4374436f3 p2p/discover: fix race involving the seed node iterator
nodeDB.querySeeds was not safe for concurrent use but could be called
concurrently on multiple goroutines in the following case:

- the table was empty
- a timed refresh started
- a lookup was started and initiated refresh

These conditions are unlikely to coincide during normal use, but are
much more likely to occur all at once when the user's machine just woke
from sleep. The root cause of the issue is that querySeeds reused the
same leveldb iterator until it was exhausted.

This commit moves the refresh scheduling logic into its own goroutine
(so only one refresh is ever active) and changes querySeeds to not use
a persistent iterator. The seed node selection is now more random and
ignores nodes that have not been contacted in the last 5 days.
2015-09-30 16:23:03 +02:00
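The fix funnels all refresh triggers through a single goroutine, so at most one refresh is ever active and no database iterator is shared across goroutines. A rough sketch of that pattern under assumed, simplified types (the real p2p/discover table code is more involved):

```go
package discover

import "time"

type table struct {
	refreshReq chan chan struct{}
}

func newTable() *table {
	t := &table{refreshReq: make(chan chan struct{})}
	go t.loop()
	return t
}

// loop is the only place a refresh ever runs, whether triggered by the
// timer or by a lookup, so refreshes can never overlap.
func (t *table) loop() {
	ticker := time.NewTicker(30 * time.Minute) // timed refresh
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			t.doRefresh()
		case done := <-t.refreshReq:
			t.doRefresh()
			close(done) // wake the caller that asked for the refresh
		}
	}
}

// requestRefresh may be called from any goroutine; it blocks until the
// refresh triggered on its behalf has finished.
func (t *table) requestRefresh() {
	done := make(chan struct{})
	t.refreshReq <- done
	<-done
}

func (t *table) doRefresh() {
	// re-select seed nodes here, using a fresh iterator each time
}
```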
46ad5a5f5b Merge pull request #1852 from obscuren/filter-nil-fix
xeth: fixed nil pointer of filter retrieval
2015-09-30 03:06:36 -07:00
9b94076717 Merge pull request #1854 from karalabe/badhasherror-formatting-loop
core: fix a formatting loop in BadHashError
2015-09-29 02:26:01 -07:00
b8b996be74 core: fix a formatting loop in BadHashError 2015-09-29 09:11:38 +03:00
b9359981f4 xeth: fixed nil pointer of filter retrieval
This fix addresses an issue with filters that were (possibly) not yet
added to the filter queues but were already expected to exist. I've added
nil checks to make sure it doesn't crash, and swapped the installation of
the filter around so that it is installed before use.

Closes #1665
2015-09-25 13:56:53 +02:00
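The two-part fix, nil checks plus installing the filter before it can be looked up, boils down to a pattern like the following simplified sketch (hypothetical types, not the xeth filter code):

```go
package sketch

import "sync"

type Log struct{ Data string }

type filter struct{ logs []Log }

func (f *filter) Find() []Log { return f.logs }

type filterManager struct {
	mu      sync.Mutex
	filters map[int]*filter
	nextID  int
}

// install registers the filter *before* its id is handed out, so a lookup
// racing with installation can never observe a half-registered filter.
func (m *filterManager) install(f *filter) int {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.filters == nil {
		m.filters = make(map[int]*filter)
	}
	id := m.nextID
	m.nextID++
	m.filters[id] = f
	return id
}

// logs treats an unknown or not-yet-installed filter as "no results"
// instead of dereferencing a nil pointer.
func (m *filterManager) logs(id int) []Log {
	m.mu.Lock()
	f := m.filters[id]
	m.mu.Unlock()
	if f == nil {
		return nil
	}
	return f.Find()
}
```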
7977e87ce1 Merge pull request #1843 from karalabe/cleanup-downloader-channel
eth/downloader: always send termination wakes, clean leftover
2015-09-25 04:34:59 -07:00
69d86442a5 Merge pull request #1803 from Gustav-Simonsson/badhashes
core: Add BadHashErr and test for BadHashes handling
2015-09-23 11:10:25 -07:00
36f46a61a7 Merge pull request #1844 from obscuren/version-file
VERSION: added version
2015-09-23 05:48:00 -07:00
6e1dc321f4 VERSION: added version 2015-09-23 14:47:20 +02:00
7a2a918067 Merge pull request #1842 from fjl/rpc-fix-unknown-block
rpc/api: don't crash for unknown blocks
2015-09-23 12:57:33 +02:00
f459a3f0ae eth/downloader: always send termination wakes, clean leftover 2015-09-23 12:39:17 +03:00
e456f27795 Merge pull request #1827 from Gustav-Simonsson/common_tests
tests: add test for StateTests/stCallCodes.json
2015-09-23 02:12:59 -07:00
90cd8ae9f2 rpc/api: don't crash for unknown blocks
Most eth RPC calls that work with blocks crashed when the block was not
found because they called Hash on a nil block. This is a regression
introduced in cdc2662c40 (#1779).

While here, remove the insane conversions in get*CountBy*. There is no
need to construct a complete BlockRes and convert
int->int64->*big.Int->[]byte->hexnum->string just to format the length of
a slice as hex.
2015-09-22 23:59:26 +02:00
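Both parts of the change reduce to small, defensive snippets. A hedged sketch with hypothetical helper names: check for a nil block before touching it, and format a slice length as a hex quantity directly instead of round-tripping through big.Int:

```go
package sketch

import "fmt"

type Block struct{ txs []string }

func (b *Block) Hash() string { return "0x..." }

// blockHashOrNull avoids calling Hash on a nil block, returning a value the
// RPC layer can encode as null instead of panicking.
func blockHashOrNull(b *Block) interface{} {
	if b == nil {
		return nil
	}
	return b.Hash()
}

// txCountHex formats the transaction count as hex directly, without the
// int->int64->*big.Int->[]byte->hexnum->string chain the commit mentions.
func txCountHex(b *Block) string {
	if b == nil {
		return "0x0"
	}
	return fmt.Sprintf("0x%x", len(b.txs))
}
```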
70b6174748 cmd/geth, core: make "geth blocktest" work again
The test genesis block was not written properly, so block insertion failed
immediately.

While here, fix the panic when shutting down "geth blocktest" with
Ctrl+C. The signal handler is now installed automatically, causing
ethereum.Stop to crash because everything is already stopped.
2015-09-22 23:55:31 +02:00
bfde1a4305 core: Add BadHashErr and test for BadHashes handling 2015-09-22 18:02:26 +02:00
e56cbc225e Merge pull request #1835 from karalabe/make-cross
makefile: built in cross compilation targets
2015-09-21 11:47:10 -07:00
7bf8e949e7 Merge pull request #1669 from obscuren/tx-pool-auto-resend
core, xeth: chain reorg move missing transactions to transaction pool
2015-09-21 11:45:59 -07:00
6a05c569f2 makefile: built in cross compilation targets 2015-09-21 21:36:01 +03:00
eaa4473dbd core, core/types: readd transactions after chain re-org
Added a `Difference` method to `types.Transactions` which sets the
receiver to the difference of a and b (NOTE: a minus b, not the
symmetric difference).

The transaction pool subscribes to RemovedTransactionEvent, adding back
any transactions potentially missing from the chain.

When a chain re-org occurs, remove any transactions that dropped out of
the canonical chain during the re-org, as well as the receipts that were
generated in the process.

Closes #1746
2015-09-21 20:33:28 +02:00
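A set-difference helper in the spirit of the `Difference` method the commit describes (a minus b, keyed by transaction hash); the types are simplified and hypothetical:

```go
package sketch

type Tx struct {
	Hash  string
	Nonce uint64
}

// difference returns the transactions in a that do not appear in b,
// i.e. a \ b keyed by hash (not the symmetric difference).
func difference(a, b []Tx) []Tx {
	inB := make(map[string]struct{}, len(b))
	for _, tx := range b {
		inB[tx.Hash] = struct{}{}
	}
	out := make([]Tx, 0, len(a))
	for _, tx := range a {
		if _, ok := inB[tx.Hash]; !ok {
			out = append(out, tx)
		}
	}
	return out
}
```

On a re-org, difference(oldChainTxs, newChainTxs) yields the transactions that fell out of the canonical chain and can be re-queued in the pool.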
be76a68aea cmd/geth: changed version number to 1.2.0
Changed the version number of geth to 1.2.0 so that dev builds are now properly built (instead of master). Note to self: increase version number to 1.2.1 for our next actual release.
2015-09-21 16:13:07 +02:00
12c0afe4fe Merge pull request #1822 from karalabe/contain-pow
core: separate and contain POW verifier, extensive tests
2015-09-21 06:52:11 -07:00
5621308949 tests: add test for StateTests/stCallCodes.json 2015-09-21 11:34:02 +02:00
399c920380 core: separate and contain POW verifier, extensive tests 2015-09-21 10:24:49 +03:00
e40b447fea Merge pull request #1814 from Gustav-Simonsson/common_tests
tests: update common test wrappers and test files
2015-09-18 16:34:54 -07:00
b94b9b0158 Merge pull request #1817 from obscuren/nonce-fix
core: transaction nonce recovery
2015-09-18 15:56:10 -07:00
47ca6904b3 tests: use lastblockhash field to validate reorgs and block headers 2015-09-18 17:48:31 +02:00
075815e5ff tests: update common test wrappers and test files 2015-09-18 13:08:36 +02:00
b60a27627b core: transaction nonce recovery fix
When the transaction state recovery kicked in, it assigned the last
(incorrect) nonce to the pending state, which caused transactions with
the same nonce to occur.

Added a test for nonce recovery
2015-09-18 11:59:21 +02:00
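Read loosely, the invariant the fix restores is that pending nonces continue from the state's per-account nonce rather than from whatever nonce the last recovered transaction carried. A hedged, simplified sketch of that invariant (hypothetical types, not the actual tx pool):

```go
package sketch

type pool struct {
	pendingNonce map[string]uint64
}

// resetState rebuilds pending nonces from the state, one value per account,
// never from a shared "last seen" nonce.
func (p *pool) resetState(stateNonce func(addr string) uint64, accounts []string) {
	p.pendingNonce = make(map[string]uint64, len(accounts))
	for _, addr := range accounts {
		p.pendingNonce[addr] = stateNonce(addr)
	}
}

// nextNonce hands out strictly increasing nonces per account, so two
// pending transactions can never share one.
func (p *pool) nextNonce(addr string) uint64 {
	n := p.pendingNonce[addr]
	p.pendingNonce[addr] = n + 1
	return n
}
```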
216c486a3a Merge pull request #1815 from karalabe/chain-maker-timer
core: allow modifying test-chain block times
2015-09-18 11:23:31 +02:00
ac6248ed7a Merge pull request #1793 from jeffallen/typo
common: Update README.md for the current package name
2015-09-17 19:26:49 +02:00
bdf4fd6091 Merge pull request #1813 from kobigurk/develop
cmd/geth: extradata is correctly initialized with console
2015-09-17 19:25:32 +02:00
69f48e4689 Merge pull request #1811 from bas-vk/timer-clearinterval
timer bugfix when clearInterval was called from within the callback
2015-09-17 19:21:49 +02:00
6f3cb12924 core: allow modifying test-chain block times 2015-09-17 13:43:52 +03:00
58fbcaa750 Merge pull request #1810 from karalabe/pure-header-verifications-2
core, eth, miner: use pure header validation
2015-09-16 14:21:12 -07:00
1a1a1ee4ff cmd/geth: extradata is correctly initialized with console 2015-09-16 21:01:21 +03:00
985b5f29ed Merge pull request #1801 from fjl/ethdb
all: move common.Database to ethdb and add NewBatch
2015-09-16 07:50:14 -07:00
2f65ddc501 jsre: timer bugfix when clearInterval was called from within the callback 2015-09-16 11:57:33 +02:00
1cc2f08041 Merge pull request #1784 from karalabe/standard-sync-stats
eth, rpc: standardize the chain sync progress counters
2015-09-16 02:31:58 -07:00
821619e1c3 core, eth, miner: use pure header validation 2015-09-16 10:46:28 +03:00
e9a80518c7 Merge pull request #1744 from kobigurk/develop
adds extradata flag
2015-09-15 13:56:10 -07:00
321733ab23 cmd/geth: adds extradata flag 2015-09-15 23:35:36 +03:00
d4d3fc6a70 jsre, rpc/api: pull in new web3 and use hex numbers 2015-09-15 17:05:12 +03:00
99b62f36b6 eth/downloader: header-chain order and ancestry check 2015-09-15 14:45:53 +03:00
0a7d059b6a eth, rpc: standardize the chain sync progress counters 2015-09-15 14:45:53 +03:00
55bdcfaeac Merge pull request #1806 from ethersphere/solc2
new solc api - late fixes
2015-09-15 01:08:30 -07:00
3a5e7ed9a6 new solc api:
* use legacy version matcher
* optimise just a boolean flag
* skipf for messages in tests
2015-09-15 00:35:22 +02:00
b252589960 ethdb: remove Flush 2015-09-14 23:36:30 +02:00
d581dfee5f ethdb: copy stored memdb values
Storing a value in LevelDB copies the bytes, so modifying the value
afterwards does not affect the content of the database. This commit
ensures that MemDatabase satisfies the same property.
2015-09-14 23:36:30 +02:00
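The property being enforced is easy to show in isolation: Put must copy the value so that later mutation of the caller's slice cannot change what the database returns. A toy in-memory store illustrating it (hypothetical names, not the actual MemDatabase):

```go
package sketch

import "sync"

type MemDB struct {
	mu   sync.RWMutex
	data map[string][]byte
}

func NewMemDB() *MemDB { return &MemDB{data: make(map[string][]byte)} }

func (db *MemDB) Put(key, value []byte) error {
	db.mu.Lock()
	defer db.mu.Unlock()
	db.data[string(key)] = append([]byte(nil), value...) // defensive copy
	return nil
}

func (db *MemDB) Get(key []byte) ([]byte, bool) {
	db.mu.RLock()
	defer db.mu.RUnlock()
	v, ok := db.data[string(key)]
	return v, ok
}
```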
8b32f10f16 ethdb: add NewBatch 2015-09-14 23:36:30 +02:00
8c4dab77ba all: move common.Database to package ethdb 2015-09-14 23:36:30 +02:00
071e2cd08e Merge pull request #1786 from ethersphere/solc
common/compiler: new solc API
2015-09-14 23:32:40 +02:00
47b9c640f5 Merge pull request #1797 from karalabe/ensure-ipcpath-exists
rpc/comms: fix #1795, ensure IPC path exists before binding
2015-09-14 14:45:11 +02:00
a9c809b441 Merge pull request #1792 from jeffallen/uuid
Change go-uuid to use the currently supported repository.
2015-09-14 12:06:59 +02:00
0d40727775 Change go-uuid to use the currently supported repository. 2015-09-12 16:49:24 +06:00
17b729759b Solidity Compiler - solc new API
* adapt to new compiler versioning
* use compiler version as language version
* implement new solc API for versions >= 0.1.[2-9][0-9]* fixes #1770
* add optimize=1 to options
* backward compatibility (for now) for <= 0.1.1, and old versions (0.[2-9][0-9]*.[0-9]+)
* introduce compilerOptions to ContractInfo
* clean up flair, include full version string to version line and ContractInfo
2015-09-12 10:52:52 +02:00
55ed8d108d Merge pull request #1789 from Gustav-Simonsson/core_remove_unused_functions
core, core/vm, core/state: remove unused functions
2015-09-11 15:29:27 -07:00
f1a4b330dd Merge pull request #1796 from karalabe/ethash-android-support
godeps: pull in ethash android fix
2015-09-11 15:26:01 -07:00
0eac601b5b Merge pull request #1779 from karalabe/split-block-storage-3000
core: split the db blocks into components, move TD out top level
2015-09-11 08:10:37 -07:00
cdc2662c40 core: split out TD from database and all internals 2015-09-11 17:42:25 +03:00
2b339cbbd8 core, eth: split the db blocks into headers and bodies 2015-09-11 17:42:25 +03:00
3e6964b841 rpc/comms: fix #1795, ensure IPC path exists before binding 2015-09-11 17:03:31 +03:00
c6013725a8 godeps: pull in ethash android fix 2015-09-11 15:53:23 +03:00
4e075e4013 Merge pull request #1773 from obscuren/dev-mode
cmd/geth, cmd/utils, eth: added dev mode flag
2015-09-10 21:15:33 +02:00
b81a6e6ab8 core, core/vm, core/state: remove unused functions 2015-09-10 21:10:58 +02:00
62bbf8a09e Merge pull request #1778 from fjl/rlp-trie-changes
rlp: precursor changes for trie, p2p
2015-09-10 12:02:16 -07:00
4ce3dfe9c8 common: Update README.md for the current package name 2015-09-10 23:59:38 +06:00
fc8b246109 rlp: move ListSize to raw.go 2015-09-10 19:41:51 +02:00
24bb68e7cf rlp: add RawValue 2015-09-10 19:41:51 +02:00
bc17dba8fb rlp: add Split functions
These functions allow destructuring of raw rlp-encoded bytes
without the overhead of reflection or copying.
2015-09-10 19:41:51 +02:00
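As a rough illustration of what destructuring raw RLP bytes without reflection or copying means, here is a hedged sketch covering only the short encodings; the real rlp.Split also handles the long (>55 byte) forms and does stricter validation:

```go
package sketch

import "errors"

// splitShort peels one RLP value off the front of a raw buffer without
// decoding or copying it, returning the value's payload and the remaining
// bytes. Single-byte, short-string and short-list forms only.
func splitShort(b []byte) (content, rest []byte, isList bool, err error) {
	if len(b) == 0 {
		return nil, nil, false, errors.New("rlp: empty input")
	}
	switch k := b[0]; {
	case k < 0x80: // the byte is its own payload
		return b[:1], b[1:], false, nil
	case k < 0xb8: // short string, 0-55 payload bytes
		n := int(k - 0x80)
		if len(b) < 1+n {
			return nil, nil, false, errors.New("rlp: value too short")
		}
		return b[1 : 1+n], b[1+n:], false, nil
	case k >= 0xc0 && k < 0xf8: // short list, 0-55 payload bytes
		n := int(k - 0xc0)
		if len(b) < 1+n {
			return nil, nil, true, errors.New("rlp: value too short")
		}
		return b[1 : 1+n], b[1+n:], true, nil
	default:
		return nil, nil, false, errors.New("rlp: long form not handled in this sketch")
	}
}
```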
ac32f52ca6 rlp: fix encReader returning nil buffers to the pool
The bug can cause crashes if Read is called after EOF has been returned.
No code performs such calls right now, but hitting the bug gets more
likely as rlp.EncodeToReader gets used in more places.
2015-09-10 19:12:32 +02:00
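The underlying hazard is returning a nil buffer to a sync.Pool after EOF, so a later Get hands nil to the next user. A small, hedged sketch of the guard, using bytes.Buffer rather than the package's internal buffer type:

```go
package sketch

import (
	"bytes"
	"sync"
)

var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// release returns a buffer to the pool. After EOF the reader's buffer has
// already been handed back and is nil; putting nil into the pool would make
// a later Get return nil and crash whoever uses it next.
func release(buf *bytes.Buffer) {
	if buf == nil {
		return
	}
	buf.Reset()
	bufPool.Put(buf)
}
```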
90f1fe0ed2 Merge pull request #1781 from Gustav-Simonsson/state_object_copy
core/state: deleted field in StateObject Copy() and unit test
2015-09-09 18:42:36 +02:00
28b13a4d1e Merge pull request #1780 from bas-vk/miner-crash
agent/miner Prevent the CpuAgent from being started multiple times
2015-09-09 04:49:28 -07:00
f04b3a6f29 cmd/geth, cmd/utils, eth: added dev mode flag
Dev mode enabled some debugging flags such as:

* VM debugging mode
* Simpler proof of work
* Whisper enabled by default
* Datadir to a tmp datadir
* Maxpeers set to 0
* Gas price of 0
* Random listen port
2015-09-09 08:53:05 +02:00
bf879ef230 core/state: test formatting adhering to Go convention 2015-09-09 00:26:18 +02:00
004ed786b4 core/state: deleted field in StateObject Copy() and unit test 2015-09-08 15:56:11 +02:00
652eea71fe put unlock after lock 2015-09-08 12:42:29 +02:00
618065895b agent/miner Prevent the CpuAgent from being started multiple times 2015-09-08 11:27:55 +02:00
edaea69817 Merge pull request #1777 from hectorchu/develop
rpc/comms: fix bug attaching the console over http
2015-09-08 11:02:09 +03:00
6fe46cc743 Merge pull request #1774 from bas-vk/console-crash
cmd/geth Autocompletion bugfix which let the console crash
2015-09-08 10:33:09 +03:00
4ea81f170a rpc/comms: fix bug attaching the console over http 2015-09-07 15:09:59 +01:00
f69121357d cmd/geth Autocompletion bugfix which let the console crash 2015-09-06 16:25:55 +02:00
e2d7c1a523 Merge pull request #1752 from karalabe/fix-eth61-test
eth/downloader: fix race causing occasional test failure
2015-09-03 15:52:18 +02:00
ebbe25ee71 Merge pull request #1764 from kobigurk/honor_ipc_datadir
honors datadir when attaching
2015-09-03 10:48:23 +03:00
1a86adc5a2 cmd/geth: honor datadir when attaching 2015-09-03 10:28:30 +03:00
e98854588b Merge pull request #1761 from CJentzsch/patch-3
fix block time issue
2015-09-02 15:13:14 -07:00
0fda4c4e15 fix block time issue
currently, under normal circumstances, you always set the timestamp to previous.Time() + 1.
credits to https://www.reddit.com/r/ethereum/comments/3jcs5r/code_avg_block_time_vs_difficulty_adjustment/cuoi4op

style
2015-09-03 00:05:05 +02:00
b2c17a5a63 Merge pull request #1726 from Gustav-Simonsson/update_tests
Add TestBcForkUncle tests & update JSON files
2015-09-02 22:02:44 +02:00
e9b031b88b Merge pull request #1755 from fjl/coinbase
core: improve block gas tracking
2015-09-01 23:36:05 +02:00
00b45acb9e core: improve block gas tracking 2015-09-01 23:11:03 +02:00
1ffc5b0cfd Merge pull request #1751 from maran/fix_filters
core: Filter on addresses should work as an OR not an AND.
2015-09-01 20:10:27 +02:00
5e4cd599eb Merge pull request #1745 from mrdomino/obsd-build-master
Pull in ethash and go-isatty updates
2015-09-01 20:06:13 +02:00
1f1d73ab74 eth/downloader: fix race causing occasional test failure 2015-09-01 16:11:14 +03:00
67225de255 Filter on addresses should work as an OR not an AND. 2015-09-01 09:19:45 +02:00
540eb3d02d Pull in ethash and go-isatty updates
Fixes build on OpenBSD.
2015-08-31 12:14:32 -04:00
fe8093b71f Add TestBcForkUncleTests and update JSON files 2015-08-31 16:45:00 +02:00
9dc23ce284 Merge pull request #1742 from fjl/rpc-receipt-root
rpc: add receiptRoot to getBlock* responses
2015-08-31 14:50:21 +02:00
1801748ccd Merge pull request #1734 from fjl/ldflags-warning-go1.5
build: avoid -X separator warning with Go >= 1.5
2015-08-31 14:49:50 +02:00
8b12bcc0ac rpc: add receiptRoot to getBlock* responses
Fixes #1679
2015-08-29 11:12:01 +02:00
e1037bd0cf Merge pull request #1724 from Gustav-Simonsson/get_work
rpc: return error code for eth_getWork when no work ready
2015-08-29 10:54:10 +02:00
2d1ced8759 Merge pull request #1739 from bas-vk/empty-password
rpc/api allow empty password
2015-08-28 13:14:51 +02:00
39e9560600 rpc/api allow empty password 2015-08-28 12:49:41 +02:00
d9addf79fa Improve error string and remove unneeded else clause 2015-08-28 03:42:01 +02:00
cfd84a6ad9 build: avoid -X separator warning with Go >= 1.5 2015-08-27 13:26:13 +02:00
6ec13e7e2b Merge pull request #1701 from karalabe/eth62-sync-rebase
eth: implement eth/62 synchronization logic
2015-08-27 00:03:59 +02:00
79b644c7a3 Merge pull request #1717 from karalabe/forward-solidity-errors
common/compiler: fix #1598, expose solidity errors
2015-08-26 19:00:11 +02:00
14370a2260 Merge pull request #1718 from caktux/develop
add missing shh_getMessages to RPC mappings
2015-08-26 18:55:51 +02:00
3df6f3fc14 Merge pull request #1721 from bas-vk/console-error-parsing
Improved console error handling
2015-08-26 18:55:31 +02:00
847794a321 Merge pull request #1722 from bas-vk/remote-deleteaccount
Remove personal.deleteAccount from RPC interface
2015-08-26 18:02:51 +02:00
829201382b rpc: return error code for eth_getWork when no work ready 2015-08-26 12:46:50 +02:00
5dd2462816 rpc/api - remove personal.deleteAccount from RPC interface 2015-08-26 11:39:43 +02:00
f448310eef bugfix console error handling 2015-08-26 11:33:02 +02:00
101418b275 common/compiler: fix #1598, expose solidity errors 2015-08-26 10:04:23 +03:00
a1d8015817 add missing shh_getMessages to RPC mappings 2015-08-25 14:42:57 -04:00
17f65cd1e5 eth: update metrics collection to handle eth/62 algos 2015-08-25 17:48:47 +03:00
47a7fe5d22 eth: port the synchronisation algo to eth/62 2015-08-25 17:48:47 +03:00
abce09954b Merge pull request #1711 from Gustav-Simonsson/timestamp_big_int
Add tests for uncle timestamps and refactor timestamp type
2015-08-25 15:49:36 +02:00
a219159e7e Merge pull request #1710 from bas-vk/useragent
user agent messages were dumped in some cases
2015-08-25 12:23:25 +02:00
7324176f70 Add tests for uncle timestamps and refactor timestamp type 2015-08-25 04:46:11 +02:00
ca88e18f59 eth: kill off protocol eth/60 in preparation for eth/62 2015-08-24 17:57:28 +03:00
42f44dda54 eth, eth/downloader: handle header requests, table driven proto tests 2015-08-24 17:57:28 +03:00
d910148a96 Set ipc channel as user agent client 2015-08-24 12:41:34 +02:00
c51e153b5c eth, metrics, p2p: prepare metrics and net packets to eth/62 2015-08-21 10:30:57 +03:00
886 changed files with 30299 additions and 5295 deletions

Godeps/Godeps.json generated
View File

@ -5,11 +5,6 @@
"./..."
],
"Deps": [
{
"ImportPath": "code.google.com/p/go-uuid/uuid",
"Comment": "null-12",
"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
},
{
"ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-95-g9b2bd2b",
@ -21,8 +16,8 @@
},
{
"ImportPath": "github.com/ethereum/ethash",
"Comment": "v23.1-227-g8f6ccaa",
"Rev": "8f6ccaaef9b418553807a73a95cb5f49cd3ea39f"
"Comment": "v23.1-234-g062e40a",
"Rev": "062e40a1a1671f5a5102862b56e4c56f68a732f5"
},
{
"ImportPath": "github.com/fatih/color",
@ -51,7 +46,7 @@
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "fdbe02a1b44e75977b2690062b83cf507d70c013"
"Rev": "7fcbc72f853b92b5720db4a6b8482be612daef24"
},
{
"ImportPath": "github.com/mattn/go-runewidth",
@ -62,6 +57,10 @@
"ImportPath": "github.com/nsf/termbox-go",
"Rev": "675ffd907b7401b8a709a5ef2249978af5616bb2"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
},
{
"ImportPath": "github.com/peterh/liner",
"Rev": "29f6a646557d83e2b6e9ba05c45fbea9c006dbe8"

View File

@ -35,10 +35,14 @@
#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
#define ethash_swap_u32(input_) bswap32(input_)
#define ethash_swap_u64(input_) bswap64(input_)
#elif defined(__OpenBSD__)
#include <endian.h>
#define ethash_swap_u32(input_) swap32(input_)
#define ethash_swap_u64(input_) swap64(input_)
#else // posix
#include <byteswap.h>
#define ethash_swap_u32(input_) __bswap_32(input_)
#define ethash_swap_u64(input_) __bswap_64(input_)
#define ethash_swap_u32(input_) bswap_32(input_)
#define ethash_swap_u64(input_) bswap_64(input_)
#endif

View File

@ -29,6 +29,10 @@ extern "C" {
#define FNV_PRIME 0x01000193
/* The FNV-1 spec multiplies the prime with the input one byte (octet) in turn.
We instead multiply it with the full 32-bit input.
This gives a different result compared to a canonical FNV-1 implementation.
*/
static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
{
return x * FNV_PRIME ^ y;

View File

@ -0,0 +1,9 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
MIT License (Expat)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,4 +1,4 @@
// +build darwin freebsd
// +build darwin freebsd openbsd netbsd
package isatty

View File

@ -0,0 +1 @@
Paul Borman <borman@google.com>

View File

@ -1,4 +1,4 @@
Copyright (c) 2009 Google Inc. All rights reserved.
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

Godeps/_workspace/src/github.com/pborman/uuid/json.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "errors"
func (u UUID) MarshalJSON() ([]byte, error) {
if len(u) == 0 {
return []byte(`""`), nil
}
return []byte(`"` + u.String() + `"`), nil
}
func (u *UUID) UnmarshalJSON(data []byte) error {
if len(data) == 0 || string(data) == `""` {
return nil
}
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("invalid UUID format")
}
data = data[1 : len(data)-1]
uu := Parse(string(data))
if uu == nil {
return errors.New("invalid UUID format")
}
*u = uu
return nil
}

View File

@ -0,0 +1,32 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/json"
"reflect"
"testing"
)
var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
func TestJSON(t *testing.T) {
type S struct {
ID1 UUID
ID2 UUID
}
s1 := S{ID1: testUUID}
data, err := json.Marshal(&s1)
if err != nil {
t.Fatal(err)
}
var s2 S
if err := json.Unmarshal(data, &s2); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(&s1, &s2) {
t.Errorf("got %#v, want %#v", s2, s1)
}
}

View File

@ -0,0 +1,66 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"flag"
"runtime"
"testing"
"time"
)
// This test is only run when --regressions is passed on the go test line.
var regressions = flag.Bool("regressions", false, "run uuid regression tests")
// TestClockSeqRace tests for a particular race condition of returning two
// identical Version1 UUIDs. The duration of 1 minute was chosen as the race
// condition, before being fixed, nearly always occured in under 30 seconds.
func TestClockSeqRace(t *testing.T) {
if !*regressions {
t.Skip("skipping regression tests")
}
duration := time.Minute
done := make(chan struct{})
defer close(done)
ch := make(chan UUID, 10000)
ncpu := runtime.NumCPU()
switch ncpu {
case 0, 1:
// We can't run the test effectively.
t.Skip("skipping race test, only one CPU detected")
return
default:
runtime.GOMAXPROCS(ncpu)
}
for i := 0; i < ncpu; i++ {
go func() {
for {
select {
case <-done:
return
case ch <- NewUUID():
}
}
}()
}
uuids := make(map[string]bool)
cnt := 0
start := time.Now()
for u := range ch {
s := u.String()
if uuids[s] {
t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
return
}
uuids[s] = true
if time.Since(start) > duration {
return
}
cnt++
}
}

Godeps/_workspace/src/github.com/pborman/uuid/sql.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"errors"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src.(type) {
case string:
// see uuid.Parse for required string format
parsed := Parse(src.(string))
if parsed == nil {
return errors.New("Scan: invalid UUID format")
}
*uuid = parsed
case []byte:
// assumes a simple slice of bytes, just check validity and store
u := UUID(src.([]byte))
if u.Variant() == Invalid {
return errors.New("Scan: invalid UUID format")
}
*uuid = u
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}

View File

@ -0,0 +1,53 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"strings"
"testing"
)
func TestScan(t *testing.T) {
var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
var byteTest []byte = Parse(stringTest)
var badTypeTest int = 6
var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4"
var invalidByteTest []byte = Parse(invalidTest)
var uuid UUID
err := (&uuid).Scan(stringTest)
if err != nil {
t.Fatal(err)
}
err = (&uuid).Scan(byteTest)
if err != nil {
t.Fatal(err)
}
err = (&uuid).Scan(badTypeTest)
if err == nil {
t.Error("int correctly parsed and shouldn't have")
}
if !strings.Contains(err.Error(), "unable to scan type") {
t.Error("attempting to parse an int returned an incorrect error message")
}
err = (&uuid).Scan(invalidTest)
if err == nil {
t.Error("invalid uuid was parsed without error")
}
if !strings.Contains(err.Error(), "invalid UUID") {
t.Error("attempting to parse an invalid UUID returned an incorrect error message")
}
err = (&uuid).Scan(invalidByteTest)
if err == nil {
t.Error("invalid byte uuid was parsed without error")
}
if !strings.Contains(err.Error(), "invalid UUID") {
t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
}
}

View File

@ -40,15 +40,15 @@ func (t Time) UnixTime() (sec, nsec int64) {
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// adjusts the clock sequence as needed. An error is returned if the current
// time cannot be determined.
func GetTime() (Time, error) {
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer mu.Unlock()
mu.Lock()
return getTime()
}
func getTime() (Time, error) {
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
@ -63,7 +63,7 @@ func getTime() (Time, error) {
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), nil
return Time(now), clock_seq, nil
}
// ClockSequence returns the current clock sequence, generating one if not

View File

@ -19,7 +19,7 @@ func NewUUID() UUID {
SetNodeInterface("")
}
now, err := GetTime()
now, seq, err := GetTime()
if err != nil {
return nil
}
@ -34,7 +34,7 @@ func NewUUID() UUID {
binary.BigEndian.PutUint32(uuid[0:], time_low)
binary.BigEndian.PutUint16(uuid[4:], time_mid)
binary.BigEndian.PutUint16(uuid[6:], time_hi)
binary.BigEndian.PutUint16(uuid[8:], clock_seq)
binary.BigEndian.PutUint16(uuid[8:], seq)
copy(uuid[10:], nodeID)
return uuid

View File

@ -10,6 +10,30 @@ geth:
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
geth-cross: geth-linux geth-darwin geth-windows geth-android
@echo "Full cross compilation done:"
@ls -l $(GOBIN)/geth-*
geth-linux: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=linux/* -v ./cmd/geth
@echo "Linux cross compilation done:"
@ls -l $(GOBIN)/geth-linux-*
geth-darwin: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=darwin/* -v ./cmd/geth
@echo "Darwin cross compilation done:"
@ls -l $(GOBIN)/geth-darwin-*
geth-windows: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=windows/* -v ./cmd/geth
@echo "Windows cross compilation done:"
@ls -l $(GOBIN)/geth-windows-*
geth-android: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=android-16/*,android-21/* -v ./cmd/geth
@echo "Android cross compilation done:"
@ls -l $(GOBIN)/geth-android-*
evm:
build/env.sh $(GOROOT)/bin/go install -v $(shell build/ldflags.sh) ./cmd/evm
@echo "Done building."
@ -28,5 +52,8 @@ test: all
travis-test-with-coverage: all
build/env.sh build/test-global-coverage.sh
xgo:
build/env.sh go get github.com/karalabe/xgo
clean:
rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*

VERSION Normal file
View File

@ -0,0 +1 @@
1.2.3

View File

@ -7,7 +7,12 @@ if [ ! -f "build/env.sh" ]; then
exit 2
fi
# Since Go 1.5, the separator char for link time assignments
# is '=' and using ' ' prints a warning. However, Go < 1.5 does
# not support using '='.
sep=$(go version | awk '{ if ($3 >= "go1.5" || index($3, "devel")) print "="; else print " "; }' -)
# set gitCommit when running from a Git checkout.
if [ -f ".git/HEAD" ]; then
echo "-ldflags '-X main.gitCommit $(git rev-parse HEAD)'"
echo "-ldflags '-X main.gitCommit$sep$(git rev-parse HEAD)'"
fi

View File

@ -166,7 +166,7 @@ type VMEnv struct {
depth int
Gas *big.Int
time uint64
time *big.Int
logs []vm.StructLog
}
@ -175,7 +175,7 @@ func NewEnv(state *state.StateDB, transactor common.Address, value *big.Int) *VM
state: state,
transactor: &transactor,
value: value,
time: uint64(time.Now().Unix()),
time: big.NewInt(time.Now().Unix()),
}
}
@ -183,7 +183,7 @@ func (self *VMEnv) State() *state.StateDB { return self.state }
func (self *VMEnv) Origin() common.Address { return *self.transactor }
func (self *VMEnv) BlockNumber() *big.Int { return common.Big0 }
func (self *VMEnv) Coinbase() common.Address { return *self.transactor }
func (self *VMEnv) Time() uint64 { return self.time }
func (self *VMEnv) Time() *big.Int { return self.time }
func (self *VMEnv) Difficulty() *big.Int { return common.Big1 }
func (self *VMEnv) BlockHash() []byte { return make([]byte, 32) }
func (self *VMEnv) Value() *big.Int { return self.value }

View File

@ -22,7 +22,6 @@ import (
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/tests"
@ -92,7 +91,6 @@ func runBlockTest(ctx *cli.Context) {
if err != nil {
utils.Fatalf("%v", err)
}
defer ethereum.Stop()
if rpc {
fmt.Println("Block Test post state validated, starting RPC interface.")
startEth(ctx, ethereum)
@ -103,34 +101,31 @@ func runBlockTest(ctx *cli.Context) {
func runOneBlockTest(ctx *cli.Context, test *tests.BlockTest) (*eth.Ethereum, error) {
cfg := utils.MakeEthConfig(ClientIdentifier, Version, ctx)
cfg.NewDB = func(path string) (common.Database, error) { return ethdb.NewMemDatabase() }
cfg.NewDB = func(path string) (ethdb.Database, error) { return ethdb.NewMemDatabase() }
cfg.MaxPeers = 0 // disable network
cfg.Shh = false // disable whisper
cfg.NAT = nil // disable port mapping
ethereum, err := eth.New(cfg)
if err != nil {
return nil, err
}
// if err := ethereum.Start(); err != nil {
// return nil, err
// }
// import the genesis block
ethereum.ResetWithGenesisBlock(test.Genesis)
// import pre accounts
statedb, err := test.InsertPreState(ethereum)
_, err = test.InsertPreState(ethereum)
if err != nil {
return ethereum, fmt.Errorf("InsertPreState: %v", err)
}
if err := test.TryBlocksInsert(ethereum.ChainManager()); err != nil {
cm := ethereum.ChainManager()
validBlocks, err := test.TryBlocksInsert(cm)
if err != nil {
return ethereum, fmt.Errorf("Block Test load error: %v", err)
}
if err := test.ValidatePostState(statedb); err != nil {
newDB := cm.State()
if err := test.ValidatePostState(newDB); err != nil {
return ethereum, fmt.Errorf("post state validation failed: %v", err)
}
return ethereum, nil
return ethereum, test.ValidateImportedHeaders(cm, validBlocks)
}

View File

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
)
@ -191,7 +192,7 @@ func hashish(x string) bool {
return err != nil
}
func closeAll(dbs ...common.Database) {
func closeAll(dbs ...ethdb.Database) {
for _, db := range dbs {
db.Close()
}

View File

@ -121,7 +121,7 @@ func keywordCompleter(line string) []string {
}
func apiWordCompleter(line string, pos int) (head string, completions []string, tail string) {
if len(line) == 0 {
if len(line) == 0 || pos == 0 {
return "", nil, ""
}

View File

@ -92,7 +92,7 @@ func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *eth
db, _ := ethdb.NewMemDatabase()
core.WriteGenesisBlockForTesting(db, common.HexToAddress(testAddress), common.String2Big(testBalance))
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
ks := crypto.NewKeyStorePlain(filepath.Join(tmp, "keystore"))
am := accounts.NewManager(ks)
conf := &eth.Config{
@ -103,7 +103,7 @@ func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *eth
Name: "test",
SolcPath: testSolcPath,
PowTest: true,
NewDB: func(path string) (common.Database, error) { return db, nil },
NewDB: func(path string) (ethdb.Database, error) { return db, nil },
}
if config != nil {
config(conf)

View File

@ -48,16 +48,21 @@ import (
const (
ClientIdentifier = "Geth"
Version = "1.1.0"
Version = "1.2.3"
VersionMajor = 1
VersionMinor = 1
VersionPatch = 0
VersionMinor = 2
VersionPatch = 3
)
var (
gitCommit string // set via linker flagg
nodeNameVersion string
app *cli.App
ExtraDataFlag = cli.StringFlag{
Name: "extradata",
Usage: "Extra data for the miner",
}
)
func init() {
@ -283,6 +288,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.DataDirFlag,
utils.BlockchainVersionFlag,
utils.OlympicFlag,
utils.EthVersionFlag,
utils.CacheFlag,
utils.JSpathFlag,
utils.ListenPortFlag,
@ -307,6 +313,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.IPCPathFlag,
utils.ExecFlag,
utils.WhisperEnabledFlag,
utils.DevModeFlag,
utils.VMDebugFlag,
utils.VMForceJitFlag,
utils.VMJitCacheFlag,
@ -329,10 +336,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.GpobaseStepDownFlag,
utils.GpobaseStepUpFlag,
utils.GpobaseCorrectionFactorFlag,
ExtraDataFlag,
}
app.Before = func(ctx *cli.Context) error {
utils.SetupLogger(ctx)
utils.SetupVM(ctx)
utils.SetupEth(ctx)
if ctx.GlobalBool(utils.PProfEanbledFlag.Name) {
utils.StartPProf(ctx)
}
@ -351,6 +360,14 @@ func main() {
}
}
// makeExtra resolves extradata for the miner from a flag or returns a default.
func makeExtra(ctx *cli.Context) []byte {
if ctx.GlobalIsSet(ExtraDataFlag.Name) {
return []byte(ctx.GlobalString(ExtraDataFlag.Name))
}
return makeDefaultExtra()
}
func makeDefaultExtra() []byte {
var clientInfo = struct {
Version uint
@ -379,7 +396,7 @@ func run(ctx *cli.Context) {
}
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
cfg.ExtraData = makeDefaultExtra()
cfg.ExtraData = makeExtra(ctx)
ethereum, err := eth.New(cfg)
if err != nil {
@ -400,7 +417,7 @@ func attach(ctx *cli.Context) {
client, err = comms.ClientFromEndpoint(ctx.Args().First(), codec.JSON)
} else {
cfg := comms.IpcConfig{
Endpoint: ctx.GlobalString(utils.IPCPathFlag.Name),
Endpoint: utils.IpcSocketPath(ctx),
}
client, err = comms.NewIpcClient(cfg, codec.JSON)
}
@ -427,6 +444,8 @@ func console(ctx *cli.Context) {
utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
cfg.ExtraData = makeExtra(ctx)
ethereum, err := eth.New(cfg)
if err != nil {
utils.Fatalf("%v", err)
@ -525,17 +544,16 @@ func blockRecovery(ctx *cli.Context) {
var block *types.Block
if arg[0] == '#' {
block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
block = core.GetBlock(blockDb, core.GetCanonicalHash(blockDb, common.String2Big(arg[1:]).Uint64()))
} else {
block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
block = core.GetBlock(blockDb, common.HexToHash(arg))
}
if block == nil {
glog.Fatalln("block not found. Recovery failed")
}
err = core.WriteHead(blockDb, block)
if err != nil {
if err = core.WriteHeadBlockHash(blockDb, block.Hash()); err != nil {
glog.Fatalln("block write err", err)
}
glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())

View File

@ -289,7 +289,7 @@ func updateChart(metric string, data []float64, base *int, chart *termui.LineCha
}
}
unit, scale := 0, 1.0
for high >= 1000 {
for high >= 1000 && unit+1 < len(dataUnits) {
high, unit, scale = high/1000, unit+1, scale*1000
}
// If the unit changes, re-create the chart (hack to set max height...)

View File

@ -121,6 +121,10 @@ var (
Name: "genesis",
Usage: "Inserts/Overwrites the genesis block (json format)",
}
DevModeFlag = cli.BoolFlag{
Name: "dev",
Usage: "Developer mode. This mode creates a private network and sets several debugging flags",
}
IdentityFlag = cli.StringFlag{
Name: "identity",
Usage: "Custom node name",
@ -138,6 +142,11 @@ var (
Name: "olympic",
Usage: "Use olympic style protocol",
}
EthVersionFlag = cli.IntFlag{
Name: "eth",
Value: 62,
Usage: "Highest eth protocol to advertise (temporary, dev option)",
}
// miner settings
MinerThreadsFlag = cli.IntFlag{
@ -405,7 +414,7 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
}
return &eth.Config{
cfg := &eth.Config{
Name: common.MakeName(clientID, version),
DataDir: ctx.GlobalString(DataDirFlag.Name),
GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name),
@ -442,6 +451,33 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
SolcPath: ctx.GlobalString(SolcPathFlag.Name),
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
}
if ctx.GlobalBool(DevModeFlag.Name) {
if !ctx.GlobalIsSet(VMDebugFlag.Name) {
cfg.VmDebug = true
}
if !ctx.GlobalIsSet(MaxPeersFlag.Name) {
cfg.MaxPeers = 0
}
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = new(big.Int)
}
if !ctx.GlobalIsSet(ListenPortFlag.Name) {
cfg.Port = "0" // auto port
}
if !ctx.GlobalIsSet(WhisperEnabledFlag.Name) {
cfg.Shh = true
}
if !ctx.GlobalIsSet(DataDirFlag.Name) {
cfg.DataDir = os.TempDir() + "/ethereum_dev_mode"
}
cfg.PowTest = true
cfg.DevMode = true
glog.V(logger.Info).Infoln("dev mode enabled")
}
return cfg
}
// SetupLogger configures glog from the logging-related command line flags.
@ -459,8 +495,20 @@ func SetupVM(ctx *cli.Context) {
vm.SetJITCacheSize(ctx.GlobalInt(VMJitCacheFlag.Name))
}
// SetupEth configures the eth packages global settings
func SetupEth(ctx *cli.Context) {
version := ctx.GlobalInt(EthVersionFlag.Name)
for len(eth.ProtocolVersions) > 0 && eth.ProtocolVersions[0] > uint(version) {
eth.ProtocolVersions = eth.ProtocolVersions[1:]
eth.ProtocolLengths = eth.ProtocolLengths[1:]
}
if len(eth.ProtocolVersions) == 0 {
Fatalf("No valid eth protocols remaining")
}
}
// MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb common.Database) {
func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb ethdb.Database) {
datadir := ctx.GlobalString(DataDirFlag.Name)
cache := ctx.GlobalInt(CacheFlag.Name)

View File

@ -1,49 +1,50 @@
# ethutil
# common
[![Build
Status](https://travis-ci.org/ethereum/go-ethereum.png?branch=master)](https://travis-ci.org/ethereum/go-ethereum)
The ethutil package contains the ethereum utility library.
The common package contains the ethereum utility library.
# Installation
`go get github.com/ethereum/ethutil-go`
As a subdirectory the main go-ethereum repository, you get it with
`go get github.com/ethereum/go-ethereum`.
# Usage
## RLP (Recursive Linear Prefix) Encoding
RLP Encoding is an encoding scheme utilized by the Ethereum project. It
encodes any native value or list to string.
RLP Encoding is an encoding scheme used by the Ethereum project. It
encodes any native value or list to a string.
More in depth information about the Encoding scheme see the [Wiki](http://wiki.ethereum.org/index.php/RLP)
article.
More in depth information about the encoding scheme see the
[Wiki](http://wiki.ethereum.org/index.php/RLP) article.
```go
rlp := ethutil.Encode("doge")
rlp := common.Encode("doge")
fmt.Printf("%q\n", rlp) // => "\0x83dog"
rlp = ethutil.Encode([]interface{}{"dog", "cat"})
rlp = common.Encode([]interface{}{"dog", "cat"})
fmt.Printf("%q\n", rlp) // => "\0xc8\0x83dog\0x83cat"
decoded := ethutil.Decode(rlp)
decoded := common.Decode(rlp)
fmt.Println(decoded) // => ["dog" "cat"]
```
## Patricia Trie
Patricie Tree is a merkle trie utilized by the Ethereum project.
Patricie Tree is a merkle trie used by the Ethereum project.
More in depth information about the (modified) Patricia Trie can be
found on the [Wiki](http://wiki.ethereum.org/index.php/Patricia_Tree).
The patricia trie uses a db as backend and could be anything as long as
it satisfies the Database interface found in `ethutil/db.go`.
it satisfies the Database interface found in `common/db.go`.
```go
db := NewDatabase()
// db, root
trie := ethutil.NewTrie(db, "")
trie := common.NewTrie(db, "")
trie.Put("puppy", "dog")
trie.Put("horse", "stallion")
@ -65,7 +66,7 @@ all (key, value) bindings.
// ... Create db/trie
// Note that RLP uses interface slices as list
value := ethutil.Encode([]interface{}{"one", 2, "three", []interface{}{42}})
value := common.Encode([]interface{}{"one", 2, "three", []interface{}{42}})
// Store the RLP encoded value of the list
trie.Put("mykey", value)
```
@ -89,7 +90,7 @@ type (e.g. `Slice()` returns []interface{}, `Uint()` return 0, etc).
`Append(v)` appends the value (v) to the current value/list.
```go
val := ethutil.NewEmptyValue().Append(1).Append("2")
val := common.NewEmptyValue().Append(1).Append("2")
val.AppendList().Append(3)
```
@ -110,7 +111,7 @@ val.AppendList().Append(3)
`Byte()` returns the value as a single byte.
```go
val := ethutil.NewValue([]interface{}{1,"2",[]interface{}{3}})
val := common.NewValue([]interface{}{1,"2",[]interface{}{3}})
val.Get(0).Uint() // => 1
val.Get(1).Str() // => "2"
s := val.Get(2) // => Value([]interface{}{3})
@ -122,7 +123,7 @@ s.Get(0).Uint() // => 3
Decoding streams of RLP data is simplified
```go
val := ethutil.NewValueFromBytes(rlpData)
val := common.NewValueFromBytes(rlpData)
val.Get(0).Uint()
```
@ -132,7 +133,7 @@ Encoding from Value to RLP is done with the `Encode` method. The
underlying value can be anything RLP can encode (int, str, lists, bytes)
```go
val := ethutil.NewValue([]interface{}{1,"2",[]interface{}{3}})
val := common.NewValue([]interface{}{1,"2",[]interface{}{3}})
rlp := val.Encode()
// Store the rlp data
Store(rlp)

View File

@ -19,6 +19,7 @@ package compiler
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
@ -33,15 +34,10 @@ import (
"github.com/ethereum/go-ethereum/logger/glog"
)
const (
// flair = "Christian <c@ethdev.com> and Lefteris <lefteris@ethdev.com> (c) 2014-2015"
flair = ""
languageVersion = "0"
)
var (
versionRegExp = regexp.MustCompile("[0-9]+.[0-9]+.[0-9]+")
params = []string{
versionRegexp = regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+")
legacyRegexp = regexp.MustCompile("0\\.(9\\..*|1\\.[01])")
paramsLegacy = []string{
"--binary", // Request to output the contract in binary (hexadecimal).
"file", //
"--json-abi", // Request to output the contract's JSON ABI interface.
@ -53,6 +49,15 @@ var (
"--add-std",
"1",
}
paramsNew = []string{
"--bin", // Request to output the contract in binary (hexadecimal).
"--abi", // Request to output the contract's JSON ABI interface.
"--userdoc", // Request to output the contract's Natspec user documentation.
"--devdoc", // Request to output the contract's Natspec developer documentation.
"--add-std", // include standard lib contracts
"--optimize", // code optimizer switched on
"-o", // output directory
}
)
type Contract struct {
@ -65,6 +70,7 @@ type ContractInfo struct {
Language string `json:"language"`
LanguageVersion string `json:"languageVersion"`
CompilerVersion string `json:"compilerVersion"`
CompilerOptions string `json:"compilerOptions"`
AbiDefinition interface{} `json:"abiDefinition"`
UserDoc interface{} `json:"userDoc"`
DeveloperDoc interface{} `json:"developerDoc"`
@ -73,6 +79,8 @@ type ContractInfo struct {
type Solidity struct {
solcPath string
version string
fullVersion string
legacy bool
}
func New(solcPath string) (sol *Solidity, err error) {
@ -93,112 +101,118 @@ func New(solcPath string) (sol *Solidity, err error) {
return
}
version := versionRegExp.FindString(out.String())
fullVersion := out.String()
version := versionRegexp.FindString(fullVersion)
legacy := legacyRegexp.MatchString(version)
sol = &Solidity{
solcPath: solcPath,
version: version,
fullVersion: fullVersion,
legacy: legacy,
}
glog.V(logger.Info).Infoln(sol.Info())
return
}
func (sol *Solidity) Info() string {
return fmt.Sprintf("solc v%s\nSolidity Compiler: %s\n%s", sol.version, sol.solcPath, flair)
return fmt.Sprintf("%s\npath: %s", sol.fullVersion, sol.solcPath)
}
func (sol *Solidity) Version() string {
return sol.version
}
func (sol *Solidity) Compile(source string) (contracts map[string]*Contract, err error) {
// Compile builds and returns all the contracts contained within a source string.
func (sol *Solidity) Compile(source string) (map[string]*Contract, error) {
// Short circuit if no source code was specified
if len(source) == 0 {
err = fmt.Errorf("empty source")
return
return nil, errors.New("solc: empty source string")
}
// Create a safe place to dump compilation output
wd, err := ioutil.TempDir("", "solc")
if err != nil {
return
return nil, fmt.Errorf("solc: failed to create temporary build folder: %v", err)
}
defer os.RemoveAll(wd)
in := strings.NewReader(source)
var out bytes.Buffer
// cwd set to temp dir
// Assemble the compiler command, change to the temp folder and capture any errors
stderr := new(bytes.Buffer)
var params []string
if sol.legacy {
params = paramsLegacy
} else {
params = paramsNew
params = append(params, wd)
}
compilerOptions := strings.Join(params, " ")
cmd := exec.Command(sol.solcPath, params...)
cmd.Dir = wd
cmd.Stdin = in
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
err = fmt.Errorf("solc error: %v", err)
return
}
cmd.Stdin = strings.NewReader(source)
cmd.Stderr = stderr
matches, _ := filepath.Glob(wd + "/*.binary")
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("solc: %v\n%s", err, string(stderr.Bytes()))
}
// Sanity check that something was actually built
matches, _ := filepath.Glob(wd + "/*\\.bin*")
if len(matches) < 1 {
err = fmt.Errorf("solc error: missing code output")
return
return nil, fmt.Errorf("solc: no build results found")
}
contracts = make(map[string]*Contract)
// Compilation succeeded, assemble and return the contracts
contracts := make(map[string]*Contract)
for _, path := range matches {
_, file := filepath.Split(path)
base := strings.Split(file, ".")[0]
codeFile := filepath.Join(wd, base+".binary")
abiDefinitionFile := filepath.Join(wd, base+".abi")
userDocFile := filepath.Join(wd, base+".docuser")
developerDocFile := filepath.Join(wd, base+".docdev")
var code, abiDefinitionJson, userDocJson, developerDocJson []byte
code, err = ioutil.ReadFile(codeFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for code: %v", err)
return
// Parse the individual compilation results (code binary, ABI definitions, user and dev docs)
var binary []byte
binext := ".bin"
if sol.legacy {
binext = ".binary"
}
abiDefinitionJson, err = ioutil.ReadFile(abiDefinitionFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for abiDefinition: %v", err)
return
if binary, err = ioutil.ReadFile(filepath.Join(wd, base+binext)); err != nil {
return nil, fmt.Errorf("solc: error reading compiler output for code: %v", err)
}
var abiDefinition interface{}
err = json.Unmarshal(abiDefinitionJson, &abiDefinition)
userDocJson, err = ioutil.ReadFile(userDocFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for userDoc: %v", err)
return
var abi interface{}
if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".abi")); err != nil {
return nil, fmt.Errorf("solc: error reading abi definition: %v", err)
} else if err = json.Unmarshal(blob, &abi); err != nil {
return nil, fmt.Errorf("solc: error parsing abi definition: %v", err)
}
var userDoc interface{}
err = json.Unmarshal(userDocJson, &userDoc)
developerDocJson, err = ioutil.ReadFile(developerDocFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for developerDoc: %v", err)
return
var userdoc interface{}
if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".docuser")); err != nil {
return nil, fmt.Errorf("solc: error reading user doc: %v", err)
} else if err = json.Unmarshal(blob, &userdoc); err != nil {
return nil, fmt.Errorf("solc: error parsing user doc: %v", err)
}
var developerDoc interface{}
err = json.Unmarshal(developerDocJson, &developerDoc)
contract := &Contract{
Code: "0x" + string(code),
var devdoc interface{}
if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".docdev")); err != nil {
return nil, fmt.Errorf("solc: error reading dev doc: %v", err)
} else if err = json.Unmarshal(blob, &devdoc); err != nil {
return nil, fmt.Errorf("solc: error parsing dev doc: %v", err)
}
// Assemble the final contract
contracts[base] = &Contract{
Code: "0x" + string(binary),
Info: ContractInfo{
Source: source,
Language: "Solidity",
LanguageVersion: languageVersion,
LanguageVersion: sol.version,
CompilerVersion: sol.version,
AbiDefinition: abiDefinition,
UserDoc: userDoc,
DeveloperDoc: developerDoc,
CompilerOptions: compilerOptions,
AbiDefinition: abi,
UserDoc: userdoc,
DeveloperDoc: devdoc,
},
}
contracts[base] = contract
}
return
return contracts, nil
}
func SaveInfo(info *ContractInfo, filename string) (contenthash common.Hash, err error) {

View File

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
)
const solcVersion = "0.9.23"
const solcVersion = "0.1.1"
var (
source = `
@ -37,18 +37,18 @@ contract test {
}
}
`
code = "0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"
info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0","compilerVersion":"0.9.23","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
code = "0x6060604052606d8060116000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa1146037576035565b005b6046600480359060200150605c565b6040518082815260200191505060405180910390f35b60006007820290506068565b91905056"
info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0.1.1","compilerVersion":"0.1.1","compilerOptions":"--binary file --json-abi file --natspec-user file --natspec-dev file --add-std 1","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
infohash = common.HexToHash("0xea782f674eb898e477c20e8a7cf11c2c28b09fa68b5278732104f7a101aed255")
infohash = common.HexToHash("0x9f3803735e7f16120c5a140ab3f02121fd3533a9655c69b33a10e78752cc49b0")
)
func TestCompiler(t *testing.T) {
sol, err := New("")
if err != nil {
t.Skip("solc not found: skip")
t.Skipf("solc not found: %v", err)
} else if sol.Version() != solcVersion {
t.Skip("WARNING: skipping due to a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
t.Skipf("WARNING: a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
}
contracts, err := sol.Compile(source)
if err != nil {
@ -83,7 +83,7 @@ func TestCompileError(t *testing.T) {
func TestNoCompiler(t *testing.T) {
_, err := New("/path/to/solc")
if err != nil {
t.Log("solidity quits with error: %v", err)
t.Logf("solidity quits with error: %v", err)
} else {
t.Errorf("no solc installed, but got no error")
}

View File

@ -134,7 +134,7 @@ func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
db, _ := ethdb.NewMemDatabase()
// set up mock genesis with balance on the testAddress
core.WriteGenesisBlockForTesting(db, common.HexToAddress(testAddress), common.String2Big(testBalance))
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
// only use minimalistic stack with no networking
ethereum, err = eth.New(&eth.Config{
@ -143,7 +143,7 @@ func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
MaxPeers: 0,
PowTest: true,
Etherbase: common.HexToAddress(testAddress),
NewDB: func(path string) (common.Database, error) { return db, nil },
NewDB: func(path string) (ethdb.Database, error) { return db, nil },
})
if err != nil {

View File

@ -144,7 +144,7 @@ func genUncles(i int, gen *BlockGen) {
func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Create the database in memory or in a temporary directory.
var db common.Database
var db ethdb.Database
if !disk {
db, _ = ethdb.NewMemDatabase()
} else {
@ -162,7 +162,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Generate a chain of b.N blocks using the supplied block
// generator function.
genesis := WriteGenesisBlockForTesting(db, benchRootAddr, benchRootFunds)
genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
chain := GenerateChain(genesis, db, b.N, gen)
// Time the insertion of the new chain.

View File

@ -1,120 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// BlockCache implements a caching mechanism specifically for blocks and uses FILO to pop
type BlockCache struct {
size int
hashes []common.Hash
blocks map[common.Hash]*types.Block
mu sync.RWMutex
}
// Creates and returns a `BlockCache` with `size`. If `size` is smaller than 1 it will panic
func NewBlockCache(size int) *BlockCache {
if size < 1 {
panic("block cache size not allowed to be smaller than 1")
}
bc := &BlockCache{size: size}
bc.Clear()
return bc
}
func (bc *BlockCache) Clear() {
bc.blocks = make(map[common.Hash]*types.Block)
bc.hashes = nil
}
func (bc *BlockCache) Push(block *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
if len(bc.hashes) == bc.size {
delete(bc.blocks, bc.hashes[0])
// XXX There are a few other options on solving this
// 1) use a poller / GC like mechanism to clean up untracked objects
// 2) copy as below
// re-use the slice and remove the reference to bc.hashes[0]
// this will allow the element to be garbage collected.
copy(bc.hashes, bc.hashes[1:])
} else {
bc.hashes = append(bc.hashes, common.Hash{})
}
hash := block.Hash()
bc.blocks[hash] = block
bc.hashes[len(bc.hashes)-1] = hash
}
func (bc *BlockCache) Delete(hash common.Hash) {
bc.mu.Lock()
defer bc.mu.Unlock()
if _, ok := bc.blocks[hash]; ok {
delete(bc.blocks, hash)
for i, h := range bc.hashes {
if hash == h {
bc.hashes = bc.hashes[:i+copy(bc.hashes[i:], bc.hashes[i+1:])]
// or ? => bc.hashes = append(bc.hashes[:i], bc.hashes[i+1]...)
break
}
}
}
}
func (bc *BlockCache) Get(hash common.Hash) *types.Block {
bc.mu.RLock()
defer bc.mu.RUnlock()
if block, haz := bc.blocks[hash]; haz {
return block
}
return nil
}
func (bc *BlockCache) Has(hash common.Hash) bool {
bc.mu.RLock()
defer bc.mu.RUnlock()
_, ok := bc.blocks[hash]
return ok
}
func (bc *BlockCache) Each(cb func(int, *types.Block)) {
bc.mu.Lock()
defer bc.mu.Unlock()
i := 0
for _, block := range bc.blocks {
cb(i, block)
i++
}
}


@ -1,76 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
func newChain(size int) (chain []*types.Block) {
var parentHash common.Hash
for i := 0; i < size; i++ {
head := &types.Header{ParentHash: parentHash, Number: big.NewInt(int64(i))}
block := types.NewBlock(head, nil, nil, nil)
chain = append(chain, block)
parentHash = block.Hash()
}
return chain
}
func insertChainCache(cache *BlockCache, chain []*types.Block) {
for _, block := range chain {
cache.Push(block)
}
}
func TestNewBlockCache(t *testing.T) {
chain := newChain(3)
cache := NewBlockCache(2)
insertChainCache(cache, chain)
if cache.hashes[0] != chain[1].Hash() {
t.Error("oldest block incorrect")
}
}
func TestInclusion(t *testing.T) {
chain := newChain(3)
cache := NewBlockCache(3)
insertChainCache(cache, chain)
for _, block := range chain {
if b := cache.Get(block.Hash()); b == nil {
t.Errorf("getting %x failed", block.Hash())
}
}
}
func TestDeletion(t *testing.T) {
chain := newChain(3)
cache := NewBlockCache(3)
insertChainCache(cache, chain)
cache.Delete(chain[1].Hash())
if cache.Has(chain[1].Hash()) {
t.Errorf("expected %x not to be included")
}
}


@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@ -41,7 +42,7 @@ const (
)
type BlockProcessor struct {
chainDb common.Database
chainDb ethdb.Database
// Mutex for locking the block processor. Blocks can only be handled one at a time
mutex sync.Mutex
// Canonical block chain
@ -56,7 +57,19 @@ type BlockProcessor struct {
eventMux *event.TypeMux
}
func NewBlockProcessor(db common.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
// TODO: type GasPool big.Int
//
// GasPool is implemented by state.StateObject. This is a historical
// coincidence. Gas tracking should move out of StateObject.
// GasPool tracks the amount of gas available during
// execution of the transactions in a block.
type GasPool interface {
AddGas(gas, price *big.Int)
SubGas(gas, price *big.Int) error
}
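The GasPool interface above is still satisfied by state.StateObject in this release, as the TODO notes. A minimal, hypothetical sketch of the big.Int-backed pool the comment suggests (simpleGasPool and errGasLimitReached are illustrative names, not part of the codebase):

package core

import (
	"errors"
	"math/big"
)

// simpleGasPool is a hypothetical big.Int-backed GasPool implementation.
type simpleGasPool struct {
	gas *big.Int // gas still available for the block being processed
}

var errGasLimitReached = errors.New("gas limit reached")

// AddGas returns unused gas to the pool; the price argument is ignored here.
func (p *simpleGasPool) AddGas(gas, price *big.Int) {
	p.gas.Add(p.gas, gas)
}

// SubGas reserves gas from the pool, erroring once the block limit is exhausted.
func (p *simpleGasPool) SubGas(gas, price *big.Int) error {
	if p.gas.Cmp(gas) < 0 {
		return errGasLimitReached
	}
	p.gas.Sub(p.gas, gas)
	return nil
}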
func NewBlockProcessor(db ethdb.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
sm := &BlockProcessor{
chainDb: db,
mem: make(map[string]*big.Int),
@ -64,16 +77,15 @@ func NewBlockProcessor(db common.Database, pow pow.PoW, chainManager *ChainManag
bc: chainManager,
eventMux: eventMux,
}
return sm
}
func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block *types.Block, transientProcess bool) (receipts types.Receipts, err error) {
coinbase := statedb.GetOrNewStateObject(block.Coinbase())
coinbase.SetGasLimit(block.GasLimit())
gp := statedb.GetOrNewStateObject(block.Coinbase())
gp.SetGasLimit(block.GasLimit())
// Process the transactions on to parent state
receipts, err = sm.ApplyTransactions(coinbase, statedb, block, block.Transactions(), transientProcess)
receipts, err = sm.ApplyTransactions(gp, statedb, block, block.Transactions(), transientProcess)
if err != nil {
return nil, err
}
@ -81,9 +93,8 @@ func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block
return receipts, nil
}
func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
cb := statedb.GetStateObject(coinbase.Address())
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, cb)
func (self *BlockProcessor) ApplyTransaction(gp GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, gp)
if err != nil {
return nil, nil, err
}
@ -118,7 +129,7 @@ func (self *BlockProcessor) ChainManager() *ChainManager {
return self.bc
}
func (self *BlockProcessor) ApplyTransactions(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
func (self *BlockProcessor) ApplyTransactions(gp GasPool, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
@ -130,7 +141,7 @@ func (self *BlockProcessor) ApplyTransactions(coinbase *state.StateObject, state
for i, tx := range txs {
statedb.StartRecord(tx.Hash(), block.Hash(), i)
receipt, txGas, err := self.ApplyTransaction(coinbase, statedb, header, tx, totalUsedGas, transientProcess)
receipt, txGas, err := self.ApplyTransaction(gp, statedb, header, tx, totalUsedGas, transientProcess)
if err != nil {
return nil, err
}
@ -203,7 +214,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
txs := block.Transactions()
// Block validation
if err = ValidateHeader(sm.Pow, header, parent, false); err != nil {
if err = ValidateHeader(sm.Pow, header, parent.Header(), false, false); err != nil {
return
}
@ -327,7 +338,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty
return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
}
if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash], true); err != nil {
if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
}
}
@ -357,46 +368,50 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err erro
}
// See YP section 4.3.4. "Block Header Validity"
// Validates a block. Returns an error if the block is invalid.
func ValidateHeader(pow pow.PoW, block *types.Header, parent *types.Block, checkPow bool) error {
if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Block extra data too long (%d)", len(block.Extra))
// Validates a header. Returns an error if the header is invalid.
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
}
if block.Time > uint64(time.Now().Unix()) {
if uncle {
if header.Time.Cmp(common.MaxBig) == 1 {
return BlockTSTooBigErr
}
} else {
if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
return BlockFutureErr
}
if block.Time <= parent.Time() {
}
if header.Time.Cmp(parent.Time) != 1 {
return BlockEqualTSErr
}
expd := CalcDifficulty(block.Time, parent.Time(), parent.Number(), parent.Difficulty())
if expd.Cmp(block.Difficulty) != 0 {
return fmt.Errorf("Difficulty check failed for block %v, %v", block.Difficulty, expd)
expd := CalcDifficulty(header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
if expd.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("Difficulty check failed for header %v, %v", header.Difficulty, expd)
}
var a, b *big.Int
a = parent.GasLimit()
a = a.Sub(a, block.GasLimit)
a := new(big.Int).Set(parent.GasLimit)
a = a.Sub(a, header.GasLimit)
a.Abs(a)
b = parent.GasLimit()
b := new(big.Int).Set(parent.GasLimit)
b = b.Div(b, params.GasLimitBoundDivisor)
if !(a.Cmp(b) < 0) || (block.GasLimit.Cmp(params.MinGasLimit) == -1) {
return fmt.Errorf("GasLimit check failed for block %v (%v > %v)", block.GasLimit, a, b)
if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
return fmt.Errorf("GasLimit check failed for header %v (%v > %v)", header.GasLimit, a, b)
}
num := parent.Number()
num.Sub(block.Number, num)
num := new(big.Int).Set(parent.Number)
num.Sub(header.Number, num)
if num.Cmp(big.NewInt(1)) != 0 {
return BlockNumberErr
}
if checkPow {
// Verify the nonce of the block. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(block)) {
return ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
// Verify the nonce of the header. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(header)) {
return ValidationError("Header's nonce is invalid (= %x)", header.Nonce)
}
}
return nil
}
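A small worked example of the gas limit bound enforced above; the limits are illustrative and 1024 is the yellow-paper GasLimitBoundDivisor:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	parentLimit := big.NewInt(3141592) // parent header's gas limit
	childLimit := big.NewInt(3144000)  // candidate header's gas limit

	diff := new(big.Int).Abs(new(big.Int).Sub(parentLimit, childLimit)) // 2408
	bound := new(big.Int).Div(parentLimit, big.NewInt(1024))            // 3068

	// The check passes only if the limit moved by less than parentLimit/1024
	// (and the candidate limit also stays at or above params.MinGasLimit).
	fmt.Println(diff.Cmp(bound) < 0) // true
}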


@ -48,13 +48,13 @@ func TestNumber(t *testing.T) {
statedb := state.New(chain.Genesis().Root(), chain.chainDb)
header := makeHeader(chain.Genesis(), statedb)
header.Number = big.NewInt(3)
err := ValidateHeader(pow, header, chain.Genesis(), false)
err := ValidateHeader(pow, header, chain.Genesis().Header(), false, false)
if err != BlockNumberErr {
t.Errorf("expected block number error, got %q", err)
}
header = makeHeader(chain.Genesis(), statedb)
err = ValidateHeader(pow, header, chain.Genesis(), false)
err = ValidateHeader(pow, header, chain.Genesis().Header(), false, false)
if err == BlockNumberErr {
t.Errorf("didn't expect block number error")
}


@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/pow"
)
@ -130,6 +131,17 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
return b.chain[index]
}
// OffsetTime modifies the time instance of a block, implicitly changing its
// associated difficulty. It's useful to test scenarios where forking is not
// tied to chain length directly.
func (b *BlockGen) OffsetTime(seconds int64) {
b.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}
b.header.Difficulty = CalcDifficulty(b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
}
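A minimal usage sketch of OffsetTime, assuming a genesis block and db such as those set up in the tests later in this changeset: pushing a block's timestamp forward lowers its difficulty, which yields a deliberately weaker side chain for reorg scenarios.

weaker := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
	if i == 1 {
		gen.OffsetTime(9) // later timestamp, hence lower difficulty for this block
	}
})
_ = weaker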
// GenerateChain creates a chain of n blocks. The first block's
// parent will be the provided parent. db is used to store
// intermediate states and should contain the parent's state trie.
@ -142,7 +154,7 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
// Blocks created by GenerateChain do not contain valid proof of work
// values. Inserting them into ChainManager requires use of FakePow or
// a similar non-validating proof of work implementation.
func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int, *BlockGen)) []*types.Block {
func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block {
statedb := state.New(parent.Root(), db)
blocks := make(types.Blocks, n)
genblock := func(i int, h *types.Header) *types.Block {
@ -158,7 +170,6 @@ func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int,
for i := 0; i < n; i++ {
header := makeHeader(parent, statedb)
block := genblock(i, header)
block.Td = CalcTD(block, parent)
blocks[i] = block
parent = block
}
@ -166,22 +177,27 @@ func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int,
}
func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
time := parent.Time() + 10 // block time is fixed at 10 seconds
var time *big.Int
if parent.Time() == nil {
time = big.NewInt(10)
} else {
time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
}
return &types.Header{
Root: state.Root(),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
Difficulty: CalcDifficulty(time, parent.Time(), parent.Number(), parent.Difficulty()),
Difficulty: CalcDifficulty(time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: uint64(time),
Time: time,
}
}
// newCanonical creates a new deterministic canonical chain by running
// InsertChain on the result of makeChain.
func newCanonical(n int, db common.Database) (*BlockProcessor, error) {
func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
evmux := &event.TypeMux{}
WriteTestNetGenesisBlock(db, 0)
@ -197,7 +213,7 @@ func newCanonical(n int, db common.Database) (*BlockProcessor, error) {
return bman, err
}
func makeChain(parent *types.Block, n int, db common.Database, seed int) []*types.Block {
func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
})


@ -42,7 +42,7 @@ func ExampleGenerateChain() {
)
// Ensure that key1 has some funds in the genesis block.
genesis := WriteGenesisBlockForTesting(db, addr1, big.NewInt(1000000))
genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr1, big.NewInt(1000000)})
// This call generates a chain of 5 blocks. The function runs for
// each block and adds different features to gen based on the


@ -22,7 +22,6 @@ import (
"fmt"
"io"
"math/big"
"runtime"
"sync"
"sync/atomic"
"time"
@ -30,11 +29,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/hashicorp/golang-lru"
)
@ -48,6 +49,9 @@ var (
)
const (
headerCacheLimit = 512
bodyCacheLimit = 256
tdCacheLimit = 1024
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
@ -56,7 +60,7 @@ const (
type ChainManager struct {
//eth EthManager
chainDb common.Database
chainDb ethdb.Database
processor types.BlockProcessor
eventMux *event.TypeMux
genesisBlock *types.Block
@ -68,10 +72,13 @@ type ChainManager struct {
checkpoint int // checkpoint counts towards the new checkpoint
td *big.Int
currentBlock *types.Block
lastBlockHash common.Hash
currentGasLimit *big.Int
cache *lru.Cache // cache is the LRU caching
headerCache *lru.Cache // Cache for the most recent block headers
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
tdCache *lru.Cache // Cache for the most recent block total difficulties
blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
quit chan struct{}
@ -83,13 +90,24 @@ type ChainManager struct {
pow pow.PoW
}
func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
cache, _ := lru.New(blockCacheLimit)
func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
headerCache, _ := lru.New(headerCacheLimit)
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks)
bc := &ChainManager{
chainDb: chainDb,
eventMux: mux,
quit: make(chan struct{}),
cache: cache,
headerCache: headerCache,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
tdCache: tdCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
pow: pow,
}
@ -105,11 +123,9 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
}
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
}
if err := bc.setLastState(); err != nil {
return nil, err
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash, _ := range BadHashes {
if block := bc.GetBlock(hash); block != nil {
@ -123,14 +139,8 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
}
}
// Take ownership of this particular state
bc.futureBlocks, _ = lru.New(maxFutureBlocks)
bc.makeCache()
go bc.update()
return bc, nil
}
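All of the per-type caches wired up above follow the same cache-aside pattern on top of hashicorp/golang-lru. A standalone sketch of that pattern; the lookupBody helper, key and placeholder value are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru"
)

func main() {
	bodyCache, _ := lru.New(256) // e.g. bodyCacheLimit

	lookupBody := func(hash string) string {
		// Short circuit if the body is already cached.
		if cached, ok := bodyCache.Get(hash); ok {
			return cached.(string)
		}
		body := "body loaded from the database" // stand-in for a real db read
		bodyCache.Add(hash, body)               // cache the result for next time
		return body
	}

	fmt.Println(lookupBody("0xabc")) // miss: loads and caches
	fmt.Println(lookupBody("0xabc")) // hit: served from the LRU
}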
@ -139,14 +149,16 @@ func (bc *ChainManager) SetHead(head *types.Block) {
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
bc.removeBlock(block)
DeleteBlock(bc.chainDb, block.Hash())
}
bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
bc.cache, _ = lru.New(blockCacheLimit)
bc.currentBlock = head
bc.makeCache()
bc.setTotalDifficulty(head.Td)
bc.setTotalDifficulty(bc.GetTd(head.Hash()))
bc.insert(head)
bc.setLastState()
}
@ -169,7 +181,7 @@ func (self *ChainManager) LastBlockHash() common.Hash {
self.mu.RLock()
defer self.mu.RUnlock()
return self.lastBlockHash
return self.currentBlock.Hash()
}
func (self *ChainManager) CurrentBlock() *types.Block {
@ -199,13 +211,13 @@ func (bc *ChainManager) recover() bool {
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
if block != nil {
err := bc.chainDb.Put([]byte("LastBlock"), block.Hash().Bytes())
if err != nil {
glog.Fatalln("db write err:", err)
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
glog.Fatalf("failed to write database head number: %v", err)
}
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to write database head hash: %v", err)
}
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
return true
}
}
@ -213,14 +225,13 @@ func (bc *ChainManager) recover() bool {
}
func (bc *ChainManager) setLastState() error {
data, _ := bc.chainDb.Get([]byte("LastBlock"))
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
head := GetHeadBlockHash(bc.chainDb)
if head != (common.Hash{}) {
block := bc.GetBlock(head)
if block != nil {
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
} else {
glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
if bc.recover() {
glog.Infof("Recover successful")
} else {
@ -230,7 +241,7 @@ func (bc *ChainManager) setLastState() error {
} else {
bc.Reset()
}
bc.td = bc.currentBlock.Td
bc.td = bc.GetTd(bc.currentBlock.Hash())
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
if glog.V(logger.Info) {
@ -240,63 +251,38 @@ func (bc *ChainManager) setLastState() error {
return nil
}
func (bc *ChainManager) makeCache() {
bc.cache, _ = lru.New(blockCacheLimit)
// load in last `blockCacheLimit` - 1 blocks. Last block is the current.
bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
bc.cache.Add(block.Hash(), block)
}
}
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *ChainManager) Reset() {
bc.ResetWithGenesisBlock(bc.genesisBlock)
}
// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
// Dump the entire block chain and purge the caches
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
bc.removeBlock(block)
DeleteBlock(bc.chainDb, block.Hash())
}
bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
bc.cache, _ = lru.New(blockCacheLimit)
// Prepare the genesis block
err := WriteBlock(bc.chainDb, bc.genesisBlock)
if err != nil {
glog.Fatalln("db err:", err)
// Prepare the genesis block and reinitialize the chain
if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err)
}
if err := WriteBlock(bc.chainDb, genesis); err != nil {
glog.Fatalf("failed to write genesis block: %v", err)
}
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.makeCache()
bc.setTotalDifficulty(common.Big("0"))
}
func (bc *ChainManager) removeBlock(block *types.Block) {
bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
}
func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
bc.removeBlock(block)
}
// Prepare the genesis block
gb.Td = gb.Difficulty()
bc.genesisBlock = gb
err := WriteBlock(bc.chainDb, bc.genesisBlock)
if err != nil {
glog.Fatalln("db err:", err)
}
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.makeCache()
bc.td = gb.Difficulty()
bc.setTotalDifficulty(genesis.Difficulty())
}
// Export writes the active chain to the given writer.
@ -335,23 +321,23 @@ func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error
// insert injects a block into the current block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
err := WriteHead(bc.chainDb, block)
if err != nil {
glog.Fatal("db write fail:", err)
// Add the block to the canonical chain number scheme and mark as the head
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
glog.Fatalf("failed to insert block number: %v", err)
}
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert block number: %v", err)
}
// Add a new restore point if we reached some limit
bc.checkpoint++
if bc.checkpoint > checkpointLimit {
err = bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes())
if err != nil {
glog.Fatal("db write fail:", err)
if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil {
glog.Fatalf("failed to create checkpoint: %v", err)
}
bc.checkpoint = 0
}
// Update the internal state with the head block
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
}
// Accessors
@ -359,61 +345,141 @@ func (bc *ChainManager) Genesis() *types.Block {
return bc.genesisBlock
}
// Block fetching methods
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *ChainManager) HasHeader(hash common.Hash) bool {
return bc.GetHeader(hash) != nil
}
// GetHeader retrieves a block header from the database by hash, caching it if
// found.
func (self *ChainManager) GetHeader(hash common.Hash) *types.Header {
// Short circuit if the header's already in the cache, retrieve otherwise
if header, ok := self.headerCache.Get(hash); ok {
return header.(*types.Header)
}
header := GetHeader(self.chainDb, hash)
if header == nil {
return nil
}
// Cache the found header for next time and return
self.headerCache.Add(header.Hash(), header)
return header
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header {
hash := GetCanonicalHash(self.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
return self.GetHeader(hash)
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (self *ChainManager) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := self.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
body := GetBody(self.chainDb, hash)
if body == nil {
return nil
}
// Cache the found body for next time and return
self.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := self.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
body := GetBodyRLP(self.chainDb, hash)
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
self.bodyRLPCache.Add(hash, body)
return body
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *ChainManager) GetTd(hash common.Hash) *big.Int {
// Short circuit if the td's already in the cache, retrieve otherwise
if cached, ok := self.tdCache.Get(hash); ok {
return cached.(*big.Int)
}
td := GetTd(self.chainDb, hash)
if td == nil {
return nil
}
// Cache the found body for next time and return
self.tdCache.Add(hash, td)
return td
}
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *ChainManager) HasBlock(hash common.Hash) bool {
if bc.cache.Contains(hash) {
return true
}
data, _ := bc.chainDb.Get(append(blockHashPre, hash[:]...))
return len(data) != 0
}
func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
block := self.GetBlock(hash)
if block == nil {
return
}
// XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
for i := uint64(0); i < max; i++ {
block = self.GetBlock(block.ParentHash())
if block == nil {
break
}
chain = append(chain, block.Hash())
if block.Number().Cmp(common.Big0) <= 0 {
break
}
}
return
return bc.GetBlock(hash) != nil
}
// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
if block, ok := self.cache.Get(hash); ok {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := self.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := GetBlockByHash(self.chainDb, hash)
block := GetBlock(self.chainDb, hash)
if block == nil {
return nil
}
// Add the block to the cache
self.cache.Add(hash, (*types.Block)(block))
return (*types.Block)(block)
// Cache the found block for next time and return
self.blockCache.Add(block.Hash(), block)
return block
}
func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
self.mu.RLock()
defer self.mu.RUnlock()
return self.getBlockByNumber(num)
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block {
hash := GetCanonicalHash(self.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
return self.GetBlock(hash)
}
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
// Get the origin header from which to fetch
header := self.GetHeader(hash)
if header == nil {
return nil
}
// Iterate the headers until enough is collected or the genesis reached
chain := make([]common.Hash, 0, max)
for i := uint64(0); i < max; i++ {
if header = self.GetHeader(header.ParentHash); header == nil {
break
}
chain = append(chain, header.Hash())
if header.Number.Cmp(common.Big0) == 0 {
break
}
}
return chain
}
// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
for i := 0; i < n; i++ {
@ -427,11 +493,6 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*
return
}
// non blocking version
func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
return GetBlockByNumber(self.chainDb, num)
}
func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
@ -487,39 +548,48 @@ const (
SideStatTy
)
// WriteBlock writes the block to the chain (or pending queue)
func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
// WriteBlock writes the block to the chain.
func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) {
self.wg.Add(1)
defer self.wg.Done()
// Calculate the total difficulty of the block
ptd := self.GetTd(block.ParentHash())
if ptd == nil {
return NonStatTy, ParentError(block.ParentHash())
}
td := new(big.Int).Add(block.Difficulty(), ptd)
self.mu.RLock()
cblock := self.currentBlock
self.mu.RUnlock()
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
if block.Td.Cmp(self.Td()) > 0 {
if td.Cmp(self.Td()) > 0 {
// chain fork
if block.ParentHash() != cblock.Hash() {
// during split we merge two different chains and create the new canonical chain
err := self.merge(cblock, block)
err := self.reorg(cblock, block)
if err != nil {
return NonStatTy, err
}
status = SplitStatTy
}
status = CanonStatTy
self.mu.Lock()
self.setTotalDifficulty(block.Td)
self.setTotalDifficulty(td)
self.insert(block)
self.mu.Unlock()
status = CanonStatTy
} else {
status = SideStatTy
}
err = WriteBlock(self.chainDb, block)
if err != nil {
glog.Fatalln("db err:", err)
if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
glog.Fatalf("failed to write block total difficulty: %v", err)
}
if err := WriteBlock(self.chainDb, block); err != nil {
glog.Fatalf("filed to write block contents: %v", err)
}
// Delete from future blocks
self.futureBlocks.Remove(block.Hash())
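A worked sketch of the total-difficulty rule applied above, with illustrative numbers: the incoming block becomes the new canonical head only if its parent's stored TD plus its own difficulty exceeds the TD of the current head; otherwise it is written as a side-chain block.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	parentTd := big.NewInt(131072)      // TD stored for the block's parent
	blockDifficulty := big.NewInt(2048) // difficulty of the incoming block
	currentTd := big.NewInt(132096)     // TD of the current canonical head

	td := new(big.Int).Add(blockDifficulty, parentTd) // 133120
	fmt.Println(td.Cmp(currentTd) > 0)                // true: the block becomes the new head
}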
@ -545,14 +615,12 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
stats struct{ queued, processed, ignored int }
tstart = time.Now()
nonceDone = make(chan nonceResult, len(chain))
nonceQuit = make(chan struct{})
nonceChecked = make([]bool, len(chain))
)
// Start the parallel nonce verifier.
go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
defer close(nonceQuit)
nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
defer close(nonceAbort)
txcount := 0
for i, block := range chain {
@ -565,24 +633,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
// Wait for block i's nonce to be verified before processing
// its state transition.
for !nonceChecked[i] {
r := <-nonceDone
nonceChecked[r.i] = true
r := <-nonceResults
nonceChecked[r.index] = true
if !r.valid {
block := chain[r.i]
return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
block := chain[r.index]
return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
}
}
if BadHashes[block.Hash()] {
err := fmt.Errorf("Found known bad hash in chain %x", block.Hash())
err := BadHashError(block.Hash())
blockErr(block, err)
return i, err
}
// Setting block.Td regardless of error (known for example) prevents errors down the line
// in the protocol handler
block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
// Call in to the block processor and check for errors. It's likely that if one block fails
// all others will fail too (unless a known block is returned).
logs, receipts, err := self.processor.Process(block)
@ -596,7 +659,8 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
// Allow up to MaxFuture second in the future blocks. If this limit
// is exceeded the chain is discarded and processed at a later time
// if given.
if max := uint64(time.Now().Unix()) + maxTimeFutureBlocks; block.Time() > max {
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) == 1 {
return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
}
@ -617,11 +681,13 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
return i, err
}
if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
}
txcount += len(block.Transactions())
// write the block to the chain and get the status
status, err := self.WriteBlock(block, true)
status, err := self.WriteBlock(block)
if err != nil {
return i, err
}
@ -647,10 +713,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
queue[i] = ChainSplitEvent{block, logs}
queueEvent.splitCount++
}
if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
}
stats.processed++
}
@ -665,20 +727,26 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
return 0, nil
}
// diff takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
// reorg takes two blocks, an old chain and a new chain, and reconstructs the blocks to be part of the
// new canonical chain. It also accumulates the transactions that drop out of the canonical chain along
// the way and posts an event about them.
func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
self.mu.Lock()
defer self.mu.Unlock()
var (
newChain types.Blocks
commonBlock *types.Block
oldStart = oldBlock
newStart = newBlock
deletedTxs types.Transactions
)
// first reduce whoever is higher bound
if oldBlock.NumberU64() > newBlock.NumberU64() {
// reduce old chain
for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
}
} else {
// reduce new chain and append new chain blocks for inserting later on
@ -687,10 +755,10 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
}
}
if oldBlock == nil {
return nil, fmt.Errorf("Invalid old chain")
return fmt.Errorf("Invalid old chain")
}
if newBlock == nil {
return nil, fmt.Errorf("Invalid new chain")
return fmt.Errorf("Invalid new chain")
}
numSplit := newBlock.Number()
@ -700,13 +768,14 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
break
}
newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
if oldBlock == nil {
return nil, fmt.Errorf("Invalid old chain")
return fmt.Errorf("Invalid old chain")
}
if newBlock == nil {
return nil, fmt.Errorf("Invalid new chain")
return fmt.Errorf("Invalid new chain")
}
}
@ -715,18 +784,8 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
}
return newChain, nil
}
// merge merges two different chain to the new canonical chain
func (self *ChainManager) merge(oldBlock, newBlock *types.Block) error {
newChain, err := self.diff(oldBlock, newBlock)
if err != nil {
return fmt.Errorf("chain reorg failed: %v", err)
}
var addedTxs types.Transactions
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
self.mu.Lock()
for _, block := range newChain {
// insert the block in the canonical way, re-writing history
self.insert(block)
@ -734,8 +793,20 @@ func (self *ChainManager) merge(oldBlock, newBlock *types.Block) error {
PutTransactions(self.chainDb, block, block.Transactions())
PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash()))
addedTxs = append(addedTxs, block.Transactions()...)
}
self.mu.Unlock()
// calculate the difference between deleted and added transactions
diff := types.TxDifference(deletedTxs, addedTxs)
// When transactions get deleted from the database that means the
// receipts that were created in the fork must also be deleted
for _, tx := range diff {
DeleteReceipt(self.chainDb, tx.Hash())
DeleteTransaction(self.chainDb, tx.Hash())
}
// Must be posted in a goroutine because of the transaction pool trying
// to acquire the chain manager lock
go self.eventMux.Post(RemovedTransactionEvent{diff})
return nil
}
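The core of reorg above is finding the common ancestor of the two competing chains. A minimal sketch of that search, with a hypothetical getParent callback standing in for self.GetBlock(block.ParentHash()) and without the transaction bookkeeping the real code performs (assumes core/types for *types.Block):

func commonAncestor(oldBlock, newBlock *types.Block, getParent func(*types.Block) *types.Block) *types.Block {
	// First reduce whichever chain is higher down to the other's height.
	for oldBlock != nil && newBlock != nil && oldBlock.NumberU64() > newBlock.NumberU64() {
		oldBlock = getParent(oldBlock)
	}
	for oldBlock != nil && newBlock != nil && newBlock.NumberU64() > oldBlock.NumberU64() {
		newBlock = getParent(newBlock)
	}
	// Then walk both chains back in lockstep until the hashes meet.
	for oldBlock != nil && newBlock != nil && oldBlock.Hash() != newBlock.Hash() {
		oldBlock, newBlock = getParent(oldBlock), getParent(newBlock)
	}
	return oldBlock // nil means no common ancestor was found (invalid chain)
}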
@ -754,12 +825,11 @@ out:
case ChainEvent:
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
// and in most cases isn't even necessary.
if self.lastBlockHash == event.Hash {
if self.currentBlock.Hash() == event.Hash {
self.currentGasLimit = CalcGasLimit(event.Block)
self.eventMux.Post(ChainHeadEvent{event.Block})
}
}
self.eventMux.Post(event)
}
}
@ -777,40 +847,3 @@ func blockErr(block *types.Block, err error) {
glog.V(logger.Error).Infoln(err)
glog.V(logger.Debug).Infoln(verifyNonces)
}
type nonceResult struct {
i int
valid bool
}
// block verifies nonces of the given blocks in parallel and returns
// an error if one of the blocks nonce verifications failed.
func verifyNonces(pow pow.PoW, blocks []*types.Block, quit <-chan struct{}, done chan<- nonceResult) {
// Spawn a few workers. They listen for blocks on the in channel
// and send results on done. The workers will exit in the
// background when in is closed.
var (
in = make(chan int)
nworkers = runtime.GOMAXPROCS(0)
)
defer close(in)
if len(blocks) < nworkers {
nworkers = len(blocks)
}
for i := 0; i < nworkers; i++ {
go func() {
for i := range in {
done <- nonceResult{i: i, valid: pow.Verify(blocks[i])}
}
}()
}
// Feed block indices to the workers.
for i := range blocks {
select {
case in <- i:
continue
case <-quit:
return
}
}
}


@ -30,8 +30,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/hashicorp/golang-lru"
@ -46,7 +48,7 @@ func thePow() pow.PoW {
return pow
}
func theChainManager(db common.Database, t *testing.T) *ChainManager {
func theChainManager(db ethdb.Database, t *testing.T) *ChainManager {
var eventMux event.TypeMux
WriteTestNetGenesisBlock(db, 0)
chainMan, err := NewChainManager(db, thePow(), &eventMux)
@ -73,10 +75,11 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
// asert the bmans have the same block at i
// assert the bmans have the same block at i
bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
if bi1 != bi2 {
fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
t.Fatal("chains do not have the same hash at height", i)
}
bman2.bc.SetProcessor(bman2)
@ -110,7 +113,6 @@ func printChain(bc *ChainManager) {
// process blocks against a chain
func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
td := new(big.Int)
for _, block := range chainB {
_, _, err := bman.bc.processor.Process(block)
if err != nil {
@ -119,17 +121,12 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
}
return nil, err
}
parent := bman.bc.GetBlock(block.ParentHash())
block.Td = CalcTD(block, parent)
td = block.Td
bman.bc.mu.Lock()
{
WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
WriteBlock(bman.bc.chainDb, block)
}
bman.bc.mu.Unlock()
}
return td, nil
return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
}
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
@ -385,10 +382,14 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
return chain
}
func chm(genesis *types.Block, db common.Database) *ChainManager {
func chm(genesis *types.Block, db ethdb.Database) *ChainManager {
var eventMux event.TypeMux
bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
bc.cache, _ = lru.New(100)
bc.headerCache, _ = lru.New(100)
bc.bodyCache, _ = lru.New(100)
bc.bodyRLPCache, _ = lru.New(100)
bc.tdCache, _ = lru.New(100)
bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.processor = bproc{}
bc.ResetWithGenesisBlock(genesis)
@ -420,6 +421,59 @@ func TestReorgLongest(t *testing.T) {
}
}
func TestBadHashes(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
bc := chm(genesis, db)
chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
BadHashes[chain[2].Header().Hash()] = true
_, err = bc.InsertChain(chain)
if !IsBadHashError(err) {
t.Errorf("error mismatch: want: BadHashError, have: %v", err)
}
}
func TestReorgBadHashes(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
bc := chm(genesis, db)
chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
bc.InsertChain(chain)
if chain[3].Header().Hash() != bc.LastBlockHash() {
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash())
}
// NewChainManager should check BadHashes when loading its db
BadHashes[chain[3].Header().Hash()] = true
var eventMux event.TypeMux
ncm, err := NewChainManager(db, FakePow{}, &eventMux)
if err != nil {
t.Errorf("NewChainManager err: %s", err)
}
// check it set head to (valid) parent of bad hash block
if chain[2].Header().Hash() != ncm.LastBlockHash() {
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash())
}
if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit())
}
}
func TestReorgShortest(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
@ -457,7 +511,7 @@ func TestInsertNonceError(t *testing.T) {
fail := rand.Int() % len(blocks)
failblock := blocks[fail]
bc.pow = failpow{failblock.NumberU64()}
bc.pow = failPow{failblock.NumberU64()}
n, err := bc.InsertChain(blocks)
// Check that the returned error indicates the nonce failure.
@ -484,34 +538,115 @@ func TestInsertNonceError(t *testing.T) {
}
}
/*
func TestGenesisMismatch(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
var mux event.TypeMux
genesis := GenesisBlock(0, db)
_, err := NewChainManager(genesis, db, db, db, thePow(), &mux)
if err != nil {
t.Error(err)
// Tests that chain reorganizations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be.
params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
db, _ = ethdb.NewMemDatabase()
)
genesis := WriteGenesisBlockForTesting(db,
GenesisAccount{addr1, big.NewInt(1000000)},
GenesisAccount{addr2, big.NewInt(1000000)},
GenesisAccount{addr3, big.NewInt(1000000)},
)
// Create two transactions shared between the chains:
// - postponed: transaction included at a later block in the forked chain
// - swapped: transaction included at the same block number in the forked chain
postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
// Create two transactions that will be dropped by the forked chain:
// - pastDrop: transaction dropped retroactively from a past block
// - freshDrop: transaction dropped exactly at the block where the reorg is detected
var pastDrop, freshDrop *types.Transaction
// Create three transactions that will be added in the forked chain:
// - pastAdd: transaction added before the reorganization is detected
// - freshAdd: transaction added at the exact block the reorg is detected
// - futureAdd: transaction added after the reorg has already finished
var pastAdd, freshAdd, futureAdd *types.Transaction
chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
switch i {
case 0:
pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point
gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork
case 2:
freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
gen.AddTx(swapped) // This transaction will be swapped out at the exact height
gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
}
})
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
chainman, _ := NewChainManager(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
if i, err := chainman.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
// overwrite the old chain
chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
switch i {
case 0:
pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
gen.AddTx(pastAdd) // This transaction needs to be injected during reorg
case 2:
gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain
freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time
case 3:
futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
gen.AddTx(futureAdd) // This transaction will be added after a full reorg
}
})
if _, err := chainman.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// removed tx
for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
if GetTransaction(db, tx.Hash()) != nil {
t.Errorf("drop %d: tx found while shouldn't have been", i)
}
if GetReceipt(db, tx.Hash()) != nil {
t.Errorf("drop %d: receipt found while shouldn't have been", i)
}
}
// added tx
for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
if GetTransaction(db, tx.Hash()) == nil {
t.Errorf("add %d: expected tx to be found", i)
}
if GetReceipt(db, tx.Hash()) == nil {
t.Errorf("add %d: expected receipt to be found", i)
}
}
// shared tx
for i, tx := range (types.Transactions{postponed, swapped}) {
if GetTransaction(db, tx.Hash()) == nil {
t.Errorf("share %d: expected tx to be found", i)
}
if GetReceipt(db, tx.Hash()) == nil {
t.Errorf("share %d: expected receipt to be found", i)
}
genesis = GenesisBlock(1, db)
_, err = NewChainManager(genesis, db, db, db, thePow(), &mux)
if err == nil {
t.Error("expected genesis mismatch error")
}
}
*/
// failpow returns false from Verify for a certain block number.
type failpow struct{ num uint64 }
func (pow failpow) Search(pow.Block, <-chan struct{}) (nonce uint64, mixHash []byte) {
return 0, nil
}
func (pow failpow) Verify(b pow.Block) bool {
return b.NumberU64() != pow.num
}
func (pow failpow) GetHashrate() int64 {
return 0
}
func (pow failpow) Turbo(bool) {
}

core/chain_pow.go (new file, 87 lines)

@ -0,0 +1,87 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"runtime"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/pow"
)
// nonceCheckResult contains the result of a nonce verification.
type nonceCheckResult struct {
index int // Index of the item verified from an input array
valid bool // Result of the nonce verification
}
// verifyNoncesFromHeaders starts a concurrent header nonce verification,
// returning a quit channel to abort the operations and a results channel
// to retrieve the async verifications.
func verifyNoncesFromHeaders(checker pow.PoW, headers []*types.Header) (chan<- struct{}, <-chan nonceCheckResult) {
items := make([]pow.Block, len(headers))
for i, header := range headers {
items[i] = types.NewBlockWithHeader(header)
}
return verifyNonces(checker, items)
}
// verifyNoncesFromBlocks starts a concurrent block nonce verification,
// returning a quit channel to abort the operations and a results channel
// to retrieve the async verifications.
func verifyNoncesFromBlocks(checker pow.PoW, blocks []*types.Block) (chan<- struct{}, <-chan nonceCheckResult) {
items := make([]pow.Block, len(blocks))
for i, block := range blocks {
items[i] = block
}
return verifyNonces(checker, items)
}
// verifyNonces starts a concurrent nonce verification, returning a quit channel
// to abort the operations and a results channel to retrieve the async checks.
func verifyNonces(checker pow.PoW, items []pow.Block) (chan<- struct{}, <-chan nonceCheckResult) {
// Spawn as many workers as allowed threads
workers := runtime.GOMAXPROCS(0)
if len(items) < workers {
workers = len(items)
}
// Create a task channel and spawn the verifiers
tasks := make(chan int, workers)
results := make(chan nonceCheckResult, len(items)) // Buffered to make sure all workers stop
for i := 0; i < workers; i++ {
go func() {
for index := range tasks {
results <- nonceCheckResult{index: index, valid: checker.Verify(items[index])}
}
}()
}
// Feed item indices to the workers until done or aborted
abort := make(chan struct{})
go func() {
defer close(tasks)
for i := range items {
select {
case tasks <- i:
continue
case <-abort:
return
}
}
}()
return abort, results
}
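A minimal usage sketch of the abort/results pattern above, assuming a pow checker and a slice of blocks are already in scope; this mirrors how InsertChain consumes the verifier:

abort, results := verifyNoncesFromBlocks(checker, blocks)
defer close(abort) // stops the index feeder once we return

for i := 0; i < len(blocks); i++ {
	if r := <-results; !r.valid {
		// blocks[r.index] failed proof-of-work verification
		break
	}
}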

core/chain_pow_test.go (new file, 233 lines)

@ -0,0 +1,233 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"runtime"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/pow"
)
// failPow is a non-validating proof of work implementation, that returns true
// from Verify for all but one block.
type failPow struct {
failing uint64
}
func (pow failPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
return 0, nil
}
func (pow failPow) Verify(block pow.Block) bool { return block.NumberU64() != pow.failing }
func (pow failPow) GetHashrate() int64 { return 0 }
func (pow failPow) Turbo(bool) {}
// delayedPow is a non-validating proof of work implementation, that returns true
// from Verify for all blocks, but delays them the configured amount of time.
type delayedPow struct {
delay time.Duration
}
func (pow delayedPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
return 0, nil
}
func (pow delayedPow) Verify(block pow.Block) bool { time.Sleep(pow.delay); return true }
func (pow delayedPow) GetHashrate() int64 { return 0 }
func (pow delayedPow) Turbo(bool) {}
// Tests that simple POW verification works, for both good and bad blocks.
func TestPowVerification(t *testing.T) {
// Create a simple chain to verify
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 8, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Run the POW checker for blocks one-by-one, checking for both valid and invalid nonces
for i := 0; i < len(blocks); i++ {
for j, full := range []bool{true, false} {
for k, valid := range []bool{true, false} {
var results <-chan nonceCheckResult
switch {
case full && valid:
_, results = verifyNoncesFromBlocks(FakePow{}, []*types.Block{blocks[i]})
case full && !valid:
_, results = verifyNoncesFromBlocks(failPow{blocks[i].NumberU64()}, []*types.Block{blocks[i]})
case !full && valid:
_, results = verifyNoncesFromHeaders(FakePow{}, []*types.Header{headers[i]})
case !full && !valid:
_, results = verifyNoncesFromHeaders(failPow{headers[i].Number.Uint64()}, []*types.Header{headers[i]})
}
// Wait for the verification result
select {
case result := <-results:
if result.index != 0 {
t.Errorf("test %d.%d.%d: invalid index: have %d, want 0", i, j, k, result.index)
}
if result.valid != valid {
t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, result.valid, valid)
}
case <-time.After(time.Second):
t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d.%d.%d: unexpected result returned: %v", i, j, k, result)
case <-time.After(25 * time.Millisecond):
}
}
}
}
}
// Tests that concurrent POW verification works, for both good and bad blocks.
func TestPowConcurrentVerification2(t *testing.T) { testPowConcurrentVerification(t, 2) }
func TestPowConcurrentVerification8(t *testing.T) { testPowConcurrentVerification(t, 8) }
func TestPowConcurrentVerification32(t *testing.T) { testPowConcurrentVerification(t, 32) }
func testPowConcurrentVerification(t *testing.T, threads int) {
// Create a simple chain to verify
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 8, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Set the number of threads to verify on
old := runtime.GOMAXPROCS(threads)
defer runtime.GOMAXPROCS(old)
// Run the POW checker for the entire block chain at once, both for a valid and
// an invalid chain (a single invalid block suffices; the last but one is chosen arbitrarily).
for i, full := range []bool{true, false} {
for j, valid := range []bool{true, false} {
var results <-chan nonceCheckResult
switch {
case full && valid:
_, results = verifyNoncesFromBlocks(FakePow{}, blocks)
case full && !valid:
_, results = verifyNoncesFromBlocks(failPow{uint64(len(blocks) - 1)}, blocks)
case !full && valid:
_, results = verifyNoncesFromHeaders(FakePow{}, headers)
case !full && !valid:
_, results = verifyNoncesFromHeaders(failPow{uint64(len(headers) - 1)}, headers)
}
// Wait for all the verification results
checks := make(map[int]bool)
for k := 0; k < len(blocks); k++ {
select {
case result := <-results:
if _, ok := checks[result.index]; ok {
t.Fatalf("test %d.%d.%d: duplicate results for %d", i, j, k, result.index)
}
if result.index < 0 || result.index >= len(blocks) {
t.Fatalf("test %d.%d.%d: result %d out of bounds [%d, %d]", i, j, k, result.index, 0, len(blocks)-1)
}
checks[result.index] = result.valid
case <-time.After(time.Second):
t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
}
}
// Check nonce check validity
for k := 0; k < len(blocks); k++ {
want := valid || (k != len(blocks)-2) // We chose the last but one nonce in the chain to fail
if checks[k] != want {
t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, checks[k], want)
}
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
case <-time.After(25 * time.Millisecond):
}
}
}
}
// Tests that aborting a POW validation indeed prevents further checks from being
// run, and that no left-over goroutines are leaked.
func TestPowConcurrentAbortion2(t *testing.T) { testPowConcurrentAbortion(t, 2) }
func TestPowConcurrentAbortion8(t *testing.T) { testPowConcurrentAbortion(t, 8) }
func TestPowConcurrentAbortion32(t *testing.T) { testPowConcurrentAbortion(t, 32) }
func testPowConcurrentAbortion(t *testing.T, threads int) {
// Create a simple chain to verify
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 1024, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Set the number of threads to verify on
old := runtime.GOMAXPROCS(threads)
defer runtime.GOMAXPROCS(old)
// Run the POW checker for the entire block chain at once
for i, full := range []bool{true, false} {
var abort chan<- struct{}
var results <-chan nonceCheckResult
// Start the verifications and immediately abort
if full {
abort, results = verifyNoncesFromBlocks(delayedPow{time.Millisecond}, blocks)
} else {
abort, results = verifyNoncesFromHeaders(delayedPow{time.Millisecond}, headers)
}
close(abort)
// Deplete the results channel
verified := make(map[int]struct{})
for depleted := false; !depleted; {
select {
case result := <-results:
verified[result.index] = struct{}{}
case <-time.After(50 * time.Millisecond):
depleted = true
}
}
// Check that the abort was honored by not processing too many POWs
if len(verified) > 2*threads {
t.Errorf("test %d: verification count too large: have %d, want below %d", i, len(verified), 2*threads)
}
// Check that there are no gaps in the results
for j := 0; j < len(verified); j++ {
if _, ok := verified[j]; !ok {
t.Errorf("test %d.%d: gap found in verification results", i, j)
}
}
}
}

View File

@ -19,10 +19,10 @@ package core
import (
"bytes"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
@ -30,9 +30,18 @@ import (
)
var (
blockHashPre = []byte("block-hash-")
blockNumPre = []byte("block-num-")
headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock")
blockPrefix = []byte("block-")
blockNumPrefix = []byte("block-num-")
headerSuffix = []byte("-header")
bodySuffix = []byte("-body")
tdSuffix = []byte("-td")
ExpDiffPeriod = big.NewInt(100000)
blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
)
// CalcDifficulty is the difficulty adjustment algorithm. It returns
@ -69,16 +78,6 @@ func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int)
return diff
}
// CalcTD computes the total difficulty of block.
func CalcTD(block, parent *types.Block) *big.Int {
if parent == nil {
return block.Difficulty()
}
d := block.Difficulty()
d.Add(d, parent.Td)
return d
}
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
@ -112,8 +111,230 @@ func CalcGasLimit(parent *types.Block) *big.Int {
return gl
}
// GetBlockByHash returns the block corresponding to the hash or nil if not found
func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeadHeaderHash retrieves the hash of the current canonical head block's
// header. The difference between this and GetHeadBlockHash is that whereas the
// last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the
// fast synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash {
data, _ := db.Get(headHeaderKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeadBlockHash retrieves the hash of the current canonical head block.
func GetHeadBlockHash(db ethdb.Database) common.Hash {
data, _ := db.Get(headBlockKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...))
return data
}
// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
func GetHeader(db ethdb.Database, hash common.Hash) *types.Header {
data := GetHeaderRLP(db, hash)
if len(data) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
return nil
}
return header
}
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...))
return data
}
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash) *types.Body {
data := GetBodyRLP(db, hash)
if len(data) == 0 {
return nil
}
body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
return nil
}
return body
}
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
if len(data) == 0 {
return nil
}
td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
return nil
}
return td
}
// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
// Retrieve the block header and body contents
header := GetHeader(db, hash)
if header == nil {
return nil
}
body := GetBody(db, hash)
if body == nil {
return nil
}
// Reassemble the block and return
return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}
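// The two illustrative helpers below are not part of the diff; they simply
// restate the key layout the functions above use. Header, body and TD entries
// share the "block-" + hash prefix and differ only in their suffix, which is
// what lets fast sync store headers before (or without) their bodies, while
// the canonical-number index lives under "block-num-" + number bytes.
func headerKey(hash common.Hash) []byte {
	return append(append(blockPrefix, hash.Bytes()...), headerSuffix...)
}
func canonicalKey(number uint64) []byte {
	return append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
}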
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
if err := db.Put(key, hash.Bytes()); err != nil {
glog.Fatalf("failed to store number to hash mapping into database: %v", err)
return err
}
return nil
}
// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last header's hash into database: %v", err)
return err
}
return nil
}
// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last block's hash into database: %v", err)
return err
}
return nil
}
// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
data, err := rlp.EncodeToBytes(header)
if err != nil {
return err
}
key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store header into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
return nil
}
// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
}
key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store block body into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
return nil
}
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return err
}
key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store block total difficulty into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
return nil
}
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error {
// Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
return err
}
// Store the header too, signaling full block ownership
if err := WriteHeader(db, block.Header()); err != nil {
return err
}
return nil
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
}
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...))
}
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...))
}
// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
}
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash) {
DeleteHeader(db, hash)
DeleteBody(db, hash)
DeleteTd(db, hash)
}
// [deprecated by eth/63]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
data, _ := db.Get(append(blockHashPre, hash[:]...))
if len(data) == 0 {
return nil
@ -125,55 +346,3 @@ func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
}
return (*types.Block)(&block)
}
// GetBlockByNumber returns the canonical block by number or nil if not found
func GetBlockByNumber(db common.Database, number uint64) *types.Block {
key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
if len(key) == 0 {
return nil
}
return GetBlockByHash(db, common.BytesToHash(key))
}
// WriteCanonNumber writes the canonical hash for the given block
func WriteCanonNumber(db common.Database, block *types.Block) error {
key := append(blockNumPre, block.Number().Bytes()...)
err := db.Put(key, block.Hash().Bytes())
if err != nil {
return err
}
return nil
}
// WriteHead force writes the current head
func WriteHead(db common.Database, block *types.Block) error {
err := WriteCanonNumber(db, block)
if err != nil {
return err
}
err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
if err != nil {
return err
}
return nil
}
// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
tstart := time.Now()
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
key := append(blockHashPre, block.Hash().Bytes()...)
err := db.Put(key, enc)
if err != nil {
glog.Fatal("db write fail:", err)
return err
}
if glog.V(logger.Debug) {
glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
}
return nil
}

View File

@ -23,6 +23,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
)
type diffTest struct {
@ -75,3 +79,242 @@ func TestDifficulty(t *testing.T) {
}
}
}
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test header to move around the database and make sure it's really new
header := &types.Header{Extra: []byte("test header")}
if entry := GetHeader(db, header.Hash()); entry != nil {
t.Fatalf("Non existent header returned: %v", entry)
}
// Write and verify the header in the database
if err := WriteHeader(db, header); err != nil {
t.Fatalf("Failed to write header into database: %v", err)
}
if entry := GetHeader(db, header.Hash()); entry == nil {
t.Fatalf("Stored header not found")
} else if entry.Hash() != header.Hash() {
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
}
if entry := GetHeaderRLP(db, header.Hash()); entry == nil {
t.Fatalf("Stored header RLP not found")
} else {
hasher := sha3.NewKeccak256()
hasher.Write(entry)
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
}
}
// Delete the header and verify the execution
DeleteHeader(db, header.Hash())
if entry := GetHeader(db, header.Hash()); entry != nil {
t.Fatalf("Deleted header returned: %v", entry)
}
}
// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test body to move around the database and make sure it's really new
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
hasher := sha3.NewKeccak256()
rlp.Encode(hasher, body)
hash := common.BytesToHash(hasher.Sum(nil))
if entry := GetBody(db, hash); entry != nil {
t.Fatalf("Non existent body returned: %v", entry)
}
// Write and verify the body in the database
if err := WriteBody(db, hash, body); err != nil {
t.Fatalf("Failed to write body into database: %v", err)
}
if entry := GetBody(db, hash); entry == nil {
t.Fatalf("Stored body not found")
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
}
if entry := GetBodyRLP(db, hash); entry == nil {
t.Fatalf("Stored body RLP not found")
} else {
hasher := sha3.NewKeccak256()
hasher.Write(entry)
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
}
}
// Delete the body and verify the execution
DeleteBody(db, hash)
if entry := GetBody(db, hash); entry != nil {
t.Fatalf("Deleted body returned: %v", entry)
}
}
// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test block to move around the database and make sure it's really new
block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
if entry := GetHeader(db, block.Hash()); entry != nil {
t.Fatalf("Non existent header returned: %v", entry)
}
if entry := GetBody(db, block.Hash()); entry != nil {
t.Fatalf("Non existent body returned: %v", entry)
}
// Write and verify the block in the database
if err := WriteBlock(db, block); err != nil {
t.Fatalf("Failed to write block into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
}
if entry := GetHeader(db, block.Hash()); entry == nil {
t.Fatalf("Stored header not found")
} else if entry.Hash() != block.Header().Hash() {
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
}
if entry := GetBody(db, block.Hash()); entry == nil {
t.Fatalf("Stored body not found")
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()})
}
// Delete the block and verify the execution
DeleteBlock(db, block.Hash())
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Deleted block returned: %v", entry)
}
if entry := GetHeader(db, block.Hash()); entry != nil {
t.Fatalf("Deleted header returned: %v", entry)
}
if entry := GetBody(db, block.Hash()); entry != nil {
t.Fatalf("Deleted body returned: %v", entry)
}
}
// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
// Store a header and check that it's not recognized as a block
if err := WriteHeader(db, block.Header()); err != nil {
t.Fatalf("Failed to write header into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
DeleteHeader(db, block.Hash())
// Store a body and check that it's not recognized as a block
if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
t.Fatalf("Failed to write body into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
DeleteBody(db, block.Hash())
// Store a header and a body separately and check reassembly
if err := WriteHeader(db, block.Header()); err != nil {
t.Fatalf("Failed to write header into database: %v", err)
}
if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
t.Fatalf("Failed to write body into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
}
}
// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test TD to move around the database and make sure it's really new
hash, td := common.Hash{}, big.NewInt(314)
if entry := GetTd(db, hash); entry != nil {
t.Fatalf("Non existent TD returned: %v", entry)
}
// Write and verify the TD in the database
if err := WriteTd(db, hash, td); err != nil {
t.Fatalf("Failed to write TD into database: %v", err)
}
if entry := GetTd(db, hash); entry == nil {
t.Fatalf("Stored TD not found")
} else if entry.Cmp(td) != 0 {
t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
}
// Delete the TD and verify the execution
DeleteTd(db, hash)
if entry := GetTd(db, hash); entry != nil {
t.Fatalf("Deleted TD returned: %v", entry)
}
}
// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test canonical number and assigned hash to move around
hash, number := common.Hash{0: 0xff}, uint64(314)
if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
t.Fatalf("Non existent canonical mapping returned: %v", entry)
}
// Write and verify the canonical mapping in the database
if err := WriteCanonicalHash(db, hash, number); err != nil {
t.Fatalf("Failed to write canonical mapping into database: %v", err)
}
if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
t.Fatalf("Stored canonical mapping not found")
} else if entry != hash {
t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
}
// Delete the canonical mapping and verify the execution
DeleteCanonicalHash(db, number)
if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
t.Fatalf("Deleted canonical mapping returned: %v", entry)
}
}
// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
// Check that no head entries are in a pristine database
if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
t.Fatalf("Non head header entry returned: %v", entry)
}
if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
t.Fatalf("Non head block entry returned: %v", entry)
}
// Assign separate entries for the head header and block
if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
t.Fatalf("Failed to write head header hash: %v", err)
}
if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
t.Fatalf("Failed to write head block hash: %v", err)
}
// Check that both heads are present, and different (i.e. two heads maintained)
if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
}
if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
}
}

View File

@ -27,6 +27,7 @@ import (
var (
BlockNumberErr = errors.New("block number invalid")
BlockFutureErr = errors.New("block time is in the future")
BlockTSTooBigErr = errors.New("block time too big")
BlockEqualTSErr = errors.New("block time stamp equal to previous")
)
@ -176,3 +177,14 @@ func IsValueTransferErr(e error) bool {
_, ok := e.(*ValueTransferError)
return ok
}
type BadHashError common.Hash
func (h BadHashError) Error() string {
return fmt.Sprintf("Found known bad hash in chain %x", h[:])
}
func IsBadHashError(err error) bool {
_, ok := err.(BadHashError)
return ok
}
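// Hypothetical sketch (not part of the diff) of how BadHashError is meant to be
// produced and consumed. The knownBad set is illustrative; the package's real
// list of blacklisted hashes is assumed to live elsewhere.
func checkKnownBad(knownBad map[common.Hash]bool, hash common.Hash) error {
	if knownBad[hash] {
		return BadHashError(hash)
	}
	return nil
}
// Callers can then distinguish this condition from other validation failures:
//
//	if err := checkKnownBad(knownBad, block.Hash()); IsBadHashError(err) {
//		// refuse to import this block and its descendants
//	}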

View File

@ -36,6 +36,9 @@ type NewBlockEvent struct{ Block *types.Block }
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }
// ChainSplit is posted when a new head is detected
type ChainSplitEvent struct {
Block *types.Block

View File

@ -131,12 +131,12 @@ done:
func includes(addresses []common.Address, a common.Address) bool {
for _, addr := range addresses {
if addr != a {
return false
if addr == a {
return true
}
}
return true
return false
}
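// Hypothetical test (not part of the diff) illustrating the corrected
// semantics: includes now reports whether the address appears anywhere in the
// slice, instead of bailing out on the first non-matching element.
func TestIncludes(t *testing.T) {
	addrs := []common.Address{{1}, {2}, {3}}
	if !includes(addrs, common.Address{2}) {
		t.Fatal("expected address {2} to be found")
	}
	if includes(addrs, common.Address{4}) {
		t.Fatal("did not expect address {4} to be found")
	}
}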
func (self *Filter) FilterLogs(logs state.Logs) state.Logs {

View File

@ -27,13 +27,14 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
)
// WriteGenesisBlock writes the genesis block to the database as block number 0
func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block, error) {
func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, error) {
contents, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
@ -73,7 +74,7 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block,
difficulty := common.String2Big(genesis.Difficulty)
block := types.NewBlock(&types.Header{
Nonce: types.EncodeNonce(common.String2Big(genesis.Nonce).Uint64()),
Time: common.String2Big(genesis.Timestamp).Uint64(),
Time: common.String2Big(genesis.Timestamp),
ParentHash: common.HexToHash(genesis.ParentHash),
Extra: common.FromHex(genesis.ExtraData),
GasLimit: common.String2Big(genesis.GasLimit),
@ -82,34 +83,35 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block,
Coinbase: common.HexToAddress(genesis.Coinbase),
Root: statedb.Root(),
}, nil, nil, nil)
block.Td = difficulty
if block := GetBlockByHash(chainDb, block.Hash()); block != nil {
if block := GetBlock(chainDb, block.Hash()); block != nil {
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
err := WriteCanonNumber(chainDb, block)
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
if err != nil {
return nil, err
}
return block, nil
}
statedb.Sync()
err = WriteBlock(chainDb, block)
if err != nil {
if err := WriteTd(chainDb, block.Hash(), difficulty); err != nil {
return nil, err
}
err = WriteHead(chainDb, block)
if err != nil {
if err := WriteBlock(chainDb, block); err != nil {
return nil, err
}
if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
return nil, err
}
if err := WriteHeadBlockHash(chainDb, block.Hash()); err != nil {
return nil, err
}
return block, nil
}
// GenesisBlockForTesting creates a block in which addr has the given wei balance.
// The state trie of the block is written to db.
func GenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
statedb := state.New(common.Hash{}, db)
obj := statedb.GetOrNewStateObject(addr)
obj.SetBalance(balance)
@ -120,24 +122,35 @@ func GenesisBlockForTesting(db common.Database, addr common.Address, balance *bi
GasLimit: params.GenesisGasLimit,
Root: statedb.Root(),
}, nil, nil, nil)
block.Td = params.GenesisDifficulty
return block
}
func WriteGenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
type GenesisAccount struct {
Address common.Address
Balance *big.Int
}
func WriteGenesisBlockForTesting(db ethdb.Database, accounts ...GenesisAccount) *types.Block {
accountJson := "{"
for i, account := range accounts {
if i != 0 {
accountJson += ","
}
accountJson += fmt.Sprintf(`"0x%x":{"balance":"0x%x"}`, account.Address, account.Balance.Bytes())
}
accountJson += "}"
testGenesis := fmt.Sprintf(`{
"nonce":"0x%x",
"gasLimit":"0x%x",
"difficulty":"0x%x",
"alloc": {
"0x%x":{"balance":"0x%x"}
}
}`, types.EncodeNonce(0), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes(), addr, balance.Bytes())
"alloc": %s
}`, types.EncodeNonce(0), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes(), accountJson)
block, _ := WriteGenesisBlock(db, strings.NewReader(testGenesis))
return block
}
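// Hypothetical usage sketch (not part of the diff): pre-funding two arbitrary
// test accounts in a fresh in-memory database, using the variadic
// GenesisAccount form introduced above.
func exampleTestGenesis() *types.Block {
	db, _ := ethdb.NewMemDatabase()
	return WriteGenesisBlockForTesting(db,
		GenesisAccount{common.HexToAddress("0x1111111111111111111111111111111111111111"), big.NewInt(1000000)},
		GenesisAccount{common.HexToAddress("0x2222222222222222222222222222222222222222"), big.NewInt(500000)},
	)
}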
func WriteTestNetGenesisBlock(chainDb common.Database, nonce uint64) (*types.Block, error) {
func WriteTestNetGenesisBlock(chainDb ethdb.Database, nonce uint64) (*types.Block, error) {
testGenesis := fmt.Sprintf(`{
"nonce":"0x%x",
"gasLimit":"0x%x",

View File

@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
// "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
@ -32,7 +32,7 @@ type TestManager struct {
// stateManager *StateManager
eventMux *event.TypeMux
db common.Database
db ethdb.Database
txPool *TxPool
blockChain *ChainManager
Blocks []*types.Block
@ -74,7 +74,7 @@ func (tm *TestManager) EventMux() *event.TypeMux {
// return nil
// }
func (tm *TestManager) Db() common.Database {
func (tm *TestManager) Db() ethdb.Database {
return tm.db
}

View File

@ -18,7 +18,7 @@ package core
import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
@ -28,7 +28,7 @@ type Backend interface {
BlockProcessor() *BlockProcessor
ChainManager() *ChainManager
TxPool() *TxPool
ChainDb() common.Database
DappDb() common.Database
ChainDb() ethdb.Database
DappDb() ethdb.Database
EventMux() *event.TypeMux
}

View File

@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
@ -56,7 +57,7 @@ func (self Storage) Copy() Storage {
type StateObject struct {
// State database for storing state changes
db common.Database
db ethdb.Database
trie *trie.SecureTrie
// Address belonging to this account
@ -87,11 +88,7 @@ type StateObject struct {
dirty bool
}
func (self *StateObject) Reset() {
self.storage = make(Storage)
}
func NewStateObject(address common.Address, db common.Database) *StateObject {
func NewStateObject(address common.Address, db ethdb.Database) *StateObject {
object := &StateObject{db: db, address: address, balance: new(big.Int), gasPool: new(big.Int), dirty: true}
object.trie = trie.NewSecure((common.Hash{}).Bytes(), db)
object.storage = make(Storage)
@ -100,7 +97,7 @@ func NewStateObject(address common.Address, db common.Database) *StateObject {
return object
}
func NewStateObjectFromBytes(address common.Address, data []byte, db common.Database) *StateObject {
func NewStateObjectFromBytes(address common.Address, data []byte, db ethdb.Database) *StateObject {
// TODO clean me up
var extobject struct {
Nonce uint64
@ -184,14 +181,6 @@ func (self *StateObject) Update() {
}
}
func (c *StateObject) GetInstr(pc *big.Int) *common.Value {
if int64(len(c.code)-1) < pc.Int64() {
return common.NewValue(0)
}
return common.NewValueFromBytes([]byte{c.code[pc.Int64()]})
}
func (c *StateObject) AddBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Add(c.balance, amount))
@ -263,14 +252,11 @@ func (self *StateObject) Copy() *StateObject {
stateObject.gasPool.Set(self.gasPool)
stateObject.remove = self.remove
stateObject.dirty = self.dirty
stateObject.deleted = self.deleted
return stateObject
}
func (self *StateObject) Set(stateObject *StateObject) {
*self = *stateObject
}
//
// Attribute accessors
//
@ -279,20 +265,11 @@ func (self *StateObject) Balance() *big.Int {
return self.balance
}
func (c *StateObject) N() *big.Int {
return big.NewInt(int64(c.nonce))
}
// Returns the address of the contract/account
func (c *StateObject) Address() common.Address {
return c.address
}
// Returns the initialization Code
func (c *StateObject) Init() Code {
return c.initCode
}
func (self *StateObject) Trie() *trie.SecureTrie {
return self.trie
}
@ -310,11 +287,6 @@ func (self *StateObject) SetCode(code []byte) {
self.dirty = true
}
func (self *StateObject) SetInitCode(code []byte) {
self.initCode = code
self.dirty = true
}
func (self *StateObject) SetNonce(nonce uint64) {
self.nonce = nonce
self.dirty = true
@ -353,19 +325,6 @@ func (c *StateObject) CodeHash() common.Bytes {
return crypto.Sha3(c.code)
}
func (c *StateObject) RlpDecode(data []byte) {
decoder := common.NewValueFromBytes(data)
c.nonce = decoder.Get(0).Uint()
c.balance = decoder.Get(1).BigInt()
c.trie = trie.NewSecure(decoder.Get(2).Bytes(), c.db)
c.storage = make(map[string]common.Hash)
c.gasPool = new(big.Int)
c.codeHash = decoder.Get(3).Bytes()
c.code, _ = c.db.Get(c.codeHash)
}
// Storage change object. Used by the manifest for notifying changes to
// the sub channels.
type StorageState struct {

View File

@ -17,6 +17,7 @@
package state
import (
"bytes"
"math/big"
"testing"
@ -117,3 +118,106 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {
c.Assert(data1, checker.DeepEquals, res)
}
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
state := New(common.Hash{}, db)
stateobjaddr0 := toAddr([]byte("so0"))
stateobjaddr1 := toAddr([]byte("so1"))
var storageaddr common.Hash
data0 := common.BytesToHash([]byte{17})
data1 := common.BytesToHash([]byte{18})
state.SetState(stateobjaddr0, storageaddr, data0)
state.SetState(stateobjaddr1, storageaddr, data1)
// db, trie are already non-empty values
so0 := state.GetStateObject(stateobjaddr0)
so0.balance = big.NewInt(42)
so0.nonce = 43
so0.gasPool = big.NewInt(44)
so0.code = []byte{'c', 'a', 'f', 'e'}
so0.codeHash = so0.CodeHash()
so0.remove = true
so0.deleted = false
so0.dirty = false
state.SetStateObject(so0)
// and one with deleted == true
so1 := state.GetStateObject(stateobjaddr1)
so1.balance = big.NewInt(52)
so1.nonce = 53
so1.gasPool = big.NewInt(54)
so1.code = []byte{'c', 'a', 'f', 'e', '2'}
so1.codeHash = so1.CodeHash()
so1.remove = true
so1.deleted = true
so1.dirty = true
state.SetStateObject(so1)
so1 = state.GetStateObject(stateobjaddr1)
if so1 != nil {
t.Fatalf("deleted object not nil when getting")
}
snapshot := state.Copy()
state.Set(snapshot)
so0Restored := state.GetStateObject(stateobjaddr0)
so1Restored := state.GetStateObject(stateobjaddr1)
// non-deleted is equal (restored)
compareStateObjects(so0Restored, so0, t)
// deleted should be nil, both before and after restore of state copy
if so1Restored != nil {
t.Fatalf("deleted object not nil after restoring snapshot")
}
}
func compareStateObjects(so0, so1 *StateObject, t *testing.T) {
if so0.address != so1.address {
t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
}
if so0.balance.Cmp(so1.balance) != 0 {
t.Fatalf("Balance mismatch: have %v, want %v", so0.balance, so1.balance)
}
if so0.nonce != so1.nonce {
t.Fatalf("Nonce mismatch: have %v, want %v", so0.nonce, so1.nonce)
}
if !bytes.Equal(so0.codeHash, so1.codeHash) {
t.Fatalf("CodeHash mismatch: have %v, want %v", so0.codeHash, so1.codeHash)
}
if !bytes.Equal(so0.code, so1.code) {
t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
}
if !bytes.Equal(so0.initCode, so1.initCode) {
t.Fatalf("InitCode mismatch: have %v, want %v", so0.initCode, so1.initCode)
}
for k, v := range so1.storage {
if so0.storage[k] != v {
t.Fatalf("Storage key %s mismatch: have %v, want %v", k, so0.storage[k], v)
}
}
for k, v := range so0.storage {
if so1.storage[k] != v {
t.Fatalf("Storage key %s mismatch: have %v, want none.", k, v)
}
}
if so0.gasPool.Cmp(so1.gasPool) != 0 {
t.Fatalf("GasPool mismatch: have %v, want %v", so0.gasPool, so1.gasPool)
}
if so0.remove != so1.remove {
t.Fatalf("Remove mismatch: have %v, want %v", so0.remove, so1.remove)
}
if so0.deleted != so1.deleted {
t.Fatalf("Deleted mismatch: have %v, want %v", so0.deleted, so1.deleted)
}
if so0.dirty != so1.dirty {
t.Fatalf("Dirty mismatch: have %v, want %v", so0.dirty, so1.dirty)
}
}

View File

@ -18,10 +18,10 @@
package state
import (
"bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/trie"
@ -33,7 +33,7 @@ import (
// * Contracts
// * Accounts
type StateDB struct {
db common.Database
db ethdb.Database
trie *trie.SecureTrie
root common.Hash
@ -48,7 +48,7 @@ type StateDB struct {
}
// Create a new state from a given trie
func New(root common.Hash, db common.Database) *StateDB {
func New(root common.Hash, db ethdb.Database) *StateDB {
trie := trie.NewSecure(root[:], db)
return &StateDB{root: root, db: db, trie: trie, stateObjects: make(map[string]*StateObject), refund: new(big.Int), logs: make(map[common.Hash]Logs)}
}
@ -276,10 +276,6 @@ func (self *StateDB) CreateAccount(addr common.Address) *StateObject {
// Setting, copying of the state methods
//
func (s *StateDB) Cmp(other *StateDB) bool {
return bytes.Equal(s.trie.Root(), other.trie.Root())
}
func (self *StateDB) Copy() *StateDB {
state := New(common.Hash{}, self.db)
state.trie = self.trie
@ -311,22 +307,6 @@ func (s *StateDB) Root() common.Hash {
return common.BytesToHash(s.trie.Root())
}
func (s *StateDB) Trie() *trie.SecureTrie {
return s.trie
}
// Resets the trie and all siblings
func (s *StateDB) Reset() {
s.trie.Reset()
// Reset all nested states
for _, stateObject := range s.stateObjects {
stateObject.Reset()
}
s.Empty()
}
// Syncs the trie and all siblings
func (s *StateDB) Sync() {
// Sync all nested states

View File

@ -45,7 +45,7 @@ import (
* 6) Derive new state root
*/
type StateTransition struct {
coinbase common.Address
gp GasPool
msg Message
gas, gasPrice *big.Int
initialGas *big.Int
@ -53,8 +53,6 @@ type StateTransition struct {
data []byte
state *state.StateDB
cb, rec, sen *state.StateObject
env vm.Environment
}
@ -96,13 +94,13 @@ func IntrinsicGas(data []byte) *big.Int {
return igas
}
func ApplyMessage(env vm.Environment, msg Message, coinbase *state.StateObject) ([]byte, *big.Int, error) {
return NewStateTransition(env, msg, coinbase).transitionState()
func ApplyMessage(env vm.Environment, msg Message, gp GasPool) ([]byte, *big.Int, error) {
return NewStateTransition(env, msg, gp).transitionState()
}
func NewStateTransition(env vm.Environment, msg Message, coinbase *state.StateObject) *StateTransition {
func NewStateTransition(env vm.Environment, msg Message, gp GasPool) *StateTransition {
return &StateTransition{
coinbase: coinbase.Address(),
gp: gp,
env: env,
msg: msg,
gas: new(big.Int),
@ -111,13 +109,9 @@ func NewStateTransition(env vm.Environment, msg Message, coinbase *state.StateOb
value: msg.Value(),
data: msg.Data(),
state: env.State(),
cb: coinbase,
}
}
func (self *StateTransition) Coinbase() *state.StateObject {
return self.state.GetOrNewStateObject(self.coinbase)
}
func (self *StateTransition) From() (*state.StateObject, error) {
f, err := self.msg.From()
if err != nil {
@ -160,7 +154,7 @@ func (self *StateTransition) BuyGas() error {
if sender.Balance().Cmp(mgval) < 0 {
return fmt.Errorf("insufficient ETH for gas (%x). Req %v, has %v", sender.Address().Bytes()[:4], mgval, sender.Balance())
}
if err = self.Coinbase().SubGas(mgas, self.gasPrice); err != nil {
if err = self.gp.SubGas(mgas, self.gasPrice); err != nil {
return err
}
self.AddGas(mgas)
@ -241,13 +235,12 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er
}
self.refundGas()
self.state.AddBalance(self.coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))
self.state.AddBalance(self.env.Coinbase(), new(big.Int).Mul(self.gasUsed(), self.gasPrice))
return ret, self.gasUsed(), err
}
func (self *StateTransition) refundGas() {
coinbase := self.Coinbase()
sender, _ := self.From() // err already checked
// Return remaining gas
remaining := new(big.Int).Mul(self.gas, self.gasPrice)
@ -258,7 +251,7 @@ func (self *StateTransition) refundGas() {
self.gas.Add(self.gas, refund)
self.state.AddBalance(sender.Address(), refund.Mul(refund, self.gasPrice))
coinbase.AddGas(self.gas, self.gasPrice)
self.gp.AddGas(self.gas, self.gasPrice)
}
func (self *StateTransition) gasUsed() *big.Int {

View File

@ -81,7 +81,7 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func(
gasLimit: gasLimitFn,
minGasPrice: new(big.Int),
pendingState: state.ManageState(currentStateFn()),
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}),
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
}
go pool.eventLoop()
@ -93,16 +93,18 @@ func (pool *TxPool) eventLoop() {
// we need to know the new state. The new state will help us determine
// the nonces in the managed state
for ev := range pool.events.Chan() {
pool.mu.Lock()
switch ev := ev.(type) {
case ChainHeadEvent:
pool.mu.Lock()
pool.resetState()
case GasPriceChanged:
pool.minGasPrice = ev.Price
}
pool.mu.Unlock()
case GasPriceChanged:
pool.mu.Lock()
pool.minGasPrice = ev.Price
pool.mu.Unlock()
case RemovedTransactionEvent:
pool.AddTransactions(ev.Txs)
}
}
}
@ -121,8 +123,8 @@ func (pool *TxPool) resetState() {
if addr, err := tx.From(); err == nil {
// Set the nonce. Transaction nonce can never be lower
// than the state nonce; validatePool took care of that.
if pool.pendingState.GetNonce(addr) < tx.Nonce() {
pool.pendingState.SetNonce(addr, tx.Nonce())
if pool.pendingState.GetNonce(addr) <= tx.Nonce() {
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
}
}
}

View File

@ -219,3 +219,34 @@ func TestMissingNonce(t *testing.T) {
t.Error("expected 1 queued transaction, got", len(pool.queue[addr]))
}
}
func TestNonceRecovery(t *testing.T) {
const n = 10
pool, key := setupTxPool()
addr := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState().SetNonce(addr, n)
pool.currentState().AddBalance(addr, big.NewInt(100000000000000))
pool.resetState()
tx := transaction(n, big.NewInt(100000), key)
if err := pool.Add(tx); err != nil {
t.Error(err)
}
// simulate some weird re-order of transactions and missing nonce(s)
pool.currentState().SetNonce(addr, n-1)
pool.resetState()
if fn := pool.pendingState.GetNonce(addr); fn != n+1 {
t.Errorf("expected nonce to be %d, got %d", n+1, fn)
}
}
func TestRemovedTxEvent(t *testing.T) {
pool, key := setupTxPool()
tx := transaction(0, big.NewInt(1000000), key)
from, _ := tx.From()
pool.currentState().AddBalance(from, big.NewInt(1000000000000))
pool.eventMux.Post(RemovedTransactionEvent{types.Transactions{tx}})
pool.eventMux.Post(ChainHeadEvent{nil})
if len(pool.pending) != 1 {
t.Error("expected 1 pending tx, got", len(pool.pending))
}
}

View File

@ -32,7 +32,7 @@ var (
)
// PutTransactions stores the transactions in the given database
func PutTransactions(db common.Database, block *types.Block, txs types.Transactions) {
func PutTransactions(db ethdb.Database, block *types.Block, txs types.Transactions) {
batch := new(leveldb.Batch)
_, batchWrite := db.(*ethdb.LDBDatabase)
@ -77,8 +77,24 @@ func PutTransactions(db common.Database, block *types.Block, txs types.Transacti
}
}
func DeleteTransaction(db ethdb.Database, txHash common.Hash) {
db.Delete(txHash[:])
}
func GetTransaction(db ethdb.Database, txhash common.Hash) *types.Transaction {
data, _ := db.Get(txhash[:])
if len(data) != 0 {
var tx types.Transaction
if err := rlp.DecodeBytes(data, &tx); err != nil {
return nil
}
return &tx
}
return nil
}
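// Hypothetical round-trip sketch (not part of the diff): a transaction is
// stored RLP-encoded under its hash, which is the key layout GetTransaction
// and DeleteTransaction above expect. The full write path is PutTransactions
// above, which also handles batching.
func putTransactionForTest(db ethdb.Database, tx *types.Transaction) error {
	data, err := rlp.EncodeToBytes(tx)
	if err != nil {
		return err
	}
	return db.Put(tx.Hash().Bytes(), data)
}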
// PutReceipts stores the receipts in the current database
func PutReceipts(db common.Database, receipts types.Receipts) error {
func PutReceipts(db ethdb.Database, receipts types.Receipts) error {
batch := new(leveldb.Batch)
_, batchWrite := db.(*ethdb.LDBDatabase)
@ -107,8 +123,13 @@ func PutReceipts(db common.Database, receipts types.Receipts) error {
return nil
}
// DeleteReceipt removes a receipt from the database
func DeleteReceipt(db ethdb.Database, txHash common.Hash) {
db.Delete(append(receiptsPre, txHash[:]...))
}
// GetReceipt returns a receipt by hash
func GetReceipt(db common.Database, txHash common.Hash) *types.Receipt {
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
data, _ := db.Get(append(receiptsPre, txHash[:]...))
if len(data) == 0 {
return nil
@ -124,7 +145,7 @@ func GetReceipt(db common.Database, txHash common.Hash) *types.Receipt {
// GetBlockReceipts returns the receipts generated by the transactions
// included in the block with the given hash.
func GetBlockReceipts(db common.Database, hash common.Hash) types.Receipts {
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
data, _ := db.Get(append(blockReceiptsPre, hash[:]...))
if len(data) == 0 {
return nil
@ -141,7 +162,7 @@ func GetBlockReceipts(db common.Database, hash common.Hash) types.Receipts {
// PutBlockReceipts stores the receipts associated with a block's transactions
// by block hash in a single slice. This is required for
// forks and chain reorgs
func PutBlockReceipts(db common.Database, block *types.Block, receipts types.Receipts) error {
func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
rs := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
rs[i] = (*types.ReceiptForStorage)(receipt)

View File

@ -60,7 +60,7 @@ type Header struct {
Number *big.Int // The block number
GasLimit *big.Int // Gas limit
GasUsed *big.Int // Gas used
Time uint64 // Creation time
Time *big.Int // Creation time
Extra []byte // Extra data
MixDigest common.Hash // for quick difficulty verification
Nonce BlockNonce
@ -94,7 +94,7 @@ func (h *Header) UnmarshalJSON(data []byte) error {
Coinbase string
Difficulty string
GasLimit string
Time uint64
Time *big.Int
Extra string
}
dec := json.NewDecoder(bytes.NewReader(data))
@ -117,6 +117,13 @@ func rlpHash(x interface{}) (h common.Hash) {
return h
}
// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
Transactions []*Transaction
Uncles []*Header
}
type Block struct {
header *Header
uncles []*Header
@ -129,12 +136,20 @@ type Block struct {
// Td is used by package core to store the total difficulty
// of the chain up to and including the block.
Td *big.Int
td *big.Int
// ReceivedAt is used by package eth to track block propagation time.
ReceivedAt time.Time
}
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
// code solely to facilitate upgrading the database from the old format to the
// new, after which it should be deleted. Do not use!
func (b *Block) DeprecatedTd() *big.Int {
return b.td
}
// [deprecated by eth/63]
// StorageBlock defines the RLP encoding of a Block stored in the
// state database. The StorageBlock encoding contains fields that
// would otherwise need to be recomputed.
@ -147,6 +162,7 @@ type extblock struct {
Uncles []*Header
}
// [deprecated by eth/63]
// "storage" block encoding. used for database.
type storageblock struct {
Header *Header
@ -168,7 +184,7 @@ var (
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
b := &Block{header: copyHeader(header), Td: new(big.Int)}
b := &Block{header: copyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
@ -210,6 +226,9 @@ func NewBlockWithHeader(header *Header) *Block {
func copyHeader(h *Header) *Header {
cpy := *h
if cpy.Time = new(big.Int); h.Time != nil {
cpy.Time.Set(h.Time)
}
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
cpy.Difficulty.Set(h.Difficulty)
}
@ -265,24 +284,16 @@ func (b *Block) EncodeRLP(w io.Writer) error {
})
}
// [deprecated by eth/63]
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
var sb storageblock
if err := s.Decode(&sb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.Td = sb.Header, sb.Uncles, sb.Txs, sb.TD
b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD
return nil
}
func (b *StorageBlock) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, storageblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
TD: b.Td,
})
}
// TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
@ -301,13 +312,13 @@ func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number)
func (b *Block) GasLimit() *big.Int { return new(big.Int).Set(b.header.GasLimit) }
func (b *Block) GasUsed() *big.Int { return new(big.Int).Set(b.header.GasUsed) }
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
func (b *Block) Time() *big.Int { return new(big.Int).Set(b.header.Time) }
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) }
func (b *Block) Bloom() Bloom { return b.header.Bloom }
func (b *Block) Coinbase() common.Address { return b.header.Coinbase }
func (b *Block) Time() uint64 { return b.header.Time }
func (b *Block) Root() common.Hash { return b.header.Root }
func (b *Block) ParentHash() common.Hash { return b.header.ParentHash }
func (b *Block) TxHash() common.Hash { return b.header.TxHash }
@ -353,10 +364,23 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
transactions: b.transactions,
receipts: b.receipts,
uncles: b.uncles,
Td: b.Td,
}
}
// WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
header: copyHeader(b.header),
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
copy(block.transactions, transactions)
for i := range uncles {
block.uncles[i] = copyHeader(uncles[i])
}
return block
}
// Implement pow.Block
func (b *Block) Hash() common.Hash {
@ -369,7 +393,7 @@ func (b *Block) Hash() common.Hash {
}
func (b *Block) String() string {
str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
str := fmt.Sprintf(`Block(#%v): Size: %v {
MinerHash: %x
%v
Transactions:
@ -377,7 +401,7 @@ Transactions:
Uncles:
%v
}
`, b.Number(), b.Size(), b.Td, b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
`, b.Number(), b.Size(), b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
return str
}

View File

@ -47,7 +47,7 @@ func TestBlockEncoding(t *testing.T) {
check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017"))
check("Hash", block.Hash(), common.HexToHash("0a5843ac1cb04865017cb35a57b50b07084e5fcee39b5acadade33149f4fff9e"))
check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4))
check("Time", block.Time(), uint64(1426516743))
check("Time", block.Time(), big.NewInt(1426516743))
check("Size", block.Size(), common.StorageSize(len(blockEnc)))
tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), big.NewInt(50000), big.NewInt(10), nil)

View File

@ -33,10 +33,6 @@ import (
var ErrInvalidSig = errors.New("invalid v, r, s values")
func IsContractAddr(addr []byte) bool {
return len(addr) == 0
}
type Transaction struct {
data txdata
// caches
@ -276,14 +272,36 @@ func (tx *Transaction) String() string {
// Transaction slice type for basic sorting.
type Transactions []*Transaction
// Len returns the length of s
func (s Transactions) Len() int { return len(s) }
// Swap swaps the i'th and the j'th element in s
func (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// GetRlp implements Rlpable and returns the i'th element of s in rlp
func (s Transactions) GetRlp(i int) []byte {
enc, _ := rlp.EncodeToBytes(s[i])
return enc
}
// TxDifference returns a new set containing the transactions in a that are not in b
func TxDifference(a, b Transactions) (keep Transactions) {
keep = make(Transactions, 0, len(a))
remove := make(map[common.Hash]struct{})
for _, tx := range b {
remove[tx.Hash()] = struct{}{}
}
for _, tx := range a {
if _, ok := remove[tx.Hash()]; !ok {
keep = append(keep, tx)
}
}
return keep
}
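// Hypothetical helper (not part of the diff) showing the intended use of
// TxDifference during a chain reorg: the transactions contained in the
// abandoned branch but missing from the new canonical branch are the ones a
// chain manager would re-announce (e.g. via core's RemovedTransactionEvent).
func droppedDuringReorg(oldBlocks, newBlocks []*Block) Transactions {
	var oldTxs, newTxs Transactions
	for _, b := range oldBlocks {
		oldTxs = append(oldTxs, b.Transactions()...)
	}
	for _, b := range newBlocks {
		newTxs = append(newTxs, b.Transactions()...)
	}
	return TxDifference(oldTxs, newTxs)
}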
type TxByNonce struct{ Transactions }
func (s TxByNonce) Less(i, j int) bool {

View File

@ -31,9 +31,11 @@ type Environment interface {
Origin() common.Address
BlockNumber() *big.Int
GetHash(n uint64) common.Hash
// The n'th hash ago from this block number
GetHash(uint64) common.Hash
// The handler's address
Coinbase() common.Address
Time() uint64
Time() *big.Int
Difficulty() *big.Int
GasLimit() *big.Int
CanTransfer(from Account, balance *big.Int) bool

View File

@ -25,20 +25,3 @@ import (
var OutOfGasError = errors.New("Out of gas")
var DepthError = fmt.Errorf("Max call depth exceeded (%d)", params.CallCreateDepth)
type StackError struct {
req, has int
}
func StackErr(req, has int) StackError {
return StackError{req, has}
}
func (self StackError) Error() string {
return fmt.Sprintf("stack error! require %v, have %v", self.req, self.has)
}
func IsStackErr(err error) bool {
_, ok := err.(StackError)
return ok
}

View File

@ -341,7 +341,7 @@ func opCoinbase(instr instruction, env Environment, context *Context, memory *Me
}
func opTimestamp(instr instruction, env Environment, context *Context, memory *Memory, stack *stack) {
stack.push(U256(new(big.Int).SetUint64(env.Time())))
stack.push(U256(new(big.Int).Set(env.Time())))
}
func opNumber(instr instruction, env Environment, context *Context, memory *Memory, stack *stack) {

View File

@ -93,7 +93,7 @@ func (self *Env) StructLogs() []StructLog {
//func (self *Env) PrevHash() []byte { return self.parent }
func (self *Env) Coinbase() common.Address { return common.Address{} }
func (self *Env) Time() uint64 { return uint64(time.Now().Unix()) }
func (self *Env) Time() *big.Int { return big.NewInt(time.Now().Unix()) }
func (self *Env) Difficulty() *big.Int { return big.NewInt(0) }
func (self *Env) State() *state.StateDB { return nil }
func (self *Env) GasLimit() *big.Int { return self.gasLimit }

View File

@ -491,7 +491,7 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) {
case TIMESTAMP:
time := self.env.Time()
stack.push(new(big.Int).SetUint64(time))
stack.push(new(big.Int).Set(time))
case NUMBER:
number := self.env.BlockNumber()

View File

@ -49,7 +49,7 @@ func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, header *type
func (self *VMEnv) Origin() common.Address { f, _ := self.msg.From(); return f }
func (self *VMEnv) BlockNumber() *big.Int { return self.header.Number }
func (self *VMEnv) Coinbase() common.Address { return self.header.Coinbase }
func (self *VMEnv) Time() uint64 { return self.header.Time }
func (self *VMEnv) Time() *big.Int { return self.header.Time }
func (self *VMEnv) Difficulty() *big.Int { return self.header.Difficulty }
func (self *VMEnv) GasLimit() *big.Int { return self.header.GasLimit }
func (self *VMEnv) Value() *big.Int { return self.msg.Value() }
@ -59,9 +59,11 @@ func (self *VMEnv) SetDepth(i int) { self.depth = i }
func (self *VMEnv) VmType() vm.Type { return self.typ }
func (self *VMEnv) SetVmType(t vm.Type) { self.typ = t }
func (self *VMEnv) GetHash(n uint64) common.Hash {
if block := self.chain.GetBlockByNumber(n); block != nil {
for block := self.chain.GetBlock(self.header.ParentHash); block != nil; block = self.chain.GetBlock(block.ParentHash()) {
if block.NumberU64() == n {
return block.Hash()
}
}
return common.Hash{}
}
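The rewritten GetHash walks parent hashes from the block being processed instead of asking for the canonical block at height n, so BLOCKHASH stays consistent with the chain segment actually being executed (relevant during reorgs and side-chain processing). A minimal sketch of the same walk against a hypothetical chain reader (the blockReader interface and the hashAt name are illustrative, not part of this diff):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// blockReader is a hypothetical stand-in for anything exposing GetBlock(hash).
type blockReader interface {
	GetBlock(hash common.Hash) *types.Block
}

// hashAt follows ancestry from parent down to height n, mirroring the
// VMEnv.GetHash change above: the lookup tracks the current chain rather
// than whatever happens to be canonical at that height.
func hashAt(chain blockReader, parent common.Hash, n uint64) common.Hash {
	for block := chain.GetBlock(parent); block != nil; block = chain.GetBlock(block.ParentHash()) {
		if block.NumberU64() == n {
			return block.Hash()
		}
	}
	return common.Hash{}
}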


@ -33,12 +33,12 @@ import (
"encoding/json"
"errors"
"code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/ripemd160"
)


@ -23,8 +23,8 @@ import (
"encoding/json"
"io"
"code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/pborman/uuid"
)
const (


@ -36,9 +36,9 @@ import (
"io"
"reflect"
"code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/randentropy"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)


@ -18,6 +18,7 @@
package eth
import (
"bytes"
"crypto/ecdsa"
"encoding/json"
"fmt"
@ -73,6 +74,8 @@ var (
)
type Config struct {
DevMode bool
Name string
NetworkId int
GenesisNonce int
@ -125,7 +128,7 @@ type Config struct {
// NewDB is used to create databases.
// If nil, the default is to create leveldb databases on disk.
NewDB func(path string) (common.Database, error)
NewDB func(path string) (ethdb.Database, error)
}
func (cfg *Config) parseBootNodes() []*discover.Node {
@ -207,11 +210,8 @@ type Ethereum struct {
shutdownChan chan bool
// DB interfaces
chainDb common.Database // Block chain databe
dappDb common.Database // Dapp database
// Closed when databases are flushed and closed
databasesClosed chan bool
chainDb ethdb.Database // Block chain database
dappDb ethdb.Database // Dapp database
//*** SERVICES ***
// State manager for processing new blocks and managing the over all states
@ -264,14 +264,10 @@ func New(config *Config) (*Ethereum, error) {
newdb := config.NewDB
if newdb == nil {
newdb = func(path string) (common.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) }
}
// attempt to merge database together, upgrading from an old version
if err := mergeDatabases(config.DataDir, newdb); err != nil {
return nil, err
newdb = func(path string) (ethdb.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) }
}
// Open the chain database and perform any upgrades needed
chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata"))
if err != nil {
return nil, fmt.Errorf("blockchain db err: %v", err)
@ -279,6 +275,10 @@ func New(config *Config) (*Ethereum, error) {
if db, ok := chainDb.(*ethdb.LDBDatabase); ok {
db.Meter("eth/db/chaindata/")
}
if err := upgradeChainDatabase(chainDb); err != nil {
return nil, err
}
dappDb, err := newdb(filepath.Join(config.DataDir, "dapp"))
if err != nil {
return nil, fmt.Errorf("dapp db err: %v", err)
@ -303,18 +303,23 @@ func New(config *Config) (*Ethereum, error) {
glog.V(logger.Info).Infof("Successfully wrote genesis block. New genesis hash = %x\n", block.Hash())
}
if config.Olympic {
// different modes
switch {
case config.Olympic:
glog.V(logger.Error).Infoln("Starting Olympic network")
fallthrough
case config.DevMode:
_, err := core.WriteTestNetGenesisBlock(chainDb, 42)
if err != nil {
return nil, err
}
glog.V(logger.Error).Infoln("Starting Olympic network")
}
// This is for testing only.
if config.GenesisBlock != nil {
core.WriteTd(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.Difficulty())
core.WriteBlock(chainDb, config.GenesisBlock)
core.WriteHead(chainDb, config.GenesisBlock)
core.WriteCanonicalHash(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.NumberU64())
core.WriteHeadBlockHash(chainDb, config.GenesisBlock.Hash())
}
if !config.SkipBcVersionCheck {
@ -329,7 +334,6 @@ func New(config *Config) (*Ethereum, error) {
eth := &Ethereum{
shutdownChan: make(chan bool),
databasesClosed: make(chan bool),
chainDb: chainDb,
dappDb: dappDb,
eventMux: &event.TypeMux{},
@ -373,7 +377,7 @@ func New(config *Config) (*Ethereum, error) {
eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor)
eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager)
eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager, chainDb)
eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
eth.miner.SetGasPrice(config.GasPrice)
@ -519,8 +523,8 @@ func (s *Ethereum) BlockProcessor() *core.BlockProcessor { return s.blockProcess
func (s *Ethereum) TxPool() *core.TxPool { return s.txPool }
func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper }
func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *Ethereum) ChainDb() common.Database { return s.chainDb }
func (s *Ethereum) DappDb() common.Database { return s.dappDb }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) DappDb() ethdb.Database { return s.dappDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) PeerCount() int { return s.net.PeerCount() }
func (s *Ethereum) Peers() []*p2p.Peer { return s.net.Peers() }
@ -541,8 +545,6 @@ func (s *Ethereum) Start() error {
if err != nil {
return err
}
// periodically flush databases
go s.syncDatabases()
if s.AutoDAG {
s.StartAutoDAG()
@ -558,32 +560,6 @@ func (s *Ethereum) Start() error {
return nil
}
// sync databases every minute. If flushing fails we exit immediatly. The system
// may not continue under any circumstances.
func (s *Ethereum) syncDatabases() {
ticker := time.NewTicker(1 * time.Minute)
done:
for {
select {
case <-ticker.C:
// don't change the order of database flushes
if err := s.dappDb.Flush(); err != nil {
glog.Fatalf("fatal error: flush dappDb: %v (Restart your node. We are aware of this issue)\n", err)
}
if err := s.chainDb.Flush(); err != nil {
glog.Fatalf("fatal error: flush chainDb: %v (Restart your node. We are aware of this issue)\n", err)
}
case <-s.shutdownChan:
break done
}
}
s.chainDb.Close()
s.dappDb.Close()
close(s.databasesClosed)
}
func (s *Ethereum) StartForTest() {
jsonlogger.LogJson(&logger.LogStarting{
ClientString: s.net.Name,
@ -614,12 +590,13 @@ func (s *Ethereum) Stop() {
}
s.StopAutoDAG()
s.chainDb.Close()
s.dappDb.Close()
close(s.shutdownChan)
}
// This function will wait for a shutdown and resumes main thread execution
func (s *Ethereum) WaitForShutdown() {
<-s.databasesClosed
<-s.shutdownChan
}
@ -709,7 +686,7 @@ func dagFiles(epoch uint64) (string, string) {
return dag, "full-R" + dag
}
func saveBlockchainVersion(db common.Database, bcVersion int) {
func saveBlockchainVersion(db ethdb.Database, bcVersion int) {
d, _ := db.Get([]byte("BlockchainVersion"))
blockchainVersion := common.NewValue(d).Uint()
@ -718,74 +695,61 @@ func saveBlockchainVersion(db common.Database, bcVersion int) {
}
}
// mergeDatabases when required merge old database layout to one single database
func mergeDatabases(datadir string, newdb func(path string) (common.Database, error)) error {
// Check if already upgraded
data := filepath.Join(datadir, "chaindata")
if _, err := os.Stat(data); !os.IsNotExist(err) {
// upgradeChainDatabase ensures that the chain database stores blocks split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
// Short circuit if the head block is stored already as separate header and body
data, err := db.Get([]byte("LastBlock"))
if err != nil {
return nil
}
// make sure it's not just a clean path
chainPath := filepath.Join(datadir, "blockchain")
if _, err := os.Stat(chainPath); os.IsNotExist(err) {
head := common.BytesToHash(data)
if block := core.GetBlockByHashOld(db, head); block == nil {
return nil
}
glog.Infoln("Database upgrade required. Upgrading...")
// At least some of the database is still the old format, upgrade (skip the head block!)
glog.V(logger.Info).Info("Old database detected, upgrading...")
database, err := newdb(data)
if err != nil {
return fmt.Errorf("creating data db err: %v", err)
if db, ok := db.(*ethdb.LDBDatabase); ok {
blockPrefix := []byte("block-hash-")
for it := db.NewIterator(); it.Next(); {
// Skip anything other than a combined block
if !bytes.HasPrefix(it.Key(), blockPrefix) {
continue
}
defer database.Close()
// Skip the head block (merge last to signal upgrade completion)
if bytes.HasSuffix(it.Key(), head.Bytes()) {
continue
}
// Load the block, split and serialize (order!)
block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))
// Migrate blocks
chainDb, err := newdb(chainPath)
if err != nil {
return fmt.Errorf("state db err: %v", err)
if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
return err
}
defer chainDb.Close()
if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
return err
}
if err := core.WriteHeader(db, block.Header()); err != nil {
return err
}
if err := db.Delete(it.Key()); err != nil {
return err
}
}
// Lastly, upgrade the head block, disabling the upgrade mechanism
current := core.GetBlockByHashOld(db, head)
if chain, ok := chainDb.(*ethdb.LDBDatabase); ok {
glog.Infoln("Merging blockchain database...")
it := chain.NewIterator()
for it.Next() {
database.Put(it.Key(), it.Value())
if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
return err
}
it.Release()
if err := core.WriteBody(db, current.Hash(), &types.Body{current.Transactions(), current.Uncles()}); err != nil {
return err
}
// Migrate state
stateDb, err := newdb(filepath.Join(datadir, "state"))
if err != nil {
return fmt.Errorf("state db err: %v", err)
if err := core.WriteHeader(db, current.Header()); err != nil {
return err
}
defer stateDb.Close()
if state, ok := stateDb.(*ethdb.LDBDatabase); ok {
glog.Infoln("Merging state database...")
it := state.NewIterator()
for it.Next() {
database.Put(it.Key(), it.Value())
}
it.Release()
}
// Migrate transaction / receipts
extraDb, err := newdb(filepath.Join(datadir, "extra"))
if err != nil {
return fmt.Errorf("state db err: %v", err)
}
defer extraDb.Close()
if extra, ok := extraDb.(*ethdb.LDBDatabase); ok {
glog.Infoln("Merging transaction database...")
it := extra.NewIterator()
for it.Next() {
database.Put(it.Key(), it.Value())
}
it.Release()
}
return nil
}

File diff suppressed because it is too large

File diff suppressed because it is too large

eth/downloader/metrics.go (new file, 45 lines)

@ -0,0 +1,45 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Contains the metrics collected by the downloader.
package downloader
import (
"github.com/ethereum/go-ethereum/metrics"
)
var (
hashInMeter = metrics.NewMeter("eth/downloader/hashes/in")
hashReqTimer = metrics.NewTimer("eth/downloader/hashes/req")
hashDropMeter = metrics.NewMeter("eth/downloader/hashes/drop")
hashTimeoutMeter = metrics.NewMeter("eth/downloader/hashes/timeout")
blockInMeter = metrics.NewMeter("eth/downloader/blocks/in")
blockReqTimer = metrics.NewTimer("eth/downloader/blocks/req")
blockDropMeter = metrics.NewMeter("eth/downloader/blocks/drop")
blockTimeoutMeter = metrics.NewMeter("eth/downloader/blocks/timeout")
headerInMeter = metrics.NewMeter("eth/downloader/headers/in")
headerReqTimer = metrics.NewTimer("eth/downloader/headers/req")
headerDropMeter = metrics.NewMeter("eth/downloader/headers/drop")
headerTimeoutMeter = metrics.NewMeter("eth/downloader/headers/timeout")
bodyInMeter = metrics.NewMeter("eth/downloader/bodies/in")
bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req")
bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop")
bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
)
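These wrap go-ethereum's metrics package: meters count events, timers record durations since a start time. A hedged sketch of how the downloader is expected to drive them when a header response arrives; the deliverHeaders helper and its parameters are illustrative, and the snippet assumes the same package context with "time" and core/types imported:

// Illustrative only: record arrival count, round-trip latency and timeouts
// for a header request the way the surrounding downloader code does.
func deliverHeaders(requestedAt time.Time, headers []*types.Header, timedOut bool) {
	headerInMeter.Mark(int64(len(headers))) // how many headers came back
	headerReqTimer.UpdateSince(requestedAt) // request round-trip time
	if timedOut {
		headerTimeoutMeter.Mark(1) // the request exceeded its TTL
	}
}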


@ -31,10 +31,16 @@ import (
"gopkg.in/fatih/set.v0"
)
// Hash and block fetchers belonging to eth/61 and below
type relativeHashFetcherFn func(common.Hash) error
type absoluteHashFetcherFn func(uint64, int) error
type blockFetcherFn func([]common.Hash) error
// Block header and body fetchers belonging to eth/62 and above
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
type blockBodyFetcherFn func([]common.Hash) error
var (
errAlreadyFetching = errors.New("already fetching blocks from peer")
errAlreadyRegistered = errors.New("peer is already registered")
@ -54,24 +60,36 @@ type peer struct {
ignored *set.Set // Set of hashes not to request (didn't have previously)
getRelHashes relativeHashFetcherFn // Method to retrieve a batch of hashes from an origin hash
getAbsHashes absoluteHashFetcherFn // Method to retrieve a batch of hashes from an absolute position
getBlocks blockFetcherFn // Method to retrieve a batch of blocks
getRelHashes relativeHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an origin hash
getAbsHashes absoluteHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an absolute position
getBlocks blockFetcherFn // [eth/61] Method to retrieve a batch of blocks
getRelHeaders relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies
version int // Eth protocol version number to switch strategies
}
// newPeer creates a new downloader peer, with specific hash and block retrieval
// mechanisms.
func newPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn) *peer {
func newPeer(id string, version int, head common.Hash,
getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn) *peer {
return &peer{
id: id,
head: head,
capacity: 1,
ignored: set.New(),
getRelHashes: getRelHashes,
getAbsHashes: getAbsHashes,
getBlocks: getBlocks,
ignored: set.New(),
getRelHeaders: getRelHeaders,
getAbsHeaders: getAbsHeaders,
getBlockBodies: getBlockBodies,
version: version,
}
}
@ -83,8 +101,8 @@ func (p *peer) Reset() {
p.ignored.Clear()
}
// Fetch sends a block retrieval request to the remote peer.
func (p *peer) Fetch(request *fetchRequest) error {
// Fetch61 sends a block retrieval request to the remote peer.
func (p *peer) Fetch61(request *fetchRequest) error {
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
return errAlreadyFetching
@ -101,10 +119,28 @@ func (p *peer) Fetch(request *fetchRequest) error {
return nil
}
// SetIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Fetch sends a block body retrieval request to the remote peer.
func (p *peer) Fetch(request *fetchRequest) error {
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
return errAlreadyFetching
}
p.started = time.Now()
// Convert the header set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Headers))
for _, header := range request.Headers {
hashes = append(hashes, header.Hash())
}
go p.getBlockBodies(hashes)
return nil
}
// SetIdle61 sets the peer to idle, allowing it to execute new retrieval requests.
// Its block retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time or not.
func (p *peer) SetIdle() {
func (p *peer) SetIdle61() {
// Update the peer's download allowance based on previous performance
scale := 2.0
if time.Since(p.started) > blockSoftTTL {
@ -131,6 +167,36 @@ func (p *peer) SetIdle() {
atomic.StoreInt32(&p.idle, 0)
}
// SetIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its block body retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time or not.
func (p *peer) SetIdle() {
// Update the peer's download allowance based on previous performance
scale := 2.0
if time.Since(p.started) > bodySoftTTL {
scale = 0.5
if time.Since(p.started) > bodyHardTTL {
scale = 1 / float64(MaxBodyFetch) // reduces capacity to 1
}
}
for {
// Calculate the new download bandwidth allowance
prev := atomic.LoadInt32(&p.capacity)
next := int32(math.Max(1, math.Min(float64(MaxBodyFetch), float64(prev)*scale)))
// Try to update the old value
if atomic.CompareAndSwapInt32(&p.capacity, prev, next) {
// If we're having problems at 1 capacity, try to find better peers
if next == 1 {
p.Demote()
}
break
}
}
// Set the peer to idle to allow further block requests
atomic.StoreInt32(&p.idle, 0)
}
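The allowance adjustment is purely multiplicative: an in-time delivery doubles the body allowance, a soft timeout halves it, and a hard timeout collapses it to a single body, always clamped to [1, MaxBodyFetch]. A standalone worked sketch of the same formula (the MaxBodyFetch value of 128 is assumed here purely for illustration):

package main

import (
	"fmt"
	"math"
)

func main() {
	const maxBodyFetch = 128 // assumed limit, for illustration only

	next := func(prev int32, scale float64) int32 {
		return int32(math.Max(1, math.Min(float64(maxBodyFetch), float64(prev)*scale)))
	}

	fmt.Println(next(8, 2.0))                     // delivered in time:  8 -> 16
	fmt.Println(next(8, 0.5))                     // soft timeout:       8 -> 4
	fmt.Println(next(8, 1/float64(maxBodyFetch))) // hard timeout:       8 -> 1
	fmt.Println(next(100, 2.0))                   // doubling is capped: 100 -> 128
}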
// Capacity retrieves the peers block download allowance based on its previously
// discovered bandwidth capacity.
func (p *peer) Capacity() int {
@ -246,16 +312,18 @@ func (ps *peerSet) AllPeers() []*peer {
// IdlePeers retrieves a flat list of all the currently idle peers within the
// active peer set, ordered by their reputation.
func (ps *peerSet) IdlePeers() []*peer {
func (ps *peerSet) IdlePeers(version int) []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
if (version == eth61 && p.version == eth61) || (version >= eth62 && p.version >= eth62) {
if atomic.LoadInt32(&p.idle) == 0 {
list = append(list, p)
}
}
}
for i := 0; i < len(list); i++ {
for j := i + 1; j < len(list); j++ {
if atomic.LoadInt32(&list[i].rep) < atomic.LoadInt32(&list[j].rep) {


@ -44,15 +44,20 @@ var (
// fetchRequest is a currently running block retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
Hashes map[common.Hash]int // Requested hashes with their insertion index (priority)
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
Time time.Time // Time when the request was made
}
// queue represents hashes that either need fetching or are being fetched
type queue struct {
hashPool map[common.Hash]int // Pending hashes, mapping to their insertion index (priority)
hashQueue *prque.Prque // Priority queue of the block hashes to fetch
hashCounter int // Counter indexing the added hashes to ensure retrieval order
hashPool map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority)
hashQueue *prque.Prque // [eth/61] Priority queue of the block hashes to fetch
hashCounter int // [eth/61] Counter indexing the added hashes to ensure retrieval order
headerPool map[common.Hash]*types.Header // [eth/62] Pending headers, mapping from their hashes
headerQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the bodies for
headerHead common.Hash // [eth/62] Hash of the last queued header to verify order
pendPool map[string]*fetchRequest // Currently pending block retrieval operations
@ -68,6 +73,8 @@ func newQueue() *queue {
return &queue{
hashPool: make(map[common.Hash]int),
hashQueue: prque.New(),
headerPool: make(map[common.Hash]*types.Header),
headerQueue: prque.New(),
pendPool: make(map[string]*fetchRequest),
blockPool: make(map[common.Hash]uint64),
blockCache: make([]*Block, blockCacheLimit),
@ -83,6 +90,10 @@ func (q *queue) Reset() {
q.hashQueue.Reset()
q.hashCounter = 0
q.headerPool = make(map[common.Hash]*types.Header)
q.headerQueue.Reset()
q.headerHead = common.Hash{}
q.pendPool = make(map[string]*fetchRequest)
q.blockPool = make(map[common.Hash]uint64)
@ -90,21 +101,21 @@ func (q *queue) Reset() {
q.blockCache = make([]*Block, blockCacheLimit)
}
// Size retrieves the number of hashes in the queue, returning separately for
// Size retrieves the number of blocks in the queue, returning separately for
// pending and already downloaded.
func (q *queue) Size() (int, int) {
q.lock.RLock()
defer q.lock.RUnlock()
return len(q.hashPool), len(q.blockPool)
return len(q.hashPool) + len(q.headerPool), len(q.blockPool)
}
// Pending retrieves the number of hashes pending for retrieval.
// Pending retrieves the number of blocks pending for retrieval.
func (q *queue) Pending() int {
q.lock.RLock()
defer q.lock.RUnlock()
return q.hashQueue.Size()
return q.hashQueue.Size() + q.headerQueue.Size()
}
// InFlight retrieves the number of fetch requests currently in flight.
@ -124,7 +135,7 @@ func (q *queue) Throttle() bool {
// Calculate the currently in-flight block requests
pending := 0
for _, request := range q.pendPool {
pending += len(request.Hashes)
pending += len(request.Hashes) + len(request.Headers)
}
// Throttle if more blocks are in-flight than free space in the cache
return pending >= len(q.blockCache)-len(q.blockPool)
@ -138,15 +149,18 @@ func (q *queue) Has(hash common.Hash) bool {
if _, ok := q.hashPool[hash]; ok {
return true
}
if _, ok := q.headerPool[hash]; ok {
return true
}
if _, ok := q.blockPool[hash]; ok {
return true
}
return false
}
// Insert adds a set of hashes for the download queue for scheduling, returning
// Insert61 adds a set of hashes for the download queue for scheduling, returning
// the new hashes encountered.
func (q *queue) Insert(hashes []common.Hash, fifo bool) []common.Hash {
func (q *queue) Insert61(hashes []common.Hash, fifo bool) []common.Hash {
q.lock.Lock()
defer q.lock.Unlock()
@ -172,6 +186,40 @@ func (q *queue) Insert(hashes []common.Hash, fifo bool) []common.Hash {
return inserts
}
// Insert adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Insert(headers []*types.Header, from uint64) []*types.Header {
q.lock.Lock()
defer q.lock.Unlock()
// Insert all the headers prioritized by the contained block number
inserts := make([]*types.Header, 0, len(headers))
for _, header := range headers {
// Make sure no duplicate requests are executed
hash := header.Hash()
if _, ok := q.headerPool[hash]; ok {
glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled", header.Number.Uint64(), hash[:4])
continue
}
// Make sure chain order is honored and preserved throughout
if header.Number == nil || header.Number.Uint64() != from {
glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ordering, expected %d", header.Number, hash[:4], from)
break
}
if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ancestry", header.Number, hash[:4])
break
}
// Queue the header for body retrieval
inserts = append(inserts, header)
q.headerPool[hash] = header
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
q.headerHead = hash
from++
}
return inserts
}
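Insert enforces two invariants on every accepted batch: header numbers must be contiguous starting at from, and each header's ParentHash must match the previously queued header. A compact sketch of the same checks (illustrative helper; it assumes the downloader package context with common and core/types imported):

// contiguous reports whether a header batch satisfies the ordering rules
// that Insert enforces above. prevHash is the hash of the last header
// already queued (zero hash if none).
func contiguous(headers []*types.Header, from uint64, prevHash common.Hash) bool {
	for _, header := range headers {
		if header.Number == nil || header.Number.Uint64() != from {
			return false // broke chain ordering
		}
		if prevHash != (common.Hash{}) && header.ParentHash != prevHash {
			return false // broke chain ancestry
		}
		prevHash, from = header.Hash(), from+1
	}
	return true
}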
// GetHeadBlock retrieves the first block from the cache, or nil if it hasn't
// been downloaded yet (or simply non existent).
func (q *queue) GetHeadBlock() *Block {
@ -227,9 +275,9 @@ func (q *queue) TakeBlocks() []*Block {
return blocks
}
// Reserve reserves a set of hashes for the given peer, skipping any previously
// Reserve61 reserves a set of hashes for the given peer, skipping any previously
// failed download.
func (q *queue) Reserve(p *peer, count int) *fetchRequest {
func (q *queue) Reserve61(p *peer, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
@ -276,6 +324,68 @@ func (q *queue) Reserve(p *peer, count int) *fetchRequest {
return request
}
// Reserve reserves a set of headers for the given peer, skipping any previously
// failed download. Besides the next batch of needed fetches, it also returns a
// flag indicating whether any empty blocks were queued and require processing.
func (q *queue) Reserve(p *peer, count int) (*fetchRequest, bool, error) {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the pool has been depleted, or if the peer's already
// downloading something (sanity check not to corrupt state)
if q.headerQueue.Empty() {
return nil, false, nil
}
if _, ok := q.pendPool[p.id]; ok {
return nil, false, nil
}
// Calculate an upper limit on the bodies we might fetch (i.e. throttling)
space := len(q.blockCache) - len(q.blockPool)
for _, request := range q.pendPool {
space -= len(request.Headers)
}
// Retrieve a batch of headers, skipping previously failed ones
send := make([]*types.Header, 0, count)
skip := make([]*types.Header, 0)
process := false
for proc := 0; proc < space && len(send) < count && !q.headerQueue.Empty(); proc++ {
header := q.headerQueue.PopItem().(*types.Header)
// If the header defines an empty block, deliver it straight for processing
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
if err := q.enqueue("", types.NewBlockWithHeader(header)); err != nil {
return nil, false, errInvalidChain
}
delete(q.headerPool, header.Hash())
process, space, proc = true, space-1, proc-1
continue
}
// If it's a content block, add to the body fetch request
if p.ignored.Has(header.Hash()) {
skip = append(skip, header)
} else {
send = append(send, header)
}
}
// Merge all the skipped headers back
for _, header := range skip {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
// Assemble and return the block download request
if len(send) == 0 {
return nil, process, nil
}
request := &fetchRequest{
Peer: p,
Headers: send,
Time: time.Now(),
}
q.pendPool[p.id] = request
return request, process, nil
}
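The empty-block short circuit works because the transaction root of an empty list and the hash of an empty uncle set are fixed, well-known values, so the header alone proves the body is empty. A small standalone sketch that derives both constants (only go-ethereum's core/types package is assumed):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	emptyTxRoot := types.DeriveSha(types.Transactions{})
	emptyUncleHash := types.CalcUncleHash([]*types.Header{})

	// A header whose TxHash/UncleHash equal these values describes a block
	// with no body, so Reserve can enqueue it without any body request.
	fmt.Printf("empty tx root:    %x\n", emptyTxRoot)
	fmt.Printf("empty uncle hash: %x\n", emptyUncleHash)
}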
// Cancel aborts a fetch request, returning all pending hashes to the queue.
func (q *queue) Cancel(request *fetchRequest) {
q.lock.Lock()
@ -284,6 +394,9 @@ func (q *queue) Cancel(request *fetchRequest) {
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
for _, header := range request.Headers {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
delete(q.pendPool, request.Peer.id)
}
@ -297,9 +410,19 @@ func (q *queue) Expire(timeout time.Duration) []string {
peers := []string{}
for id, request := range q.pendPool {
if time.Since(request.Time) > timeout {
// Update the metrics with the timeout
if len(request.Hashes) > 0 {
blockTimeoutMeter.Mark(1)
} else {
bodyTimeoutMeter.Mark(1)
}
// Return any non satisfied requests to the pool
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
for _, header := range request.Headers {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
peers = append(peers, id)
}
}
@ -310,8 +433,8 @@ func (q *queue) Expire(timeout time.Duration) []string {
return peers
}
// Deliver injects a block retrieval response into the download queue.
func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
// Deliver61 injects a block retrieval response into the download queue.
func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) {
q.lock.Lock()
defer q.lock.Unlock()
@ -320,6 +443,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
if request == nil {
return errNoFetchesPending
}
blockReqTimer.UpdateSince(request.Time)
delete(q.pendPool, id)
// If no blocks were retrieved, mark them as unavailable for the origin peer
@ -337,19 +461,12 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
errs = append(errs, fmt.Errorf("non-requested block %x", hash))
continue
}
// If a requested block falls out of the range, the hash chain is invalid
index := int(int64(block.NumberU64()) - int64(q.blockOffset))
if index >= len(q.blockCache) || index < 0 {
return errInvalidChain
}
// Otherwise merge the block and mark the hash block
q.blockCache[index] = &Block{
RawBlock: block,
OriginPeer: id,
// Queue the block up for processing
if err := q.enqueue(id, block); err != nil {
return err
}
delete(request.Hashes, hash)
delete(q.hashPool, hash)
q.blockPool[hash] = block.NumberU64()
}
// Return all failed or missing fetches to the queue
for hash, index := range request.Hashes {
@ -365,6 +482,89 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
return nil
}
// Deliver injects a block body retrieval response into the download queue.
func (q *queue) Deliver(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the block bodies were never requested
request := q.pendPool[id]
if request == nil {
return errNoFetchesPending
}
bodyReqTimer.UpdateSince(request.Time)
delete(q.pendPool, id)
// If no block bodies were retrieved, mark them as unavailable for the origin peer
if len(txLists) == 0 || len(uncleLists) == 0 {
for _, header := range request.Headers {
request.Peer.ignored.Add(header.Hash())
}
}
// Assemble each of the block bodies with their headers and queue for processing
errs := make([]error, 0)
for i, header := range request.Headers {
// Short circuit block assembly if no more bodies are found
if i >= len(txLists) || i >= len(uncleLists) {
break
}
// Reconstruct the next block if contents match up
if types.DeriveSha(types.Transactions(txLists[i])) != header.TxHash || types.CalcUncleHash(uncleLists[i]) != header.UncleHash {
errs = []error{errInvalidBody}
break
}
block := types.NewBlockWithHeader(header).WithBody(txLists[i], uncleLists[i])
// Queue the block up for processing
if err := q.enqueue(id, block); err != nil {
errs = []error{err}
break
}
request.Headers[i] = nil
delete(q.headerPool, header.Hash())
}
// Return all failed or missing fetches to the queue
for _, header := range request.Headers {
if header != nil {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
}
// If none of the blocks were good, it's a stale delivery
switch {
case len(errs) == 0:
return nil
case len(errs) == 1 && errs[0] == errInvalidBody:
return errInvalidBody
case len(errs) == 1 && errs[0] == errInvalidChain:
return errInvalidChain
case len(errs) == len(request.Headers):
return errStaleDelivery
default:
return fmt.Errorf("multiple failures: %v", errs)
}
}
// enqueue inserts a new block into the final delivery queue, waiting for pickup
// by the processor.
func (q *queue) enqueue(origin string, block *types.Block) error {
// If a requested block falls out of the range, the hash chain is invalid
index := int(int64(block.NumberU64()) - int64(q.blockOffset))
if index >= len(q.blockCache) || index < 0 {
return errInvalidChain
}
// Otherwise merge the block and mark the hash done
q.blockCache[index] = &Block{
RawBlock: block,
OriginPeer: origin,
}
q.blockPool[block.Header().Hash()] = block.NumberU64()
return nil
}
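The index arithmetic is what ties delivered blocks back to the chain section currently being downloaded: a block lands at slot number - blockOffset, and anything outside the cache bounds cannot belong to the requested hash chain. A quick numeric sketch (cache size and offset are made-up values):

package main

import "fmt"

func main() {
	// Illustrative numbers only: a 256-slot cache anchored at block 1000.
	blockOffset, cacheSize := uint64(1000), 256

	for _, number := range []uint64{1000, 1255, 1300} {
		index := int(int64(number) - int64(blockOffset))
		ok := index >= 0 && index < cacheSize
		fmt.Printf("block %d -> index %d, accepted: %v\n", number, index, ok)
	}
}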
// Prepare configures the block cache offset to allow accepting inbound blocks.
func (q *queue) Prepare(offset uint64) {
q.lock.Lock()


@ -51,6 +51,12 @@ type blockRetrievalFn func(common.Hash) *types.Block
// blockRequesterFn is a callback type for sending a block retrieval request.
type blockRequesterFn func([]common.Hash) error
// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error
// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error
// blockValidatorFn is a callback type to verify a block's header for fast propagation.
type blockValidatorFn func(block *types.Block, parent *types.Block) error
@ -70,10 +76,29 @@ type peerDropFn func(id string)
// network.
type announce struct {
hash common.Hash // Hash of the block being announced
number uint64 // Number of the block being announced (0 = unknown | old protocol)
header *types.Header // Header of the block partially reassembled (new protocol)
time time.Time // Timestamp of the announcement
origin string // Identifier of the peer originating the notification
fetch blockRequesterFn // Fetcher function to retrieve
fetch61 blockRequesterFn // [eth/61] Fetcher function to retrieve an announced block
fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block
fetchBodies bodyRequesterFn // [eth/62] Fetcher function to retrieve the body of an announced block
}
// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
headers []*types.Header // Collection of headers to filter
time time.Time // Arrival time of the headers
}
// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
transactions [][]*types.Transaction // Collection of transactions per block bodies
uncles [][]*types.Header // Collection of uncles per block bodies
time time.Time // Arrival time of the blocks' contents
}
// inject represents a scheduled import operation.
@ -88,7 +113,11 @@ type Fetcher struct {
// Various event channels
notify chan *announce
inject chan *inject
filter chan chan []*types.Block
blockFilter chan chan []*types.Block
headerFilter chan chan *headerFilterTask
bodyFilter chan chan *bodyFilterTask
done chan common.Hash
quit chan struct{}
@ -96,6 +125,8 @@ type Fetcher struct {
announces map[string]int // Per peer announce counts to prevent memory exhaustion
announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching
fetching map[common.Hash]*announce // Announced blocks, currently fetching
fetched map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
completing map[common.Hash]*announce // Blocks with headers, currently body-completing
// Block cache
queue *prque.Prque // Queue containing the import operations (block number sorted)
@ -111,8 +142,9 @@ type Fetcher struct {
dropPeer peerDropFn // Drops a peer for misbehaving
// Testing hooks
fetchingHook func([]common.Hash) // Method to call upon starting a block fetch
importedHook func(*types.Block) // Method to call upon successful block import
fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62)
}
// New creates a block fetcher to retrieve blocks based on hash announcements.
@ -120,12 +152,16 @@ func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlo
return &Fetcher{
notify: make(chan *announce),
inject: make(chan *inject),
filter: make(chan chan []*types.Block),
blockFilter: make(chan chan []*types.Block),
headerFilter: make(chan chan *headerFilterTask),
bodyFilter: make(chan chan *bodyFilterTask),
done: make(chan common.Hash),
quit: make(chan struct{}),
announces: make(map[string]int),
announced: make(map[common.Hash][]*announce),
fetching: make(map[common.Hash]*announce),
fetched: make(map[common.Hash][]*announce),
completing: make(map[common.Hash]*announce),
queue: prque.New(),
queues: make(map[string]int),
queued: make(map[common.Hash]*inject),
@ -152,12 +188,17 @@ func (f *Fetcher) Stop() {
// Notify announces the fetcher of the potential availability of a new block in
// the network.
func (f *Fetcher) Notify(peer string, hash common.Hash, time time.Time, fetcher blockRequesterFn) error {
func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
blockFetcher blockRequesterFn, // eth/61 specific whole block fetcher
headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
block := &announce{
hash: hash,
number: number,
time: time,
origin: peer,
fetch: fetcher,
fetch61: blockFetcher,
fetchHeader: headerFetcher,
fetchBodies: bodyFetcher,
}
select {
case f.notify <- block:
@ -181,14 +222,16 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
}
}
// Filter extracts all the blocks that were explicitly requested by the fetcher,
// FilterBlocks extracts all the blocks that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) Filter(blocks types.Blocks) types.Blocks {
func (f *Fetcher) FilterBlocks(blocks types.Blocks) types.Blocks {
glog.V(logger.Detail).Infof("[eth/61] filtering %d blocks", len(blocks))
// Send the filter channel to the fetcher
filter := make(chan []*types.Block)
select {
case f.filter <- filter:
case f.blockFilter <- filter:
case <-f.quit:
return nil
}
@ -207,11 +250,69 @@ func (f *Fetcher) Filter(blocks types.Blocks) types.Blocks {
}
}
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))
// Send the filter channel to the fetcher
filter := make(chan *headerFilterTask)
select {
case f.headerFilter <- filter:
case <-f.quit:
return nil
}
// Request the filtering of the header list
select {
case filter <- &headerFilterTask{headers: headers, time: time}:
case <-f.quit:
return nil
}
// Retrieve the headers remaining after filtering
select {
case task := <-filter:
return task.headers
case <-f.quit:
return nil
}
}
// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))
// Send the filter channel to the fetcher
filter := make(chan *bodyFilterTask)
select {
case f.bodyFilter <- filter:
case <-f.quit:
return nil, nil
}
// Request the filtering of the body list
select {
case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}:
case <-f.quit:
return nil, nil
}
// Retrieve the bodies remaining after filtering
select {
case task := <-filter:
return task.transactions, task.uncles
case <-f.quit:
return nil, nil
}
}
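All three Filter* methods use the same two-step handshake with the fetcher's event loop: the caller first sends a private reply channel, then sends the task on that channel and waits for the filtered remainder, aborting on quit at every step so neither side can block a shutdown. A stripped-down sketch of the pattern outside the fetcher (every name here is illustrative):

package main

import "fmt"

type task struct{ items []int }

func main() {
	filterCh := make(chan chan *task) // channel-of-channels handshake
	quit := make(chan struct{})

	// The "loop" goroutine owns all state and answers filter requests.
	go func() {
		for {
			select {
			case reply := <-filterCh:
				t := <-reply          // step 2: receive the work
				t.items = t.items[:1] // pretend everything but one item was consumed
				reply <- t            // step 3: hand back the remainder
			case <-quit:
				return
			}
		}
	}()

	// The caller side, mirroring FilterHeaders / FilterBodies.
	reply := make(chan *task)
	filterCh <- reply // step 1: register the reply channel
	reply <- &task{items: []int{1, 2, 3}}
	fmt.Println((<-reply).items) // [1]

	close(quit)
}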
// loop is the main fetcher loop, checking and processing various notification
// events.
func (f *Fetcher) loop() {
// Iterate the block fetching until a quit is requested
fetch := time.NewTimer(0)
fetchTimer := time.NewTimer(0)
completeTimer := time.NewTimer(0)
for {
// Clean up any expired block fetches
for hash, announce := range f.fetching {
@ -246,26 +347,38 @@ func (f *Fetcher) loop() {
case notification := <-f.notify:
// A block was announced, make sure the peer isn't DOSing us
announceMeter.Mark(1)
propAnnounceInMeter.Mark(1)
count := f.announces[notification.origin] + 1
if count > hashLimit {
glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
propAnnounceDOSMeter.Mark(1)
break
}
// If we have a valid block number, check that it's potentially useful
if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
propAnnounceDropMeter.Mark(1)
break
}
}
// All is well, schedule the announce if block's not yet downloading
if _, ok := f.fetching[notification.hash]; ok {
break
}
if _, ok := f.completing[notification.hash]; ok {
break
}
f.announces[notification.origin] = count
f.announced[notification.hash] = append(f.announced[notification.hash], notification)
if len(f.announced) == 1 {
f.reschedule(fetch)
f.rescheduleFetch(fetchTimer)
}
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
broadcastMeter.Mark(1)
propBroadcastInMeter.Mark(1)
f.enqueue(op.origin, op.block)
case hash := <-f.done:
@ -273,7 +386,7 @@ func (f *Fetcher) loop() {
f.forgetHash(hash)
f.forgetBlock(hash)
case <-fetch.C:
case <-fetchTimer.C:
// At least one block's timer ran out, check for needing retrieval
request := make(map[string][]common.Hash)
@ -290,30 +403,80 @@ func (f *Fetcher) loop() {
}
}
}
// Send out all block requests
// Send out all block (eth/61) or header (eth/62) requests
for peer, hashes := range request {
if glog.V(logger.Detail) && len(hashes) > 0 {
list := "["
for _, hash := range hashes {
list += fmt.Sprintf("%x, ", hash[:4])
list += fmt.Sprintf("%x, ", hash[:4])
}
list = list[:len(list)-2] + "]"
glog.V(logger.Detail).Infof("Peer %s: fetching %s", peer, list)
if f.fetching[hashes[0]].fetch61 != nil {
glog.V(logger.Detail).Infof("[eth/61] Peer %s: fetching blocks %s", peer, list)
} else {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
}
}
// Create a closure of the fetch and schedule in on a new thread
fetcher, hashes := f.fetching[hashes[0]].fetch, hashes
fetchBlocks, fetchHeader, hashes := f.fetching[hashes[0]].fetch61, f.fetching[hashes[0]].fetchHeader, hashes
go func() {
if f.fetchingHook != nil {
f.fetchingHook(hashes)
}
fetcher(hashes)
if fetchBlocks != nil {
// Use old eth/61 protocol to retrieve whole blocks
blockFetchMeter.Mark(int64(len(hashes)))
fetchBlocks(hashes)
} else {
// Use new eth/62 protocol to retrieve headers first
for _, hash := range hashes {
headerFetchMeter.Mark(1)
fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
}
}
}()
}
// Schedule the next fetch if blocks are still pending
f.reschedule(fetch)
f.rescheduleFetch(fetchTimer)
case filter := <-f.filter:
case <-completeTimer.C:
// At least one header's timer ran out, retrieve everything
request := make(map[string][]common.Hash)
for hash, announces := range f.fetched {
// Pick a random peer to retrieve from, reset all others
announce := announces[rand.Intn(len(announces))]
f.forgetHash(hash)
// If the block still didn't arrive, queue for completion
if f.getBlock(hash) == nil {
request[announce.origin] = append(request[announce.origin], hash)
f.completing[hash] = announce
}
}
// Send out all block body requests
for peer, hashes := range request {
if glog.V(logger.Detail) && len(hashes) > 0 {
list := "["
for _, hash := range hashes {
list += fmt.Sprintf("%x…, ", hash[:4])
}
list = list[:len(list)-2] + "]"
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
}
// Create a closure of the fetch and schedule in on a new thread
if f.completingHook != nil {
f.completingHook(hashes)
}
bodyFetchMeter.Mark(int64(len(hashes)))
go f.completing[hashes[0]].fetchBodies(hashes)
}
// Schedule the next fetch if blocks are still pending
f.rescheduleComplete(completeTimer)
case filter := <-f.blockFilter:
// Blocks arrived, extract any explicit fetches, return all else
var blocks types.Blocks
select {
@ -321,6 +484,7 @@ func (f *Fetcher) loop() {
case <-f.quit:
return
}
blockFilterInMeter.Mark(int64(len(blocks)))
explicit, download := []*types.Block{}, []*types.Block{}
for _, block := range blocks {
@ -339,6 +503,7 @@ func (f *Fetcher) loop() {
}
}
blockFilterOutMeter.Mark(int64(len(download)))
select {
case filter <- download:
case <-f.quit:
@ -350,12 +515,146 @@ func (f *Fetcher) loop() {
f.enqueue(announce.origin, block)
}
}
case filter := <-f.headerFilter:
// Headers arrived from a remote peer. Extract those that were explicitly
// requested by the fetcher, and return everything else so it's delivered
// to other parts of the system.
var task *headerFilterTask
select {
case task = <-filter:
case <-f.quit:
return
}
headerFilterInMeter.Mark(int64(len(task.headers)))
// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
for _, header := range task.headers {
hash := header.Hash()
// Filter fetcher-requested headers from other synchronisation algorithms
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
// If the delivered header does not match the promised number, drop the announcer
if header.Number.Uint64() != announce.number {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
f.dropPeer(announce.origin)
f.forgetHash(hash)
continue
}
// Only keep if not imported by other means
if f.getBlock(hash) == nil {
announce.header = header
announce.time = task.time
// If the block is empty (header only), short circuit into the final import queue
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
block := types.NewBlockWithHeader(header)
block.ReceivedAt = task.time
complete = append(complete, block)
f.completing[hash] = announce
continue
}
// Otherwise add to the list of blocks needing completion
incomplete = append(incomplete, announce)
} else {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
f.forgetHash(hash)
}
} else {
// Fetcher doesn't know about it, add to the return list
unknown = append(unknown, header)
}
}
headerFilterOutMeter.Mark(int64(len(unknown)))
select {
case filter <- &headerFilterTask{headers: unknown, time: task.time}:
case <-f.quit:
return
}
// Schedule the retrieved headers for body completion
for _, announce := range incomplete {
hash := announce.header.Hash()
if _, ok := f.completing[hash]; ok {
continue
}
f.fetched[hash] = append(f.fetched[hash], announce)
if len(f.fetched) == 1 {
f.rescheduleComplete(completeTimer)
}
}
// Schedule the header-only blocks for import
for _, block := range complete {
if announce := f.completing[block.Hash()]; announce != nil {
f.enqueue(announce.origin, block)
}
}
case filter := <-f.bodyFilter:
// Block bodies arrived, extract any explicitly requested blocks, return the rest
var task *bodyFilterTask
select {
case task = <-filter:
case <-f.quit:
return
}
bodyFilterInMeter.Mark(int64(len(task.transactions)))
blocks := []*types.Block{}
for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
// Match up a body to any possible completion request
matched := false
for hash, announce := range f.completing {
if f.queued[hash] == nil {
txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
uncleHash := types.CalcUncleHash(task.uncles[i])
if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash {
// Mark the body matched, reassemble if still unknown
matched = true
if f.getBlock(hash) == nil {
block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
block.ReceivedAt = task.time
blocks = append(blocks, block)
} else {
f.forgetHash(hash)
}
}
}
}
if matched {
task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
i--
continue
}
}
bodyFilterOutMeter.Mark(int64(len(task.transactions)))
select {
case filter <- task:
case <-f.quit:
return
}
// Schedule the retrieved blocks for ordered import
for _, block := range blocks {
if announce := f.completing[block.Hash()]; announce != nil {
f.enqueue(announce.origin, block)
}
}
}
}
}
// reschedule resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) reschedule(fetch *time.Timer) {
// rescheduleFetch resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
// Short circuit if no blocks are announced
if len(f.announced) == 0 {
return
@ -370,6 +669,22 @@ func (f *Fetcher) reschedule(fetch *time.Timer) {
fetch.Reset(arriveTimeout - time.Since(earliest))
}
// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
// Short circuit if no headers are fetched
if len(f.fetched) == 0 {
return
}
// Otherwise find the earliest expiring announcement
earliest := time.Now()
for _, announces := range f.fetched {
if earliest.After(announces[0].time) {
earliest = announces[0].time
}
}
complete.Reset(gatherSlack - time.Since(earliest))
}
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
@ -378,13 +693,16 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
propBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
discardMeter.Mark(1)
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
propBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
}
// Schedule the block for future importing
@ -398,7 +716,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
f.queue.Push(op, -float32(block.NumberU64()))
if glog.V(logger.Debug) {
glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
}
}
}
@ -410,39 +728,39 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
hash := block.Hash()
// Run the import on a new thread
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
go func() {
defer func() { f.done <- hash }()
// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
if parent == nil {
glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4])
return
}
// Quickly validate the header and propagate the block if it passes
switch err := f.validateBlock(block, parent); err {
case nil:
// All ok, quickly propagate to our peers
broadcastTimer.UpdateSince(block.ReceivedAt)
propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, true)
case core.BlockFutureErr:
futureMeter.Mark(1)
// Weird future block, don't fail, but neither propagate
default:
// Something went very wrong, drop the peer
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
f.dropPeer(peer)
return
}
// Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil {
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
return
}
// If import succeeded, broadcast the block
announceTimer.UpdateSince(block.ReceivedAt)
propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, false)
// Invoke the testing hook if needed
@ -472,9 +790,27 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
}
delete(f.fetching, hash)
}
// Remove any pending completion requests and decrement the DOS counters
for _, announce := range f.fetched[hash] {
f.announces[announce.origin]--
if f.announces[announce.origin] == 0 {
delete(f.announces, announce.origin)
}
}
delete(f.fetched, hash)
// Remove any pending completions and decrement the DOS counters
if announce := f.completing[hash]; announce != nil {
f.announces[announce.origin]--
if f.announces[announce.origin] == 0 {
delete(f.announces, announce.origin)
}
delete(f.completing, hash)
}
}
// forgetBlock removes all traces of a queued block frmo the fetcher's internal
// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *Fetcher) forgetBlock(hash common.Hash) {
if insert := f.queued[hash]; insert != nil {


@ -27,21 +27,39 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = core.GenesisBlockForTesting(testdb, common.Address{}, big.NewInt(0))
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil)
)
// makeChain creates a chain of n blocks starting at and including parent.
// the returned hash chain is ordered head->parent.
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
gen.SetCoinbase(common.Address{seed})
blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// If the block number is multiple of 3, send a bonus transaction to the miner
if parent == genesis && i%3 == 0 {
tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
if err != nil {
panic(err)
}
block.AddTx(tx)
}
// If the block number is a multiple of 5, add a bonus uncle to the block
if i%5 == 0 {
block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
}
})
hashes := make([]common.Hash, n+1)
hashes[len(hashes)-1] = parent.Hash()
@ -60,6 +78,7 @@ type fetcherTester struct {
hashes []common.Hash // Hash chain belonging to the tester
blocks map[common.Hash]*types.Block // Blocks belonging to the tester
drops map[string]bool // Map of peers dropped by the fetcher
lock sync.RWMutex
}
@ -69,6 +88,7 @@ func newTester() *fetcherTester {
tester := &fetcherTester{
hashes: []common.Hash{genesis.Hash()},
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
@ -122,12 +142,14 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
return 0, nil
}
// dropPeer is a nop placeholder for the peer removal.
// dropPeer is an emulator for the peer removal, simply accumulating the various
// peers dropped by the fetcher.
func (f *fetcherTester) dropPeer(peer string) {
f.drops[peer] = true
}
// peerFetcher retrieves a fetcher associated with a simulated peer.
func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
// makeBlockFetcher retrieves a block fetcher associated with a simulated peer.
func (f *fetcherTester) makeBlockFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
@ -142,19 +164,106 @@ func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRe
}
}
// Return on a new thread
go f.fetcher.Filter(blocks)
go f.fetcher.FilterBlocks(blocks)
return nil
}
}
// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
}
// Create a function that returns a header from the closure
return func(hash common.Hash) error {
// Gather the headers to return
headers := make([]*types.Header, 0, 1)
if block, ok := closure[hash]; ok {
headers = append(headers, block.Header())
}
// Return on a new thread
go f.fetcher.FilterHeaders(headers, time.Now().Add(drift))
return nil
}
}
// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
func (f *fetcherTester) makeBodyFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
}
// Create a function that returns blocks from the closure
return func(hashes []common.Hash) error {
// Gather the block bodies to return
transactions := make([][]*types.Transaction, 0, len(hashes))
uncles := make([][]*types.Header, 0, len(hashes))
for _, hash := range hashes {
if block, ok := closure[hash]; ok {
transactions = append(transactions, block.Transactions())
uncles = append(uncles, block.Uncles())
}
}
// Return on a new thread
go f.fetcher.FilterBodies(transactions, uncles, time.Now().Add(drift))
return nil
}
}
// verifyFetchingEvent verifies that a single event arrives on a fetching channel.
func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {
if arrive {
select {
case <-fetching:
case <-time.After(time.Second):
t.Fatalf("fetching timeout")
}
} else {
select {
case <-fetching:
t.Fatalf("fetching invoked")
case <-time.After(10 * time.Millisecond):
}
}
}
// verifyCompletingEvent verifies that a single event arrives on a completing channel.
func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {
if arrive {
select {
case <-completing:
case <-time.After(time.Second):
t.Fatalf("completing timeout")
}
} else {
select {
case <-completing:
t.Fatalf("completing invoked")
case <-time.After(10 * time.Millisecond):
}
}
}
// verifyImportEvent verifies that a single event arrives on an import channel.
func verifyImportEvent(t *testing.T, imported chan *types.Block) {
func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {
if arrive {
select {
case <-imported:
case <-time.After(time.Second):
t.Fatalf("import timeout")
}
} else {
select {
case <-imported:
t.Fatalf("import invoked")
case <-time.After(10 * time.Millisecond):
}
}
}
// verifyImportCount verifies that exactly count number of events arrive on an
@ -164,7 +273,7 @@ func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
select {
case <-imported:
case <-time.After(time.Second):
t.Fatalf("block %d: import timeout", i)
t.Fatalf("block %d: import timeout", i+1)
}
}
verifyImportDone(t, imported)
@ -181,51 +290,78 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {
// Tests that a fetcher accepts block announcements and initiates retrievals for
// them, successfully importing into the local chain.
func TestSequentialAnnouncements(t *testing.T) {
func TestSequentialAnnouncements61(t *testing.T) { testSequentialAnnouncements(t, 61) }
func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }
func testSequentialAnnouncements(t *testing.T, protocol int) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
// Iteratively announce blocks until all are imported
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
verifyImportEvent(t, imported)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
}
// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
func TestConcurrentAnnouncements(t *testing.T) {
func TestConcurrentAnnouncements61(t *testing.T) { testConcurrentAnnouncements(t, 61) }
func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }
func testConcurrentAnnouncements(t *testing.T, protocol int) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
// Assemble a tester with a built in counter for the requests
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
counter := uint32(0)
wrapper := func(hashes []common.Hash) error {
blockWrapper := func(hashes []common.Hash) error {
atomic.AddUint32(&counter, uint32(len(hashes)))
return fetcher(hashes)
return blockFetcher(hashes)
}
headerWrapper := func(hash common.Hash) error {
atomic.AddUint32(&counter, 1)
return headerFetcher(hash)
}
// Iteratively announce blocks until all are imported
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("first", hashes[i], time.Now().Add(-arriveTimeout), wrapper)
tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout+time.Millisecond), wrapper)
tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout-time.Millisecond), wrapper)
verifyImportEvent(t, imported)
if protocol < 62 {
tester.fetcher.Notify("first", hashes[i], 0, time.Now().Add(-arriveTimeout), blockWrapper, nil, nil)
tester.fetcher.Notify("second", hashes[i], 0, time.Now().Add(-arriveTimeout+time.Millisecond), blockWrapper, nil, nil)
tester.fetcher.Notify("second", hashes[i], 0, time.Now().Add(-arriveTimeout-time.Millisecond), blockWrapper, nil, nil)
} else {
tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerWrapper, bodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), nil, headerWrapper, bodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), nil, headerWrapper, bodyFetcher)
}
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
@ -237,56 +373,90 @@ func TestConcurrentAnnouncements(t *testing.T) {
// Tests that announcements arriving while a previous one is being fetched still
// result in a valid import.
func TestOverlappingAnnouncements(t *testing.T) {
func TestOverlappingAnnouncements61(t *testing.T) { testOverlappingAnnouncements(t, 61) }
func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }
func testOverlappingAnnouncements(t *testing.T, protocol int) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
// Iteratively announce blocks, but overlap them continuously
fetching := make(chan []common.Hash)
overlap := 16
imported := make(chan *types.Block, len(hashes)-1)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
for i := 0; i < overlap; i++ {
imported <- nil
}
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
select {
case <-fetching:
case <-imported:
case <-time.After(time.Second):
t.Fatalf("hash %d: announce timeout", len(hashes)-i)
t.Fatalf("block %d: import timeout", len(hashes)-i)
}
}
// Wait for all the imports to complete and check count
verifyImportCount(t, imported, len(hashes)-1)
verifyImportCount(t, imported, overlap)
}
// Tests that announces already being retrieved will not be duplicated.
func TestPendingDeduplication(t *testing.T) {
func TestPendingDeduplication61(t *testing.T) { testPendingDeduplication(t, 61) }
func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }
func testPendingDeduplication(t *testing.T, protocol int) {
// Create a hash and corresponding block
hashes, blocks := makeChain(1, 0, genesis)
// Assemble a tester with a built in counter and delayed fetcher
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
delay := 50 * time.Millisecond
counter := uint32(0)
wrapper := func(hashes []common.Hash) error {
blockWrapper := func(hashes []common.Hash) error {
atomic.AddUint32(&counter, uint32(len(hashes)))
// Simulate a long running fetch
go func() {
time.Sleep(delay)
fetcher(hashes)
blockFetcher(hashes)
}()
return nil
}
headerWrapper := func(hash common.Hash) error {
atomic.AddUint32(&counter, 1)
// Simulate a long running fetch
go func() {
time.Sleep(delay)
headerFetcher(hash)
}()
return nil
}
// Announce the same block many times until it's fetched (wait for any pending ops)
for tester.getBlock(hashes[0]) == nil {
tester.fetcher.Notify("repeater", hashes[0], time.Now().Add(-arriveTimeout), wrapper)
if protocol < 62 {
tester.fetcher.Notify("repeater", hashes[0], 0, time.Now().Add(-arriveTimeout), blockWrapper, nil, nil)
} else {
tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerWrapper, bodyFetcher)
}
time.Sleep(time.Millisecond)
}
time.Sleep(delay)
@ -302,14 +472,21 @@ func TestPendingDeduplication(t *testing.T) {
// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
func TestRandomArrivalImport(t *testing.T) {
func TestRandomArrivalImport61(t *testing.T) { testRandomArrivalImport(t, 61) }
func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }
func testRandomArrivalImport(t *testing.T, protocol int) {
// Create a chain of blocks to import, and choose one to delay
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
// Iteratively announce blocks, skipping one entry
imported := make(chan *types.Block, len(hashes)-1)
@ -317,25 +494,40 @@ func TestRandomArrivalImport(t *testing.T) {
for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
time.Sleep(time.Millisecond)
}
}
// Finally announce the skipped entry and check full import
tester.fetcher.Notify("valid", hashes[skip], time.Now().Add(-arriveTimeout), fetcher)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[skip], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
verifyImportCount(t, imported, len(hashes)-1)
}
// Tests that direct block enqueues (due to block propagation vs. hash announce)
// are correctly scheduled, filling in import queue gaps.
func TestQueueGapFill(t *testing.T) {
func TestQueueGapFill61(t *testing.T) { testQueueGapFill(t, 61) }
func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }
func testQueueGapFill(t *testing.T, protocol int) {
// Create a chain of blocks to import, and choose one to not announce at all
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
// Iteratively announce blocks, skipping one entry
imported := make(chan *types.Block, len(hashes)-1)
@ -343,7 +535,11 @@ func TestQueueGapFill(t *testing.T) {
for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
time.Sleep(time.Millisecond)
}
}
@ -354,13 +550,20 @@ func TestQueueGapFill(t *testing.T) {
// Tests that blocks arriving from various sources (multiple propagations, hash
// announces, etc) do not get scheduled for import multiple times.
func TestImportDeduplication(t *testing.T) {
func TestImportDeduplication61(t *testing.T) { testImportDeduplication(t, 61) }
func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }
func testImportDeduplication(t *testing.T, protocol int) {
// Create two blocks to import (one for duplication, the other for stalling)
hashes, blocks := makeChain(2, 0, genesis)
// Create the tester and wrap the importer with a counter
tester := newTester()
fetcher := tester.makeFetcher(blocks)
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
counter := uint32(0)
tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
@ -374,7 +577,11 @@ func TestImportDeduplication(t *testing.T) {
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
// Announce the duplicating block, wait for retrieval, and also propagate directly
tester.fetcher.Notify("valid", hashes[0], time.Now().Add(-arriveTimeout), fetcher)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[0], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
<-fetching
tester.fetcher.Enqueue("valid", blocks[hashes[0]])
@ -391,35 +598,157 @@ func TestImportDeduplication(t *testing.T) {
}
// Tests that blocks with numbers much lower or higher than our current head get
// discarded no prevent wasting resources on useless blocks from faulty peers.
func TestDistantDiscarding(t *testing.T) {
// Create a long chain to import
// discarded to prevent wasting resources on useless blocks from faulty peers.
func TestDistantPropagationDiscarding(t *testing.T) {
// Create a long chain to import and define the discard boundaries
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
// Create a tester and simulate a head block being the middle of the above chain
tester := newTester()
tester.hashes = []common.Hash{head}
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
// Ensure that a block with a lower number than the threshold is discarded
tester.fetcher.Enqueue("lower", blocks[hashes[0]])
tester.fetcher.Enqueue("lower", blocks[hashes[low]])
time.Sleep(10 * time.Millisecond)
if !tester.fetcher.queue.Empty() {
t.Fatalf("fetcher queued stale block")
}
// Ensure that a block with a higher number than the threshold is discarded
tester.fetcher.Enqueue("higher", blocks[hashes[len(hashes)-1]])
tester.fetcher.Enqueue("higher", blocks[hashes[high]])
time.Sleep(10 * time.Millisecond)
if !tester.fetcher.queue.Empty() {
t.Fatalf("fetcher queued future block")
}
}
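// Illustrative sketch, not part of this changeset: the two distance based
// discarding tests around this point exercise the fetcher's acceptance
// window. A propagated block or announcement is only queued if its number
// lies within [head-maxUncleDist, head+maxQueueDist]; the helper name below
// is hypothetical, while maxUncleDist and maxQueueDist are the fetcher's own
// package constants.
func withinAcceptanceWindow(head, number uint64) bool {
	dist := int64(number) - int64(head)
	return dist >= -int64(maxUncleDist) && dist <= int64(maxQueueDist)
}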
// Tests that announcements with numbers much lower or higher than our current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }
func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
// Create a long chain to import and define the discard boundaries
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
// Create a tester and simulate a head block being the middle of the above chain
tester := newTester()
tester.hashes = []common.Hash{head}
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetching := make(chan struct{}, 2)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }
// Ensure that a block with a lower number than the threshold is discarded
tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
select {
case <-time.After(50 * time.Millisecond):
case <-fetching:
t.Fatalf("fetcher requested stale header")
}
// Ensure that a block with a higher number than the threshold is discarded
tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
select {
case <-time.After(50 * time.Millisecond):
case <-fetching:
t.Fatalf("fetcher requested future header")
}
}
// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }
func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
// Create a single block to import and check numbers against
hashes, blocks := makeChain(1, 0, genesis)
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
// Announce a block with a bad number, check for immediate drop
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
verifyImportEvent(t, imported, false)
if !tester.drops["bad"] {
t.Fatalf("peer with invalid numbered announcement not dropped")
}
// Make sure a good announcement passes without a drop
tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
verifyImportEvent(t, imported, true)
if tester.drops["good"] {
t.Fatalf("peer with valid numbered announcement dropped")
}
verifyImportDone(t, imported)
}
// Tests that if a block is empty (i.e. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }
func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
// Create a chain of blocks to import
hashes, blocks := makeChain(32, 0, genesis)
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
// Add a monitoring hook for all internal events
fetching := make(chan []common.Hash)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
completing := make(chan []common.Hash)
tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
// Iteratively announce blocks until all are imported
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
// All announces should fetch the header
verifyFetchingEvent(t, fetching, true)
// Only blocks with data contents should request bodies
verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0)
// Regardless of the construct, the import should succeed
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
}
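// Illustrative sketch, not part of this changeset, of the short circuit the
// test above verifies: a header whose transaction and uncle roots equal the
// well known empty values needs no body retrieval and can be promoted to a
// full block on its own. The helper name is hypothetical; the types calls
// mirror the check the fetcher is expected to perform.
func assembleIfEmpty(header *types.Header) *types.Block {
	if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash(nil) {
		return types.NewBlockWithHeader(header) // header-only block, no completion needed
	}
	return nil // non-empty block: a body request is still required
}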
// Tests that a peer is unable to use unbounded memory by sending infinite
// block announcements to a node, but that even in the face of such an attack,
// the fetcher remains operational.
func TestHashMemoryExhaustionAttack(t *testing.T) {
func TestHashMemoryExhaustionAttack61(t *testing.T) { testHashMemoryExhaustionAttack(t, 61) }
func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
// Create a tester with instrumented import hooks
tester := newTester()
@ -429,17 +758,29 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
// Create a valid chain and an infinite junk chain
targetBlocks := hashLimit + 2*maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
valid := tester.makeFetcher(blocks)
validBlockFetcher := tester.makeBlockFetcher(blocks)
validHeaderFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
validBodyFetcher := tester.makeBodyFetcher(blocks, 0)
attack, _ := makeChain(targetBlocks, 0, unknownBlock)
attacker := tester.makeFetcher(nil)
attackerBlockFetcher := tester.makeBlockFetcher(nil)
attackerHeaderFetcher := tester.makeHeaderFetcher(nil, -gatherSlack)
attackerBodyFetcher := tester.makeBodyFetcher(nil, 0)
// Feed the tester a huge hashset from the attacker, and a limited one from the valid peer
for i := 0; i < len(attack); i++ {
if i < maxQueueDist {
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], time.Now(), valid)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], 0, time.Now(), validBlockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), nil, validHeaderFetcher, validBodyFetcher)
}
}
if protocol < 62 {
tester.fetcher.Notify("attacker", attack[i], 0, time.Now(), attackerBlockFetcher, nil, nil)
} else {
tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher)
}
tester.fetcher.Notify("attacker", attack[i], time.Now(), attacker)
}
if len(tester.fetcher.announced) != hashLimit+maxQueueDist {
t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist)
@ -449,8 +790,12 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
// Feed the remaining valid hashes to ensure DOS protection state remains clean
for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), valid)
verifyImportEvent(t, imported)
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), validBlockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, validHeaderFetcher, validBodyFetcher)
}
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
}
@ -498,7 +843,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
// Insert the remaining blocks in chunks to ensure clean DOS protection
for i := maxQueueDist; i < len(hashes)-1; i++ {
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]])
verifyImportEvent(t, imported)
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
}


@ -23,10 +23,24 @@ import (
)
var (
announceMeter = metrics.NewMeter("eth/sync/RemoteAnnounces")
announceTimer = metrics.NewTimer("eth/sync/LocalAnnounces")
broadcastMeter = metrics.NewMeter("eth/sync/RemoteBroadcasts")
broadcastTimer = metrics.NewTimer("eth/sync/LocalBroadcasts")
discardMeter = metrics.NewMeter("eth/sync/DiscardedBlocks")
futureMeter = metrics.NewMeter("eth/sync/FutureBlocks")
propAnnounceInMeter = metrics.NewMeter("eth/fetcher/prop/announces/in")
propAnnounceOutTimer = metrics.NewTimer("eth/fetcher/prop/announces/out")
propAnnounceDropMeter = metrics.NewMeter("eth/fetcher/prop/announces/drop")
propAnnounceDOSMeter = metrics.NewMeter("eth/fetcher/prop/announces/dos")
propBroadcastInMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/in")
propBroadcastOutTimer = metrics.NewTimer("eth/fetcher/prop/broadcasts/out")
propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop")
propBroadcastDOSMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos")
blockFetchMeter = metrics.NewMeter("eth/fetcher/fetch/blocks")
headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers")
bodyFetchMeter = metrics.NewMeter("eth/fetcher/fetch/bodies")
blockFilterInMeter = metrics.NewMeter("eth/fetcher/filter/blocks/in")
blockFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/blocks/out")
headerFilterInMeter = metrics.NewMeter("eth/fetcher/filter/headers/in")
headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out")
bodyFilterInMeter = metrics.NewMeter("eth/fetcher/filter/bodies/in")
bodyFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/bodies/out")
)
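// Illustrative sketch, not part of this changeset, of how the instruments
// above are meant to be driven from the fetcher: meters count discrete
// events, timers record elapsed durations. Mark and UpdateSince are methods
// of the wrapped go-metrics types; the call sites shown here are hypothetical.
//
//	propAnnounceInMeter.Mark(1)                          // an announcement arrived from a peer
//	headerFetchMeter.Mark(1)                             // a header retrieval request went out
//	propBroadcastOutTimer.UpdateSince(block.ReceivedAt)  // latency from block arrival to re-broadcast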


@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@ -36,10 +37,10 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
// This is the target maximum size of returned blocks for the
// getBlocks message. The reply message may exceed it
// if a single block is larger than the limit.
const maxBlockRespSize = 2 * 1024 * 1024
const (
softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
)
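// Illustrative note, not part of this changeset: the two constants above
// bound replies by byte size as well as by item count. Taken alone, the byte
// bound would allow roughly softResponseLimit/estHeaderRlpSize, i.e.
// 2*1024*1024/500 ≈ 4194 headers per response, so in practice the
// downloader's per-request header count limit is reached first.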
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
@ -59,9 +60,10 @@ func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(has
func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) }
type ProtocolManager struct {
protVer, netId int
txpool txPool
chainman *core.ChainManager
chaindb ethdb.Database
downloader *downloader.Downloader
fetcher *fetcher.Fetcher
peers *peerSet
@ -85,17 +87,17 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// of operating with the ethereum network.
func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager) *ProtocolManager {
func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager, chaindb ethdb.Database) *ProtocolManager {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
eventMux: mux,
txpool: txpool,
chainman: chainman,
chaindb: chaindb,
peers: newPeerSet(),
newPeerCh: make(chan *peer, 1),
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
netId: networkId,
}
// Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions))
@ -114,10 +116,10 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po
}
}
// Construct the different synchronisation mechanisms
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.InsertChain, manager.removePeer)
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.GetTd, manager.chainman.InsertChain, manager.removePeer)
validator := func(block *types.Block, parent *types.Block) error {
return core.ValidateHeader(pow, block.Header(), parent, true)
return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
}
heighter := func() uint64 {
return manager.chainman.CurrentBlock().NumberU64()
@ -176,7 +178,7 @@ func (pm *ProtocolManager) Stop() {
}
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, nv, p, rw)
return newPeer(pv, nv, p, newMeteredMsgWriter(rw))
}
// handle is the callback invoked to manage the life cycle of an eth peer. When
@ -190,6 +192,9 @@ func (pm *ProtocolManager) handle(p *peer) error {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
return err
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
// Register the peer locally
glog.V(logger.Detail).Infof("%v: adding peer", p)
if err := pm.peers.Register(p); err != nil {
@ -199,7 +204,9 @@ func (pm *ProtocolManager) handle(p *peer) error {
defer pm.removePeer(p.id)
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil {
if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks,
p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies); err != nil {
return err
}
// Propagate existing transactions. new transactions appearing
@ -230,12 +237,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
defer msg.Discard()
// Handle the message depending on its contents
switch msg.Code {
case StatusMsg:
switch {
case msg.Code == StatusMsg:
// Status messages should never arrive after the handshake
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
case GetBlockHashesMsg:
case p.version < eth62 && msg.Code == GetBlockHashesMsg:
// Retrieve the number of hashes to return and from which origin hash
var request getBlockHashesData
if err := msg.Decode(&request); err != nil {
@ -251,7 +258,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
return p.SendBlockHashes(hashes)
case GetBlockHashesFromNumberMsg:
case p.version < eth62 && msg.Code == GetBlockHashesFromNumberMsg:
// Retrieve and decode the number of hashes to return and from which origin number
var request getBlockHashesFromNumberData
if err := msg.Decode(&request); err != nil {
@ -278,24 +285,19 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
return p.SendBlockHashes(hashes)
case BlockHashesMsg:
case p.version < eth62 && msg.Code == BlockHashesMsg:
// A batch of hashes arrived to one of our previous requests
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
reqHashInPacketsMeter.Mark(1)
var hashes []common.Hash
if err := msgStream.Decode(&hashes); err != nil {
if err := msg.Decode(&hashes); err != nil {
break
}
reqHashInTrafficMeter.Mark(int64(32 * len(hashes)))
// Deliver them all to the downloader for queuing
err := pm.downloader.DeliverHashes(p.id, hashes)
err := pm.downloader.DeliverHashes61(p.id, hashes)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
case GetBlocksMsg:
case p.version < eth62 && msg.Code == GetBlocksMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
@ -305,94 +307,279 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
var (
hash common.Hash
bytes common.StorageSize
hashes []common.Hash
blocks []*types.Block
)
for {
for len(blocks) < downloader.MaxBlockFetch && bytes < softResponseLimit {
// Retrieve the hash of the next block
err := msgStream.Decode(&hash)
if err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
hashes = append(hashes, hash)
// Retrieve the requested block, stopping if enough was found
if block := pm.chainman.GetBlock(hash); block != nil {
blocks = append(blocks, block)
bytes += block.Size()
if len(blocks) >= downloader.MaxBlockFetch || bytes > maxBlockRespSize {
break
}
}
}
if glog.V(logger.Detail) && len(blocks) == 0 && len(hashes) > 0 {
list := "["
for _, hash := range hashes {
list += fmt.Sprintf("%x, ", hash[:4])
}
list = list[:len(list)-2] + "]"
glog.Infof("%v: no blocks found for requested hashes %s", p, list)
}
return p.SendBlocks(blocks)
case BlocksMsg:
case p.version < eth62 && msg.Code == BlocksMsg:
// Decode the arrived block message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
reqBlockInPacketsMeter.Mark(1)
var blocks []*types.Block
if err := msgStream.Decode(&blocks); err != nil {
if err := msg.Decode(&blocks); err != nil {
glog.V(logger.Detail).Infoln("Decode error", err)
blocks = nil
}
// Update the receive timestamp of each block
for _, block := range blocks {
reqBlockInTrafficMeter.Mark(block.Size().Int64())
block.ReceivedAt = msg.ReceivedAt
}
// Filter out any explicitly requested blocks, deliver the rest to the downloader
if blocks := pm.fetcher.Filter(blocks); len(blocks) > 0 {
pm.downloader.DeliverBlocks(p.id, blocks)
if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
pm.downloader.DeliverBlocks61(p.id, blocks)
}
case NewBlockHashesMsg:
// Retrieve and deseralize the remote new block hashes notification
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
var hashes []common.Hash
if err := msgStream.Decode(&hashes); err != nil {
// Block header query, collect the requested headers and reply
case p.version >= eth62 && msg.Code == GetBlockHeadersMsg:
// Decode the complex header query
var query getBlockHeadersData
if err := msg.Decode(&query); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
// Gather headers until the fetch or network limits are reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
// Retrieve the next header satisfying the query
var origin *types.Header
if query.Origin.Hash != (common.Hash{}) {
origin = pm.chainman.GetHeader(query.Origin.Hash)
} else {
origin = pm.chainman.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
propHashInPacketsMeter.Mark(1)
propHashInTrafficMeter.Mark(int64(32 * len(hashes)))
headers = append(headers, origin)
bytes += estHeaderRlpSize
// Mark the hashes as present at the remote node
// Advance to the next header of the query
switch {
case query.Origin.Hash != (common.Hash{}) && query.Reverse:
// Hash based traversal towards the genesis block
for i := 0; i < int(query.Skip)+1; i++ {
if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil {
query.Origin.Hash = header.ParentHash
} else {
unknown = true
break
}
}
case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
// Hash based traversal towards the leaf block
if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
query.Origin.Hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
case query.Reverse:
// Number based traversal towards the genesis block
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= (query.Skip + 1)
} else {
unknown = true
}
case !query.Reverse:
// Number based traversal towards the leaf block
query.Origin.Number += (query.Skip + 1)
}
}
return p.SendBlockHeaders(headers)
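// Illustrative note, not part of this changeset: with the traversal above, a
// query anchored at block N with Skip S and Amount A yields the headers of
//	N, N+(S+1), N+2(S+1), ..., N+(A-1)(S+1)
// and the mirrored sequence towards the genesis when Reverse is set,
// stopping early if the origin would run past either chain end. For example
// Origin=10, Skip=3, Amount=3 selects blocks 10, 14 and 18, matching the
// skip list cases in the new handler tests.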
case p.version >= eth62 && msg.Code == BlockHeadersMsg:
// A batch of headers arrived to one of our previous requests
var headers []*types.Header
if err := msg.Decode(&headers); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Filter out any explicitly requested headers, deliver the rest to the downloader
filter := len(headers) == 1
if filter {
headers = pm.fetcher.FilterHeaders(headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}
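// Illustrative note, not part of this changeset: the len(headers) == 1
// heuristic above relies on the fetcher only ever requesting a single header
// per announcement, so one element replies are offered to the fetcher filter
// first, and whatever it does not claim, along with every larger batch, is
// handed to the downloader.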
case p.version >= eth62 && msg.Code == BlockBodiesMsg:
// A batch of block bodies arrived to one of our previous requests
var request blockBodiesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
transactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))
for i, body := range request {
transactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
if transactions, uncles := pm.fetcher.FilterBodies(transactions, uncles, time.Now()); len(transactions) > 0 || len(uncles) > 0 {
err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}
case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather block bodies until the fetch or network limits are reached
var (
hash common.Hash
bytes int
bodies []rlp.RawValue
)
for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
// Retrieve the hash of the next block
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested block body, stopping if enough was found
if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 {
bodies = append(bodies, data)
bytes += len(data)
}
}
return p.SendBlockBodiesRLP(bodies)
case p.version >= eth63 && msg.Code == GetNodeDataMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather state data until the fetch or network limits are reached
var (
hash common.Hash
bytes int
data [][]byte
)
for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
// Retrieve the hash of the next state entry
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested state entry, stopping if enough was found
if entry, err := pm.chaindb.Get(hash.Bytes()); err == nil {
data = append(data, entry)
bytes += len(entry)
}
}
return p.SendNodeData(data)
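// Illustrative note, not part of this changeset: the raw chaindb.Get above
// resolves node data requests because state trie nodes are stored keyed by
// the hash of their RLP encoding, so the 32 byte hash received on the wire
// doubles as the database key and no separate index is needed.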
case p.version >= eth63 && msg.Code == GetReceiptsMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather receipts until the fetch or network limits are reached
var (
hash common.Hash
bytes int
receipts []*types.Receipt
)
for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptsFetch {
// Retrieve the hash of the next transaction receipt
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested receipt, stopping if enough was found
if receipt := core.GetReceipt(pm.chaindb, hash); receipt != nil {
receipts = append(receipts, receipt)
bytes += len(receipt.RlpEncode())
}
}
return p.SendReceipts(receipts)
case msg.Code == NewBlockHashesMsg:
// Retrieve and deserialize the remote new block hashes notification
type announce struct {
Hash common.Hash
Number uint64
}
var announces = []announce{}
if p.version < eth62 {
// We're running the old protocol, make block number unknown (0)
var hashes []common.Hash
if err := msg.Decode(&hashes); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
for _, hash := range hashes {
p.MarkBlock(hash)
p.SetHead(hash)
announces = append(announces, announce{hash, 0})
}
} else {
// Otherwise extract both block hash and number
var request newBlockHashesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
for _, block := range request {
announces = append(announces, announce{block.Hash, block.Number})
}
}
// Mark the hashes as present at the remote node
for _, block := range announces {
p.MarkBlock(block.Hash)
p.SetHead(block.Hash)
}
// Schedule all the unknown hashes for retrieval
unknown := make([]common.Hash, 0, len(hashes))
for _, hash := range hashes {
if !pm.chainman.HasBlock(hash) {
unknown = append(unknown, hash)
unknown := make([]announce, 0, len(announces))
for _, block := range announces {
if !pm.chainman.HasBlock(block.Hash) {
unknown = append(unknown, block)
}
}
for _, hash := range unknown {
pm.fetcher.Notify(p.id, hash, time.Now(), p.RequestBlocks)
for _, block := range unknown {
if p.version < eth62 {
pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestBlocks, nil, nil)
} else {
pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), nil, p.RequestOneHeader, p.RequestBodies)
}
}
case NewBlockMsg:
case msg.Code == NewBlockMsg:
// Retrieve and decode the propagated block
var request newBlockData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
propBlockInPacketsMeter.Mark(1)
propBlockInTrafficMeter.Mark(request.Block.Size().Int64())
if err := request.Block.ValidateFields(); err != nil {
return errResp(ErrDecode, "block validation %v: %v", msg, err)
}
@ -421,13 +608,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}
case TxMsg:
case msg.Code == TxMsg:
// Transactions arrived, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
propTxnInPacketsMeter.Mark(1)
for i, tx := range txs {
// Validate and mark the remote transaction
if tx == nil {
@ -436,7 +622,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.MarkTransaction(tx.Hash())
// Log its arrival for later analysis
propTxnInTrafficMeter.Mark(tx.Size().Int64())
jsonlogger.LogJson(&logger.EthTxReceived{
TxHash: tx.Hash().Hex(),
RemoteId: p.ID().String(),
@ -461,7 +646,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
var td *big.Int
if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil {
td = new(big.Int).Add(parent.Td, block.Difficulty())
td = new(big.Int).Add(block.Difficulty(), pm.chainman.GetTd(block.ParentHash()))
} else {
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
return
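// Illustrative note, not part of this changeset: the replacement td
// assignment above encodes the total difficulty recurrence
//	td(block) = td(parent) + difficulty(block)
// with the parent's TD now looked up through the chain manager's GetTd
// instead of being read off the parent block itself.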
@ -476,7 +661,11 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
// Otherwise if the block is indeed in our own chain, announce it
if pm.chainman.HasBlock(hash) {
for _, peer := range peers {
peer.SendNewBlockHashes([]common.Hash{hash})
if peer.version < eth62 {
peer.SendNewBlockHashes61([]common.Hash{hash})
} else {
peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
}
}
glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
}

eth/handler_test.go (new file, 522 lines)

@ -0,0 +1,522 @@
package eth
import (
"fmt"
"math/big"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
)
// Tests that hashes can be retrieved from a remote chain by hashes in reverse
// order.
func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) }
func testGetBlockHashes(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Create a batch of tests for various scenarios
limit := downloader.MaxHashFetch
tests := []struct {
origin common.Hash
number int
result int
}{
{common.Hash{}, 1, 0}, // Make sure non existent hashes don't return results
{pm.chainman.Genesis().Hash(), 1, 0}, // There are no hashes to retrieve up from the genesis
{pm.chainman.GetBlockByNumber(5).Hash(), 5, 5}, // All the hashes including the genesis requested
{pm.chainman.GetBlockByNumber(5).Hash(), 10, 5}, // More hashes than available till the genesis requested
{pm.chainman.GetBlockByNumber(100).Hash(), 10, 10}, // All hashes available from the middle of the chain
{pm.chainman.CurrentBlock().Hash(), 10, 10}, // All hashes available from the head of the chain
{pm.chainman.CurrentBlock().Hash(), limit, limit}, // Request the maximum allowed hash count
{pm.chainman.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Assemble the hash response we would like to receive
resp := make([]common.Hash, tt.result)
if len(resp) > 0 {
from := pm.chainman.GetBlock(tt.origin).NumberU64() - 1
for j := 0; j < len(resp); j++ {
resp[j] = pm.chainman.GetBlockByNumber(uint64(int(from) - j)).Hash()
}
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x03, getBlockHashesData{tt.origin, uint64(tt.number)})
if err := p2p.ExpectMsg(peer.app, 0x04, resp); err != nil {
t.Errorf("test %d: block hashes mismatch: %v", i, err)
}
}
}
// Tests that hashes can be retrieved from a remote chain by numbers in forward
// order.
func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) }
func testGetBlockHashesFromNumber(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Create a batch of tests for various scenarios
limit := downloader.MaxHashFetch
tests := []struct {
origin uint64
number int
result int
}{
{pm.chainman.CurrentBlock().NumberU64() + 1, 1, 0}, // Out of bounds requests should return empty
{pm.chainman.CurrentBlock().NumberU64(), 1, 1}, // Make sure the head hash can be retrieved
{pm.chainman.CurrentBlock().NumberU64() - 4, 5, 5}, // All hashes, including the head hash requested
{pm.chainman.CurrentBlock().NumberU64() - 4, 10, 5}, // More hashes requested than available till the head
{pm.chainman.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain
{0, 10, 10}, // All hashes available from the root of the chain
{0, limit, limit}, // Request the maximum allowed hash count
{0, limit + 1, limit}, // Request more than the maximum allowed hash count
{0, 1, 1}, // Make sure the genesis hash can be retrieved
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Assemble the hash response we would like to receive
resp := make([]common.Hash, tt.result)
for j := 0; j < len(resp); j++ {
resp[j] = pm.chainman.GetBlockByNumber(tt.origin + uint64(j)).Hash()
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x08, getBlockHashesFromNumberData{tt.origin, uint64(tt.number)})
if err := p2p.ExpectMsg(peer.app, 0x04, resp); err != nil {
t.Errorf("test %d: block hashes mismatch: %v", i, err)
}
}
}
// Tests that blocks can be retrieved from a remote chain based on their hashes.
func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) }
func testGetBlocks(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Create a batch of tests for various scenarios
limit := downloader.MaxBlockFetch
tests := []struct {
random int // Number of blocks to fetch randomly from the chain
explicit []common.Hash // Explicitly requested blocks
available []bool // Availability of explicitly requested blocks
expected int // Total number of existing blocks to expect
}{
{1, nil, nil, 1}, // A single random block should be retrievable
{10, nil, nil, 10}, // Multiple random blocks should be retrievable
{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
{0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned
// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
common.Hash{},
pm.chainman.GetBlockByNumber(1).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(10).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(100).Hash(),
common.Hash{},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Collect the hashes to request, and the response to expect
hashes, seen := []common.Hash{}, make(map[int64]bool)
blocks := []*types.Block{}
for j := 0; j < tt.random; j++ {
for {
num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64()))
if !seen[num] {
seen[num] = true
block := pm.chainman.GetBlockByNumber(uint64(num))
hashes = append(hashes, block.Hash())
if len(blocks) < tt.expected {
blocks = append(blocks, block)
}
break
}
}
}
for j, hash := range tt.explicit {
hashes = append(hashes, hash)
if tt.available[j] && len(blocks) < tt.expected {
blocks = append(blocks, pm.chainman.GetBlock(hash))
}
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x05, hashes)
if err := p2p.ExpectMsg(peer.app, 0x06, blocks); err != nil {
t.Errorf("test %d: blocks mismatch: %v", i, err)
}
}
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }
func testGetBlockHeaders(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Create a "random" unknown hash for testing
var unknown common.Hash
for i, _ := range unknown {
unknown[i] = byte(i)
}
// Create a batch of tests for various scenarios
limit := uint64(downloader.MaxHeaderFetch)
tests := []struct {
query *getBlockHeadersData // The query to execute for header retrieval
expect []common.Hash // The hashes of the blocks whose headers are expected
}{
// A single random block should be retrievable by hash and number too
{
&getBlockHeadersData{Origin: hashOrNumber{Hash: pm.chainman.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()},
},
// Multiple headers should be retrievable in both directions
{
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 1).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 2).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 1).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 2).Hash(),
},
},
// Multiple headers with skip lists should be retrievable
{
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 4).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 8).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 4).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 8).Hash(),
},
},
// The chain endpoints should be retrievable
{
&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{pm.chainman.GetBlockByNumber(0).Hash()},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64()}, Amount: 1},
[]common.Hash{pm.chainman.CurrentBlock().Hash()},
},
// Ensure protocol limits are honored
{
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
pm.chainman.GetBlockHashesFromHash(pm.chainman.CurrentBlock().Hash(), limit),
},
// Check that requesting more than available is handled gracefully
{
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(),
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64()).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(4).Hash(),
pm.chainman.GetBlockByNumber(0).Hash(),
},
},
// Check that requesting more than available is handled gracefully, even if mid skip
{
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(),
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 1).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(4).Hash(),
pm.chainman.GetBlockByNumber(1).Hash(),
},
},
// Check that non existing headers aren't returned
{
&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() + 1}, Amount: 1},
[]common.Hash{},
},
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Collect the headers to expect in the response
headers := []*types.Header{}
for _, hash := range tt.expect {
headers = append(headers, pm.chainman.GetBlock(hash).Header())
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x03, tt.query)
if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
}
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) }
func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }
func testGetBlockBodies(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxBlockFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Create a batch of tests for various scenarios
limit := downloader.MaxBlockFetch
tests := []struct {
random int // Number of blocks to fetch randomly from the chain
explicit []common.Hash // Explicitly requested blocks
available []bool // Availability of explicitly requested blocks
expected int // Total number of existing blocks to expect
}{
{1, nil, nil, 1}, // A single random block should be retrievable
{10, nil, nil, 10}, // Multiple random blocks should be retrievable
{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
{0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned
// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
common.Hash{},
pm.chainman.GetBlockByNumber(1).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(10).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(100).Hash(),
common.Hash{},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Collect the hashes to request, and the response to expect
hashes, seen := []common.Hash{}, make(map[int64]bool)
bodies := []*blockBody{}
for j := 0; j < tt.random; j++ {
for {
num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64()))
if !seen[num] {
seen[num] = true
block := pm.chainman.GetBlockByNumber(uint64(num))
hashes = append(hashes, block.Hash())
if len(bodies) < tt.expected {
bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
}
break
}
}
}
for j, hash := range tt.explicit {
hashes = append(hashes, hash)
if tt.available[j] && len(bodies) < tt.expected {
block := pm.chainman.GetBlock(hash)
bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
}
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x05, hashes)
if err := p2p.ExpectMsg(peer.app, 0x06, bodies); err != nil {
t.Errorf("test %d: bodies mismatch: %v", i, err)
}
}
}
// Tests that the node state database can be retrieved based on hashes.
func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }
func testGetNodeData(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey)
tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := block.PrevBlock(1).Header()
b2.Extra = []byte("foo")
block.AddUncle(b2)
b3 := block.PrevBlock(2).Header()
b3.Extra = []byte("foo")
block.AddUncle(b3)
}
}
// Assemble the test environment
pm := newTestProtocolManager(4, generator, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Fetch for now the entire chain db
hashes := []common.Hash{}
for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() {
hashes = append(hashes, common.BytesToHash(key))
}
p2p.Send(peer.app, 0x0d, hashes)
msg, err := peer.app.ReadMsg()
if err != nil {
t.Fatalf("failed to read node data response: %v", err)
}
if msg.Code != 0x0e {
t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, 0x0e)
}
var data [][]byte
if err := msg.Decode(&data); err != nil {
t.Fatalf("failed to decode response node data: %v", err)
}
// Verify that all hashes correspond to the requested data, and reconstruct a state tree
for i, want := range hashes {
if hash := crypto.Sha3Hash(data[i]); hash != want {
t.Errorf("data hash mismatch: have %x, want %x", hash, want)
}
}
statedb, _ := ethdb.NewMemDatabase()
for i := 0; i < len(data); i++ {
statedb.Put(hashes[i].Bytes(), data[i])
}
accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr}
for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ {
trie := state.New(pm.chainman.GetBlockByNumber(i).Root(), statedb)
for j, acc := range accounts {
bw := pm.chainman.State().GetBalance(acc)
bh := trie.GetBalance(acc)
if (bw != nil && bh == nil) || (bw == nil && bh != nil) {
t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
}
if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
}
}
}
}
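The reconstruction step above, pulled out as a standalone sketch for clarity. This is editor-added illustration, not part of the diff; hashes, data, root and addr are hypothetical placeholders for the values the test derives from the peer response and the chain.

// Rebuild a throwaway state database from the (hash, node) pairs a peer sent,
// then open the trie at a known root and read an account balance from it.
statedb, _ := ethdb.NewMemDatabase()
for i, hash := range hashes {
	statedb.Put(hash.Bytes(), data[i])
}
trie := state.New(root, statedb)
balance := trie.GetBalance(addr)
_ = balance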
// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) }
func testGetReceipt(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey)
tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := block.PrevBlock(1).Header()
b2.Extra = []byte("foo")
block.AddUncle(b2)
b3 := block.PrevBlock(2).Header()
b3.Extra = []byte("foo")
block.AddUncle(b3)
}
}
// Assemble the test environment
pm := newTestProtocolManager(4, generator, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()
// Collect the hashes to request, and the response to expect
hashes := []common.Hash{}
for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ {
for _, tx := range pm.chainman.GetBlockByNumber(i).Transactions() {
hashes = append(hashes, tx.Hash())
}
}
receipts := make([]*types.Receipt, len(hashes))
for i, hash := range hashes {
receipts[i] = core.GetReceipt(pm.chaindb, hash)
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x0f, hashes)
if err := p2p.ExpectMsg(peer.app, 0x10, receipts); err != nil {
t.Errorf("receipts mismatch: %v", err)
}
}

eth/helper_test.go

@ -0,0 +1,147 @@
// This file contains some shared testing functionality, common to multiple
// different files and modules being tested.
package eth
import (
"crypto/rand"
"math/big"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
)
var (
testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
testBankFunds = big.NewInt(1000000)
)
// newTestProtocolManager creates a new protocol manager for testing purposes,
// with the given number of blocks already known, and potential notification
// channels for different events.
func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager {
var (
evmux = new(event.TypeMux)
pow = new(core.FakePow)
db, _ = ethdb.NewMemDatabase()
genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds})
chainman, _ = core.NewChainManager(db, pow, evmux)
blockproc = core.NewBlockProcessor(db, pow, chainman, evmux)
)
chainman.SetProcessor(blockproc)
if _, err := chainman.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil {
panic(err)
}
pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, chainman, db)
pm.Start()
return pm
}
// testTxPool is a fake, helper transaction pool for testing purposes
type testTxPool struct {
pool []*types.Transaction // Collection of all transactions
added chan<- []*types.Transaction // Notification channel for new transactions
lock sync.RWMutex // Protects the transaction pool
}
// AddTransactions appends a batch of transactions to the pool, and notifies any
// listeners if the addition channel is non nil
func (p *testTxPool) AddTransactions(txs []*types.Transaction) {
p.lock.Lock()
defer p.lock.Unlock()
p.pool = append(p.pool, txs...)
if p.added != nil {
p.added <- txs
}
}
// GetTransactions returns all the transactions known to the pool
func (p *testTxPool) GetTransactions() types.Transactions {
p.lock.RLock()
defer p.lock.RUnlock()
txs := make([]*types.Transaction, len(p.pool))
copy(txs, p.pool)
return txs
}
// newTestTransaction creates a new dummy transaction.
func newTestTransaction(from *crypto.Key, nonce uint64, datasize int) *types.Transaction {
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize))
tx, _ = tx.SignECDSA(from.PrivateKey)
return tx
}
// testPeer is a simulated peer to allow testing direct network calls.
type testPeer struct {
net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging
app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side
*peer
}
// newTestPeer creates a new peer registered at the given protocol manager.
func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*testPeer, <-chan error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id discover.NodeID
rand.Read(id[:])
peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
// Start the peer on a new thread
errc := make(chan error, 1)
go func() {
pm.newPeerCh <- peer
errc <- pm.handle(peer)
}()
tp := &testPeer{
app: app,
net: net,
peer: peer,
}
// Execute any implicitly requested handshakes and return
if shake {
td, head, genesis := pm.chainman.Status()
tp.handshake(nil, td, head, genesis)
}
return tp, errc
}
// handshake simulates a trivial handshake that expects the same state from the
// remote side as we are simulating locally.
func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) {
msg := &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: uint32(NetworkId),
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
}
if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil {
t.Fatalf("status recv: %v", err)
}
if err := p2p.Send(p.app, StatusMsg, msg); err != nil {
t.Fatalf("status send: %v", err)
}
}
// close terminates the local side of the peer, notifying the remote protocol
// manager of termination.
func (p *testPeer) close() {
p.app.Close()
}
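The helpers above are meant to be combined the same way the handler tests do. The snippet below is an editor-added usage sketch, not part of the diff; it reuses only names defined in this file and the surrounding tests (testAccount comes from handler_test.go, testHelperUsage is a hypothetical name), and it deliberately mirrors testRecvTransactions to show the intended wiring.

func testHelperUsage(t *testing.T) {
	// Build a protocol manager with 8 canned blocks and connect one handshaked peer.
	pm := newTestProtocolManager(8, nil, nil)
	peer, _ := newTestPeer("peer", 63, pm, true)
	defer pm.Stop()
	defer peer.close()
	// Inject a transaction over the simulated wire; the fake pool will record it.
	tx := newTestTransaction(testAccount, 0, 0)
	if err := p2p.Send(peer.app, TxMsg, []interface{}{tx}); err != nil {
		t.Fatalf("send error: %v", err)
	}
}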


@ -18,6 +18,7 @@ package eth
import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
)
var (
@ -41,4 +42,116 @@ var (
reqBlockInTrafficMeter = metrics.NewMeter("eth/req/blocks/in/traffic")
reqBlockOutPacketsMeter = metrics.NewMeter("eth/req/blocks/out/packets")
reqBlockOutTrafficMeter = metrics.NewMeter("eth/req/blocks/out/traffic")
reqHeaderInPacketsMeter = metrics.NewMeter("eth/req/headers/in/packets")
reqHeaderInTrafficMeter = metrics.NewMeter("eth/req/headers/in/traffic")
reqHeaderOutPacketsMeter = metrics.NewMeter("eth/req/headers/out/packets")
reqHeaderOutTrafficMeter = metrics.NewMeter("eth/req/headers/out/traffic")
reqBodyInPacketsMeter = metrics.NewMeter("eth/req/bodies/in/packets")
reqBodyInTrafficMeter = metrics.NewMeter("eth/req/bodies/in/traffic")
reqBodyOutPacketsMeter = metrics.NewMeter("eth/req/bodies/out/packets")
reqBodyOutTrafficMeter = metrics.NewMeter("eth/req/bodies/out/traffic")
reqStateInPacketsMeter = metrics.NewMeter("eth/req/states/in/packets")
reqStateInTrafficMeter = metrics.NewMeter("eth/req/states/in/traffic")
reqStateOutPacketsMeter = metrics.NewMeter("eth/req/states/out/packets")
reqStateOutTrafficMeter = metrics.NewMeter("eth/req/states/out/traffic")
reqReceiptInPacketsMeter = metrics.NewMeter("eth/req/receipts/in/packets")
reqReceiptInTrafficMeter = metrics.NewMeter("eth/req/receipts/in/traffic")
reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")
miscInPacketsMeter = metrics.NewMeter("eth/misc/in/packets")
miscInTrafficMeter = metrics.NewMeter("eth/misc/in/traffic")
miscOutPacketsMeter = metrics.NewMeter("eth/misc/out/packets")
miscOutTrafficMeter = metrics.NewMeter("eth/misc/out/traffic")
)
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
// accumulating the above defined metrics based on the data stream contents.
type meteredMsgReadWriter struct {
p2p.MsgReadWriter // Wrapped message stream to meter
version int // Protocol version to select correct meters
}
// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
// metrics system is disabled, this function returns the original object.
func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
if !metrics.Enabled {
return rw
}
return &meteredMsgReadWriter{MsgReadWriter: rw}
}
// Init sets the protocol version used by the stream to know which meters to
// increment in case of overlapping message ids between protocol versions.
func (rw *meteredMsgReadWriter) Init(version int) {
rw.version = version
}
func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
// Read the message and short circuit in case of an error
msg, err := rw.MsgReadWriter.ReadMsg()
if err != nil {
return msg, err
}
// Account for the data traffic
packets, traffic := miscInPacketsMeter, miscInTrafficMeter
switch {
case rw.version < eth62 && msg.Code == BlockHashesMsg:
packets, traffic = reqHashInPacketsMeter, reqHashInTrafficMeter
case rw.version < eth62 && msg.Code == BlocksMsg:
packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
case rw.version >= eth63 && msg.Code == NodeDataMsg:
packets, traffic = reqStateInPacketsMeter, reqStateInTrafficMeter
case rw.version >= eth63 && msg.Code == ReceiptsMsg:
packets, traffic = reqReceiptInPacketsMeter, reqReceiptInTrafficMeter
case msg.Code == NewBlockHashesMsg:
packets, traffic = propHashInPacketsMeter, propHashInTrafficMeter
case msg.Code == NewBlockMsg:
packets, traffic = propBlockInPacketsMeter, propBlockInTrafficMeter
case msg.Code == TxMsg:
packets, traffic = propTxnInPacketsMeter, propTxnInTrafficMeter
}
packets.Mark(1)
traffic.Mark(int64(msg.Size))
return msg, err
}
func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
// Account for the data traffic
packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
switch {
case rw.version < eth62 && msg.Code == BlockHashesMsg:
packets, traffic = reqHashOutPacketsMeter, reqHashOutTrafficMeter
case rw.version < eth62 && msg.Code == BlocksMsg:
packets, traffic = reqBlockOutPacketsMeter, reqBlockOutTrafficMeter
case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter
case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter
case rw.version >= eth63 && msg.Code == NodeDataMsg:
packets, traffic = reqStateOutPacketsMeter, reqStateOutTrafficMeter
case rw.version >= eth63 && msg.Code == ReceiptsMsg:
packets, traffic = reqReceiptOutPacketsMeter, reqReceiptOutTrafficMeter
case msg.Code == NewBlockHashesMsg:
packets, traffic = propHashOutPacketsMeter, propHashOutTrafficMeter
case msg.Code == NewBlockMsg:
packets, traffic = propBlockOutPacketsMeter, propBlockOutTrafficMeter
case msg.Code == TxMsg:
packets, traffic = propTxnOutPacketsMeter, propTxnOutTrafficMeter
}
packets.Mark(1)
traffic.Mark(int64(msg.Size))
// Send the packet to the p2p layer
return rw.MsgReadWriter.WriteMsg(msg)
}
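For context, a hedged sketch of how the protocol handler is expected to wire the wrapper in; rw stands for whatever p2p.MsgReadWriter the peer was given, and the actual call site is outside this diff.

// Meter the stream; with the metrics system disabled the original rw comes back untouched.
rw = newMeteredMsgWriter(rw)
if metered, ok := rw.(*meteredMsgReadWriter); ok {
	metered.Init(eth63) // select the meter set matching the negotiated protocol version
}
// Every subsequent ReadMsg/WriteMsg now marks the corresponding packet and traffic meters.
msg, err := rw.ReadMsg()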


@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
"gopkg.in/fatih/set.v0"
)
@ -129,9 +130,7 @@ func (p *peer) MarkTransaction(hash common.Hash) {
// SendTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
func (p *peer) SendTransactions(txs types.Transactions) error {
propTxnOutPacketsMeter.Mark(1)
for _, tx := range txs {
propTxnOutTrafficMeter.Mark(tx.Size().Int64())
p.knownTxs.Add(tx.Hash())
}
return p2p.Send(p.rw, TxMsg, txs)
@ -139,62 +138,132 @@ func (p *peer) SendTransactions(txs types.Transactions) error {
// SendBlockHashes sends a batch of known hashes to the remote peer.
func (p *peer) SendBlockHashes(hashes []common.Hash) error {
reqHashOutPacketsMeter.Mark(1)
reqHashOutTrafficMeter.Mark(int64(32 * len(hashes)))
return p2p.Send(p.rw, BlockHashesMsg, hashes)
}
// SendBlocks sends a batch of blocks to the remote peer.
func (p *peer) SendBlocks(blocks []*types.Block) error {
reqBlockOutPacketsMeter.Mark(1)
for _, block := range blocks {
reqBlockOutTrafficMeter.Mark(block.Size().Int64())
}
return p2p.Send(p.rw, BlocksMsg, blocks)
}
// SendNewBlockHashes announces the availability of a number of blocks through
// SendNewBlockHashes61 announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash) error {
propHashOutPacketsMeter.Mark(1)
propHashOutTrafficMeter.Mark(int64(32 * len(hashes)))
func (p *peer) SendNewBlockHashes61(hashes []common.Hash) error {
for _, hash := range hashes {
p.knownBlocks.Add(hash)
}
return p2p.Send(p.rw, NewBlockHashesMsg, hashes)
}
// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
for _, hash := range hashes {
p.knownBlocks.Add(hash)
}
request := make(newBlockHashesData, len(hashes))
for i := 0; i < len(hashes); i++ {
request[i].Hash = hashes[i]
request[i].Number = numbers[i]
}
return p2p.Send(p.rw, NewBlockHashesMsg, request)
}
// SendNewBlock propagates an entire block to a remote peer.
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
propBlockOutPacketsMeter.Mark(1)
propBlockOutTrafficMeter.Mark(block.Size().Int64())
p.knownBlocks.Add(block.Hash())
return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
}
// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(headers []*types.Header) error {
return p2p.Send(p.rw, BlockHeadersMsg, headers)
}
// SendBlockBodies sends a batch of block contents to the remote peer.
func (p *peer) SendBlockBodies(bodies []*blockBody) error {
return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
}
// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
// an already RLP encoded format.
func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
return p2p.Send(p.rw, BlockBodiesMsg, bodies)
}
// SendNodeData sends a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) SendNodeData(data [][]byte) error {
return p2p.Send(p.rw, NodeDataMsg, data)
}
// SendReceipts sends a batch of transaction receipts, corresponding to the ones
// requested.
func (p *peer) SendReceipts(receipts []*types.Receipt) error {
return p2p.Send(p.rw, ReceiptsMsg, receipts)
}
// RequestHashes fetches a batch of hashes from a peer, starting at from, going
// towards the genesis block.
func (p *peer) RequestHashes(from common.Hash) error {
glog.V(logger.Debug).Infof("Peer [%s] fetching hashes (%d) from %x...\n", p.id, downloader.MaxHashFetch, from[:4])
glog.V(logger.Debug).Infof("%v fetching hashes (%d) from %x...", p, downloader.MaxHashFetch, from[:4])
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesData{from, uint64(downloader.MaxHashFetch)})
}
// RequestHashesFromNumber fetches a batch of hashes from a peer, starting at the
// requested block number, going upwards towards the genesis block.
// RequestHashesFromNumber fetches a batch of hashes from a peer, starting at
// the requested block number, going upwards towards the current chain head.
func (p *peer) RequestHashesFromNumber(from uint64, count int) error {
glog.V(logger.Debug).Infof("Peer [%s] fetching hashes (%d) from #%d...\n", p.id, count, from)
glog.V(logger.Debug).Infof("%v fetching hashes (%d) from #%d...", p, count, from)
return p2p.Send(p.rw, GetBlockHashesFromNumberMsg, getBlockHashesFromNumberData{from, uint64(count)})
}
// RequestBlocks fetches a batch of blocks corresponding to the specified hashes.
func (p *peer) RequestBlocks(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("[%s] fetching %v blocks\n", p.id, len(hashes))
glog.V(logger.Debug).Infof("%v fetching %v blocks", p, len(hashes))
return p2p.Send(p.rw, GetBlocksMsg, hashes)
}
// RequestOneHeader is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher.
func (p *peer) RequestOneHeader(hash common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash)
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
}
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
}
// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestNodeData(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes))
return p2p.Send(p.rw, GetNodeDataMsg, hashes)
}
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
return p2p.Send(p.rw, GetReceiptsMsg, hashes)
}
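Putting the eth/62+ request helpers together: requests are fire-and-forget from the peer's point of view, and replies come back through the normal message handler. A rough, editor-added sketch follows (p is a connected *peer; the numbers are arbitrary).

// Ask for up to 192 consecutive headers starting at block 4096, oldest first.
if err := p.RequestHeadersByNumber(4096, 192, 0, false); err != nil {
	return err
}
// The answer arrives asynchronously as a BlockHeadersMsg; the protocol handler
// decodes it into []*types.Header before handing it to the downloader or fetcher.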
// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) error {


@ -17,17 +17,28 @@
package eth
import (
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// Constants to match up protocol versions and messages
const (
eth61 = 61
eth62 = 62
eth63 = 63
eth64 = 64
)
// Supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{61, 60}
var ProtocolVersions = []uint{eth64, eth63, eth62, eth61}
// Number of implemented messages corresponding to different protocol versions.
var ProtocolLengths = []uint64{9, 8}
var ProtocolLengths = []uint64{15, 12, 8, 9}
const (
NetworkId = 1
@ -36,15 +47,37 @@ const (
// eth protocol message codes
const (
StatusMsg = iota
NewBlockHashesMsg
TxMsg
GetBlockHashesMsg
BlockHashesMsg
GetBlocksMsg
BlocksMsg
NewBlockMsg
GetBlockHashesFromNumberMsg
// Protocol messages belonging to eth/61
StatusMsg = 0x00
NewBlockHashesMsg = 0x01
TxMsg = 0x02
GetBlockHashesMsg = 0x03
BlockHashesMsg = 0x04
GetBlocksMsg = 0x05
BlocksMsg = 0x06
NewBlockMsg = 0x07
GetBlockHashesFromNumberMsg = 0x08
// Protocol messages belonging to eth/62 (new protocol from scratch)
// StatusMsg = 0x00 (uncomment after eth/61 deprecation)
// NewBlockHashesMsg = 0x01 (uncomment after eth/61 deprecation)
// TxMsg = 0x02 (uncomment after eth/61 deprecation)
GetBlockHeadersMsg = 0x03
BlockHeadersMsg = 0x04
GetBlockBodiesMsg = 0x05
BlockBodiesMsg = 0x06
// NewBlockMsg = 0x07 (uncomment after eth/61 deprecation)
// Protocol messages belonging to eth/63
GetNodeDataMsg = 0x0d
NodeDataMsg = 0x0e
GetReceiptsMsg = 0x0f
ReceiptsMsg = 0x10
// Protocol messages belonging to eth/64
GetAcctProofMsg = 0x11
GetStorageDataProof = 0x12
Proof = 0x13
)
type errCode int
@ -102,22 +135,85 @@ type statusData struct {
GenesisBlock common.Hash
}
// getBlockHashesData is the network packet for the hash based block retrieval
// message.
// newBlockHashesData is the network packet for the block announcements.
type newBlockHashesData []struct {
Hash common.Hash // Hash of one particular block being announced
Number uint64 // Number of one particular block being announced
}
// getBlockHashesData is the network packet for the hash based hash retrieval.
type getBlockHashesData struct {
Hash common.Hash
Amount uint64
}
// getBlockHashesFromNumberData is the network packet for the number based block
// retrieval message.
// getBlockHashesFromNumberData is the network packet for the number based hash
// retrieval.
type getBlockHashesFromNumberData struct {
Number uint64
Amount uint64
}
// getBlockHeadersData represents a block header query.
type getBlockHeadersData struct {
Origin hashOrNumber // Block from which to retrieve headers
Amount uint64 // Maximum number of headers to retrieve
Skip uint64 // Blocks to skip between consecutive headers
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
}
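To make the Amount/Skip/Reverse semantics concrete (the header retrieval tests rely on them), here is an illustrative, editor-added helper that is not part of the diff; requestedNumbers is a hypothetical name and it only covers number-based origins (hash origins are resolved to a number first).

// requestedNumbers expands a number-based header query into the block numbers it
// asks for: Amount entries, Skip blocks apart, rising or falling from the origin.
func requestedNumbers(q getBlockHeadersData) []uint64 {
	nums := make([]uint64, 0, q.Amount)
	current := q.Origin.Number
	for i := uint64(0); i < q.Amount; i++ {
		nums = append(nums, current)
		step := q.Skip + 1
		if q.Reverse {
			if current < step {
				break // ran past the genesis block
			}
			current -= step
		} else {
			current += step
		}
	}
	return nums
}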
// hashOrNumber is a combined field for specifying an origin block.
type hashOrNumber struct {
Hash common.Hash // Block hash from which to retrieve headers (excludes Number)
Number uint64 // Block number from which to retrieve headers (excludes Hash)
}
// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the
// two contained union fields.
func (hn *hashOrNumber) EncodeRLP(w io.Writer) error {
if hn.Hash == (common.Hash{}) {
return rlp.Encode(w, hn.Number)
}
if hn.Number != 0 {
return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
}
return rlp.Encode(w, hn.Hash)
}
// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents
// into either a block hash or a block number.
func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {
_, size, _ := s.Kind()
origin, err := s.Raw()
if err == nil {
switch {
case size == 32:
err = rlp.DecodeBytes(origin, &hn.Hash)
case size <= 8:
err = rlp.DecodeBytes(origin, &hn.Number)
default:
err = fmt.Errorf("invalid input size %d for origin", size)
}
}
return err
}
// newBlockData is the network packet for the block propagation message.
type newBlockData struct {
Block *types.Block
TD *big.Int
}
// blockBody represents the data content of a single block.
type blockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
}
// blockBodiesData is the network packet for block content distribution.
type blockBodiesData []*blockBody
// nodeDataData is the network response packet for a node data retrieval.
type nodeDataData []struct {
Value []byte
}


@ -18,19 +18,16 @@ package eth
import (
"crypto/rand"
"math/big"
"fmt"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/rlp"
)
func init() {
@ -40,8 +37,14 @@ func init() {
var testAccount = crypto.NewKey(rand.Reader)
func TestStatusMsgErrors(t *testing.T) {
pm := newProtocolManagerForTesting(nil)
// Tests that handshake failures are detected and reported correctly.
func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) }
func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }
func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) }
func testStatusMsgErrors(t *testing.T, protocol int) {
pm := newTestProtocolManager(0, nil, nil)
td, currentBlock, genesis := pm.chainman.Status()
defer pm.Stop()
@ -56,23 +59,23 @@ func TestStatusMsgErrors(t *testing.T) {
},
{
code: StatusMsg, data: statusData{10, NetworkId, td, currentBlock, genesis},
wantError: errResp(ErrProtocolVersionMismatch, "10 (!= 0)"),
wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol),
},
{
code: StatusMsg, data: statusData{uint32(ProtocolVersions[0]), 999, td, currentBlock, genesis},
code: StatusMsg, data: statusData{uint32(protocol), 999, td, currentBlock, genesis},
wantError: errResp(ErrNetworkIdMismatch, "999 (!= 1)"),
},
{
code: StatusMsg, data: statusData{uint32(ProtocolVersions[0]), NetworkId, td, currentBlock, common.Hash{3}},
code: StatusMsg, data: statusData{uint32(protocol), NetworkId, td, currentBlock, common.Hash{3}},
wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis),
},
}
for i, test := range tests {
p, errc := newTestPeer(pm)
p, errc := newTestPeer("peer", protocol, pm, false)
// The send call might hang until reset because
// the protocol might not read the payload.
go p2p.Send(p, test.code, test.data)
go p2p.Send(p.app, test.code, test.data)
select {
case err := <-errc:
@ -89,16 +92,20 @@ func TestStatusMsgErrors(t *testing.T) {
}
// This test checks that received transactions are added to the local pool.
func TestRecvTransactions(t *testing.T) {
func TestRecvTransactions61(t *testing.T) { testRecvTransactions(t, 61) }
func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
func testRecvTransactions(t *testing.T, protocol int) {
txAdded := make(chan []*types.Transaction)
pm := newProtocolManagerForTesting(txAdded)
p, _ := newTestPeer(pm)
pm := newTestProtocolManager(0, nil, txAdded)
p, _ := newTestPeer("peer", protocol, pm, true)
defer pm.Stop()
defer p.close()
p.handshake(t)
tx := newtx(testAccount, 0, 0)
if err := p2p.Send(p, TxMsg, []interface{}{tx}); err != nil {
tx := newTestTransaction(testAccount, 0, 0)
if err := p2p.Send(p.app, TxMsg, []interface{}{tx}); err != nil {
t.Fatalf("send error: %v", err)
}
select {
@ -114,15 +121,20 @@ func TestRecvTransactions(t *testing.T) {
}
// This test checks that pending transactions are sent.
func TestSendTransactions(t *testing.T) {
pm := newProtocolManagerForTesting(nil)
func TestSendTransactions61(t *testing.T) { testSendTransactions(t, 61) }
func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
func testSendTransactions(t *testing.T, protocol int) {
pm := newTestProtocolManager(0, nil, nil)
defer pm.Stop()
// Fill the pool with big transactions.
const txsize = txsyncPackSize / 10
alltxs := make([]*types.Transaction, 100)
for nonce := range alltxs {
alltxs[nonce] = newtx(testAccount, uint64(nonce), txsize)
alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
}
pm.txpool.AddTransactions(alltxs)
@ -137,7 +149,7 @@ func TestSendTransactions(t *testing.T) {
}
for n := 0; n < len(alltxs) && !t.Failed(); {
var txs []*types.Transaction
msg, err := p.ReadMsg()
msg, err := p.app.ReadMsg()
if err != nil {
t.Errorf("%v: read error: %v", p.Peer, err)
} else if msg.Code != TxMsg {
@ -161,97 +173,53 @@ func TestSendTransactions(t *testing.T) {
}
}
for i := 0; i < 3; i++ {
p, _ := newTestPeer(pm)
p.handshake(t)
p, _ := newTestPeer(fmt.Sprintf("peer #%d", i), protocol, pm, true)
wg.Add(1)
go checktxs(p)
}
wg.Wait()
}
// testPeer wraps all peer-related data for tests.
type testPeer struct {
p2p.MsgReadWriter // writing to the test peer feeds the protocol
pipe *p2p.MsgPipeRW // the protocol read/writes on this end
pm *ProtocolManager
*peer
}
func newProtocolManagerForTesting(txAdded chan<- []*types.Transaction) *ProtocolManager {
db, _ := ethdb.NewMemDatabase()
core.WriteTestNetGenesisBlock(db, 0)
var (
em = new(event.TypeMux)
chain, _ = core.NewChainManager(db, core.FakePow{}, em)
txpool = &fakeTxPool{added: txAdded}
pm = NewProtocolManager(NetworkId, em, txpool, core.FakePow{}, chain)
)
pm.Start()
return pm
}
func newTestPeer(pm *ProtocolManager) (*testPeer, <-chan error) {
var id discover.NodeID
rand.Read(id[:])
rw1, rw2 := p2p.MsgPipe()
peer := pm.newPeer(pm.protVer, pm.netId, p2p.NewPeer(id, "test peer", nil), rw2)
errc := make(chan error, 1)
go func() {
pm.newPeerCh <- peer
errc <- pm.handle(peer)
}()
return &testPeer{rw1, rw2, pm, peer}, errc
}
func (p *testPeer) handshake(t *testing.T) {
td, currentBlock, genesis := p.pm.chainman.Status()
msg := &statusData{
ProtocolVersion: uint32(p.pm.protVer),
NetworkId: uint32(p.pm.netId),
TD: td,
CurrentBlock: currentBlock,
GenesisBlock: genesis,
// Tests that the custom union field encoder and decoder work correctly.
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
// Create a "random" hash for testing
var hash common.Hash
for i, _ := range hash {
hash[i] = byte(i)
}
// Assemble some table driven tests
tests := []struct {
packet *getBlockHeadersData
fail bool
}{
// Providing the origin as either a hash or a number should both work
{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}}},
{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}}},
// Providing arbitrary query field should also work
{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
// Providing both the origin hash and origin number must fail
{fail: true, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash, Number: 314}}},
}
// Iterate over each of the tests and try to encode and then decode
for i, tt := range tests {
bytes, err := rlp.EncodeToBytes(tt.packet)
if err != nil && !tt.fail {
t.Fatalf("test %d: failed to encode packet: %v", i, err)
} else if err == nil && tt.fail {
t.Fatalf("test %d: encode should have failed", i)
}
if !tt.fail {
packet := new(getBlockHeadersData)
if err := rlp.DecodeBytes(bytes, packet); err != nil {
t.Fatalf("test %d: failed to decode packet: %v", i, err)
}
if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount ||
packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse {
t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet)
}
if err := p2p.ExpectMsg(p, StatusMsg, msg); err != nil {
t.Fatalf("status recv: %v", err)
}
if err := p2p.Send(p, StatusMsg, msg); err != nil {
t.Fatalf("status send: %v", err)
}
}
func (p *testPeer) close() {
p.pipe.Close()
}
type fakeTxPool struct {
// all transactions are collected.
mu sync.Mutex
all []*types.Transaction
// if added is non-nil, it receives added transactions.
added chan<- []*types.Transaction
}
func (pool *fakeTxPool) AddTransactions(txs []*types.Transaction) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.all = append(pool.all, txs...)
if pool.added != nil {
pool.added <- txs
}
}
func (pool *fakeTxPool) GetTransactions() types.Transactions {
pool.mu.Lock()
defer pool.mu.Unlock()
txs := make([]*types.Transaction, len(pool.all))
copy(txs, pool.all)
return types.Transactions(txs)
}
func newtx(from *crypto.Key, nonce uint64, datasize int) *types.Transaction {
data := make([]byte, datasize)
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), data)
tx, _ = tx.SignECDSA(from.PrivateKey)
return tx
}


@ -61,9 +61,7 @@ type LDBDatabase struct {
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
}
// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
// it self but requires a background poller which syncs every X. `Flush` should be called
// when data needs to be stored and written to disk.
// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int) (*LDBDatabase, error) {
// Calculate the cache allowance for this particular database
cache = int(float64(cache) * cacheRatio[filepath.Base(file)])
@ -142,11 +140,6 @@ func (self *LDBDatabase) NewIterator() iterator.Iterator {
return self.db.NewIterator(nil, nil)
}
// Flush flushes out the queue to leveldb
func (self *LDBDatabase) Flush() error {
return nil
}
func (self *LDBDatabase) Close() {
// Stop the metrics collection to avoid internal database races
self.quitLock.Lock()
@ -159,12 +152,14 @@ func (self *LDBDatabase) Close() {
glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err)
}
}
// Flush and close the database
if err := self.Flush(); err != nil {
glog.V(logger.Error).Infof("flushing '%s' failed: %v\n", self.fn, err)
err := self.db.Close()
if glog.V(logger.Error) {
if err == nil {
glog.Infoln("closed db:", self.fn)
} else {
glog.Errorf("error closing db %s: %v", self.fn, err)
}
}
self.db.Close()
glog.V(logger.Error).Infoln("flushed and closed db:", self.fn)
}
func (self *LDBDatabase) LDB() *leveldb.DB {
@ -268,3 +263,23 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
}
}
}
// TODO: remove this stuff and expose leveldb directly
func (db *LDBDatabase) NewBatch() Batch {
return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}
type ldbBatch struct {
db *leveldb.DB
b *leveldb.Batch
}
func (b *ldbBatch) Put(key, value []byte) error {
b.b.Put(key, value)
return nil
}
func (b *ldbBatch) Write() error {
return b.db.Write(b.b, nil)
}


@ -14,13 +14,17 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
package ethdb
// Database interface
type Database interface {
Put(key []byte, value []byte) error
Get(key []byte) ([]byte, error)
Delete(key []byte) error
Close()
Flush() error
NewBatch() Batch
}
type Batch interface {
Put(key, value []byte) error
Write() error
}


@ -36,8 +36,7 @@ func NewMemDatabase() (*MemDatabase, error) {
}
func (db *MemDatabase) Put(key []byte, value []byte) error {
db.db[string(key)] = value
db.db[string(key)] = common.CopyBytes(value)
return nil
}
@ -49,6 +48,14 @@ func (db *MemDatabase) Get(key []byte) ([]byte, error) {
return db.db[string(key)], nil
}
func (db *MemDatabase) Keys() [][]byte {
keys := [][]byte{}
for key, _ := range db.db {
keys = append(keys, []byte(key))
}
return keys
}
/*
func (db *MemDatabase) GetKeys() []*common.Key {
data, _ := db.Get([]byte("KeyRing"))
@ -84,6 +91,25 @@ func (db *MemDatabase) LastKnownTD() []byte {
return data
}
func (db *MemDatabase) Flush() error {
func (db *MemDatabase) NewBatch() Batch {
return &memBatch{db: db}
}
type kv struct{ k, v []byte }
type memBatch struct {
db *MemDatabase
writes []kv
}
func (w *memBatch) Put(key, value []byte) error {
w.writes = append(w.writes, kv{key, common.CopyBytes(value)})
return nil
}
func (w *memBatch) Write() error {
for _, kv := range w.writes {
w.db.db[string(kv.k)] = kv.v
}
return nil
}
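A small, editor-added round trip with the new batch API against the in-memory backend (illustrative only; written as if inside package ethdb, so no package prefix).

db, _ := NewMemDatabase()
batch := db.NewBatch()
batch.Put([]byte("alpha"), []byte{1})
batch.Put([]byte("beta"), []byte{2})
// Nothing is visible through db.Get until Write flushes the queued pairs.
if err := batch.Write(); err != nil {
	panic(err)
}
value, _ := db.Get([]byte("alpha")) // -> []byte{1}
_ = value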


@ -1002,7 +1002,7 @@ var formatInputDynamicBytes = function (value) {
* @returns {SolidityParam}
*/
var formatInputString = function (value) {
var result = utils.fromAscii(value).substr(2);
var result = utils.fromUtf8(value).substr(2);
var length = result.length / 2;
var l = Math.floor((result.length + 63) / 64);
result = utils.padRight(result, l * 64);
@ -1139,7 +1139,7 @@ var formatOutputDynamicBytes = function (param) {
*/
var formatOutputString = function (param) {
var length = (new BigNumber(param.dynamicPart().slice(0, 64), 16)).toNumber() * 2;
return utils.toAscii(param.dynamicPart().substr(64, length));
return utils.toUtf8(param.dynamicPart().substr(64, length));
};
/**
@ -1697,7 +1697,7 @@ var SolidityType = require('./type');
*/
var SolidityTypeUInt = function () {
this._inputFormatter = f.formatInputInt;
this._outputFormatter = f.formatOutputInt;
this._outputFormatter = f.formatOutputUInt;
};
SolidityTypeUInt.prototype = new SolidityType({});
@ -1876,7 +1876,7 @@ module.exports = function (str, isNew) {
console.warn('new usage: \'web3.sha3("hello")\'');
console.warn('see https://github.com/ethereum/web3.js/pull/205');
console.warn('if you need to hash hex value, you can do \'sha3("0xfff", true)\'');
str = utils.toAscii(str);
str = utils.toUtf8(str);
}
return sha3(str, {
@ -1885,7 +1885,7 @@ module.exports = function (str, isNew) {
};
},{"./utils":20,"crypto-js/sha3":47}],20:[function(require,module,exports){
},{"./utils":20,"crypto-js/sha3":48}],20:[function(require,module,exports){
/*
This file is part of ethereum.js.
@ -1923,6 +1923,7 @@ module.exports = function (str, isNew) {
var BigNumber = require('bignumber.js');
var utf8 = require('utf8');
var unitMap = {
'wei': '1',
@ -1978,8 +1979,29 @@ var padRight = function (string, chars, sign) {
};
/**
* Should be called to get sting from it's hex representation
* TODO: it should be called toUTF8
* Should be called to get utf8 from its hex representation
*
* @method toUtf8
* @param {String} string in hex
* @returns {String} utf8 string representation of hex value
*/
var toUtf8 = function(hex) {
// Find termination
var str = "";
var i = 0, l = hex.length;
if (hex.substring(0, 2) === '0x') {
i = 2;
}
for (; i < l; i+=2) {
var code = parseInt(hex.substr(i, 2), 16);
str += String.fromCharCode(code);
}
return utf8.decode(str);
};
/**
* Should be called to get ascii from its hex representation
*
* @method toAscii
* @param {String} string in hex
@ -1997,25 +2019,26 @@ var toAscii = function(hex) {
str += String.fromCharCode(code);
}
return decodeURIComponent(escape(str)); // jshint ignore:line
return str;
};
/**
* Shold be called to get hex representation (prefixed by 0x) of ascii string
* Should be called to get hex representation (prefixed by 0x) of utf8 string
*
* @method toHexNative
* @method fromUtf8
* @param {String} string
* @param {Number} optional padding
* @returns {String} hex representation of input string
*/
var toHexNative = function(str) {
str = unescape(encodeURIComponent(str)); // jshint ignore:line
var fromUtf8 = function(str) {
str = utf8.encode(str);
var hex = "";
for(var i = 0; i < str.length; i++) {
var n = str.charCodeAt(i).toString(16);
hex += n.length < 2 ? '0' + n : n;
}
return hex;
return "0x" + hex;
};
/**
@ -2026,11 +2049,14 @@ var toHexNative = function(str) {
* @param {Number} optional padding
* @returns {String} hex representation of input string
*/
var fromAscii = function(str, pad) {
pad = pad === undefined ? 0 : pad;
var hex = toHexNative(str);
while (hex.length < pad*2)
hex += "00";
var fromAscii = function(str) {
var hex = "";
for(var i = 0; i < str.length; i++) {
var code = str.charCodeAt(i);
var n = code.toString(16);
hex += n.length < 2 ? '0' + n : n;
}
return "0x" + hex;
};
@ -2113,7 +2139,7 @@ var toHex = function (val) {
return fromDecimal(val);
if (isObject(val))
return fromAscii(JSON.stringify(val));
return fromUtf8(JSON.stringify(val));
// if its a negative number, pass it through fromDecimal
if (isString(val)) {
@ -2242,7 +2268,7 @@ var toTwosComplement = function (number) {
* @return {Boolean}
*/
var isStrictAddress = function (address) {
return /^0x[0-9a-f]{40}$/.test(address);
return /^0x[0-9a-f]{40}$/i.test(address);
};
/**
@ -2253,7 +2279,7 @@ var isStrictAddress = function (address) {
* @return {Boolean}
*/
var isAddress = function (address) {
return /^(0x)?[0-9a-f]{40}$/.test(address);
return /^(0x)?[0-9a-f]{40}$/i.test(address);
};
/**
@ -2365,7 +2391,9 @@ module.exports = {
toHex: toHex,
toDecimal: toDecimal,
fromDecimal: fromDecimal,
toUtf8: toUtf8,
toAscii: toAscii,
fromUtf8: fromUtf8,
fromAscii: fromAscii,
transformToFullName: transformToFullName,
extractDisplayName: extractDisplayName,
@ -2386,10 +2414,9 @@ module.exports = {
isJson: isJson
};
},{"bignumber.js":"bignumber.js"}],21:[function(require,module,exports){
},{"bignumber.js":"bignumber.js","utf8":50}],21:[function(require,module,exports){
module.exports={
"version": "0.12.1"
"version": "0.13.0"
}
},{}],22:[function(require,module,exports){
@ -2426,6 +2453,7 @@ var db = require('./web3/methods/db');
var shh = require('./web3/methods/shh');
var watches = require('./web3/methods/watches');
var Filter = require('./web3/filter');
var IsSyncing = require('./web3/syncing');
var utils = require('./utils/utils');
var formatters = require('./web3/formatters');
var RequestManager = require('./web3/requestmanager');
@ -2480,6 +2508,10 @@ web3.version = {};
web3.version.api = version.version;
web3.eth = {};
web3.eth.isSyncing = function (callback) {
return new IsSyncing(callback);
};
/*jshint maxparams:4 */
web3.eth.filter = function (fil, callback) {
return new Filter(fil, watches.eth(), formatters.outputLogFormatter, callback);
@ -2499,14 +2531,16 @@ web3.setProvider = function (provider) {
web3.isConnected = function(){
return (this.currentProvider && this.currentProvider.isConnected());
};
web3.reset = function () {
RequestManager.getInstance().reset();
web3.reset = function (keepIsSyncing) {
RequestManager.getInstance().reset(keepIsSyncing);
c.defaultBlock = 'latest';
c.defaultAccount = undefined;
};
web3.toHex = utils.toHex;
web3.toAscii = utils.toAscii;
web3.toUtf8 = utils.toUtf8;
web3.fromAscii = utils.fromAscii;
web3.fromUtf8 = utils.fromUtf8;
web3.toDecimal = utils.toDecimal;
web3.fromDecimal = utils.fromDecimal;
web3.toBigNumber = utils.toBigNumber;
@ -2569,7 +2603,7 @@ setupMethods(web3.shh, shh.methods);
module.exports = web3;
},{"./utils/config":18,"./utils/sha3":19,"./utils/utils":20,"./version.json":21,"./web3/batch":24,"./web3/filter":28,"./web3/formatters":29,"./web3/method":35,"./web3/methods/db":36,"./web3/methods/eth":37,"./web3/methods/net":38,"./web3/methods/shh":39,"./web3/methods/watches":40,"./web3/property":42,"./web3/requestmanager":43}],23:[function(require,module,exports){
},{"./utils/config":18,"./utils/sha3":19,"./utils/utils":20,"./version.json":21,"./web3/batch":24,"./web3/filter":28,"./web3/formatters":29,"./web3/method":35,"./web3/methods/db":36,"./web3/methods/eth":37,"./web3/methods/net":38,"./web3/methods/shh":39,"./web3/methods/watches":40,"./web3/property":42,"./web3/requestmanager":43,"./web3/syncing":44}],23:[function(require,module,exports){
/*
This file is part of ethereum.js.
@ -3301,7 +3335,7 @@ var toTopic = function(value){
if(value.indexOf('0x') === 0)
return value;
else
return utils.fromAscii(value);
return utils.fromUtf8(value);
};
/// This method should be called on options object, to verify deprecated properties && lazy load dynamic ones
@ -3371,12 +3405,14 @@ var pollFilter = function(self) {
});
}
if(utils.isArray(messages)) {
messages.forEach(function (message) {
message = self.formatter ? self.formatter(message) : message;
self.callbacks.forEach(function (callback) {
callback(null, message);
});
});
}
};
RequestManager.getInstance().startPolling({
@ -3396,6 +3432,7 @@ var Filter = function (options, methods, formatter, callback) {
this.implementation = implementation;
this.filterId = null;
this.callbacks = [];
this.getLogsCallbacks = [];
this.pollFilters = [];
this.formatter = formatter;
this.implementation.newFilter(this.options, function(error, id){
@ -3406,6 +3443,13 @@ var Filter = function (options, methods, formatter, callback) {
} else {
self.filterId = id;
// check if there are get pending callbacks as a consequence
// of calling get() with filterId unassigned.
self.getLogsCallbacks.forEach(function (cb){
self.get(cb);
});
self.getLogsCallbacks = [];
// get filter logs for the already existing watch calls
self.callbacks.forEach(function(cb){
getLogsAtStart(self, cb);
@ -3444,6 +3488,11 @@ Filter.prototype.stopWatching = function () {
Filter.prototype.get = function (callback) {
var self = this;
if (utils.isFunction(callback)) {
if (this.filterId === null) {
// If filterId is not set yet, call it back
// when newFilter() assigns it.
this.getLogsCallbacks.push(callback);
} else {
this.implementation.getLogs(this.filterId, function(err, res){
if (err) {
callback(err);
@ -3453,7 +3502,11 @@ Filter.prototype.get = function (callback) {
}));
}
});
}
} else {
if (this.filterId === null) {
throw new Error('Filter ID Error: filter().get() can\'t be chained synchronous, please provide a callback for the get() method.');
}
var logs = this.implementation.getLogs(this.filterId);
return logs.map(function (log) {
return self.formatter ? self.formatter(log) : log;
@ -3690,7 +3743,7 @@ var inputPostFormatter = function(post) {
// format the following options
post.topics = post.topics.map(function(topic){
return utils.fromAscii(topic);
return utils.fromUtf8(topic);
});
return post;
@ -3710,7 +3763,7 @@ var outputPostFormatter = function(post){
post.ttl = utils.toDecimal(post.ttl);
post.workProved = utils.toDecimal(post.workProved);
post.payloadRaw = post.payload;
post.payload = utils.toAscii(post.payload);
post.payload = utils.toUtf8(post.payload);
if (utils.isJson(post.payload)) {
post.payload = JSON.parse(post.payload);
@ -3721,7 +3774,7 @@ var outputPostFormatter = function(post){
post.topics = [];
}
post.topics = post.topics.map(function(topic){
return utils.toAscii(topic);
return utils.toUtf8(topic);
});
return post;
@ -3739,6 +3792,16 @@ var inputAddressFormatter = function (address) {
throw 'invalid address';
};
var outputSyncingFormatter = function(result) {
result.startingBlock = utils.toDecimal(result.startingBlock);
result.currentBlock = utils.toDecimal(result.currentBlock);
result.highestBlock = utils.toDecimal(result.highestBlock);
return result;
};
module.exports = {
inputDefaultBlockNumberFormatter: inputDefaultBlockNumberFormatter,
inputBlockNumberFormatter: inputBlockNumberFormatter,
@ -3751,7 +3814,8 @@ module.exports = {
outputTransactionReceiptFormatter: outputTransactionReceiptFormatter,
outputBlockFormatter: outputBlockFormatter,
outputLogFormatter: outputLogFormatter,
outputPostFormatter: outputPostFormatter
outputPostFormatter: outputPostFormatter,
outputSyncingFormatter: outputSyncingFormatter
};
@ -4289,7 +4353,7 @@ Iban.isValid = function (iban) {
* @returns {Boolean} true if it is, otherwise false
*/
Iban.prototype.isValid = function () {
return /^XE[0-9]{2}(ETH[0-9A-Z]{13}|[0-9A-Z]{30})$/.test(this._iban) &&
return /^XE[0-9]{2}(ETH[0-9A-Z]{13}|[0-9A-Z]{30,31})$/.test(this._iban) &&
mod9710(iso13616Prepare(this._iban)) === 1;
};
@ -5185,6 +5249,11 @@ var properties = [
getter: 'eth_hashrate',
outputFormatter: utils.toDecimal
}),
new Property({
name: 'syncing',
getter: 'eth_syncing',
outputFormatter: formatters.outputSyncingFormatter
}),
new Property({
name: 'gasPrice',
getter: 'eth_gasPrice',
@ -5811,11 +5880,15 @@ RequestManager.prototype.stopPolling = function (pollId) {
*
* @method reset
*/
RequestManager.prototype.reset = function () {
RequestManager.prototype.reset = function (keepIsSyncing) {
for (var key in this.polls) {
// remove all polls, except sync polls,
// they need to be removed manually by calling syncing.stopWatching()
if(!keepIsSyncing || key.indexOf('syncPoll_') === -1) {
this.polls[key].uninstall();
delete this.polls[key];
}
}
this.polls = {};
if (this.timeout) {
clearTimeout(this.timeout);
@ -5843,10 +5916,10 @@ RequestManager.prototype.poll = function () {
}
var pollsData = [];
var pollsKeys = [];
var pollsIds = [];
for (var key in this.polls) {
pollsData.push(this.polls[key].data);
pollsKeys.push(key);
pollsIds.push(key);
}
if (pollsData.length === 0) {
@@ -5855,8 +5928,17 @@ RequestManager.prototype.poll = function () {
var payload = Jsonrpc.getInstance().toBatchPayload(pollsData);
// map the request id to they poll id
var pollsIdMap = {};
payload.forEach(function(load, index){
pollsIdMap[load.id] = pollsIds[index];
});
var self = this;
this.provider.sendAsync(payload, function (error, results) {
// TODO: console log?
if (error) {
return;
@@ -5865,12 +5947,12 @@ RequestManager.prototype.poll = function () {
if (!utils.isArray(results)) {
throw errors.InvalidResponse(results);
}
results.map(function (result) {
var id = pollsIdMap[result.id];
results.map(function (result, index) {
var key = pollsKeys[index];
// make sure the filter is still installed after arrival of the request
if (self.polls[key]) {
result.callback = self.polls[key].callback;
if (self.polls[id]) {
result.callback = self.polls[id].callback;
return result;
} else
return false;
@@ -5882,8 +5964,6 @@ RequestManager.prototype.poll = function () {
result.callback(errors.InvalidResponse(result));
}
return valid;
}).filter(function (result) {
return utils.isArray(result.result) && result.result.length > 0;
}).forEach(function (result) {
result.callback(null, result.result);
});
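The poll() change above stops matching batch responses to polls by array position and instead builds a pollsIdMap keyed by JSON-RPC id, since the entries of a batch response are not guaranteed to come back in request order. A small Go sketch of that dispatch step; the struct and field names are illustrative:

package main

import "fmt"

type request struct {
	ID   int
	Poll string
}

type response struct {
	ID     int
	Result string
}

func main() {
	requests := []request{{ID: 7, Poll: "filter_A"}, {ID: 8, Poll: "syncPoll_42"}}

	// Map JSON-RPC ids to the poll that issued them.
	idToPoll := make(map[int]string, len(requests))
	for _, r := range requests {
		idToPoll[r.ID] = r.Poll
	}

	// Responses may arrive in any order; dispatch by id, not by index.
	responses := []response{{ID: 8, Result: "0x1"}, {ID: 7, Result: "0x2"}}
	for _, res := range responses {
		if poll, ok := idToPoll[res.ID]; ok {
			fmt.Println(poll, "<-", res.Result)
		}
	}
}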
@@ -5910,6 +5990,109 @@ module.exports = RequestManager;
You should have received a copy of the GNU Lesser General Public License
along with ethereum.js. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file syncing.js
* @authors:
* Fabian Vogelsteller <fabian@ethdev.com>
* @date 2015
*/
var RequestManager = require('./requestmanager');
var Method = require('./method');
var formatters = require('./formatters');
var utils = require('../utils/utils');
/**
Adds the callback and sets up the methods, to iterate over the results.
@method pollSyncing
@param {Object} self
*/
var pollSyncing = function(self) {
var lastSyncState = false;
var onMessage = function (error, sync) {
if (error) {
return self.callbacks.forEach(function (callback) {
callback(error);
});
}
if(utils.isObject(sync))
sync = self.implementation.outputFormatter(sync);
self.callbacks.forEach(function (callback) {
if(lastSyncState !== sync) {
// call the callback with true first so the app can stop anything, before receiving the sync data
if(!lastSyncState && utils.isObject(sync))
callback(null, true);
// call on the next CPU cycle, so the actions of the sync stop can be processes first
setTimeout(function() {
callback(null, sync);
}, 1);
lastSyncState = sync;
}
});
};
RequestManager.getInstance().startPolling({
method: self.implementation.call,
params: [],
}, self.pollId, onMessage, self.stopWatching.bind(self));
};
var IsSyncing = function (callback) {
this.pollId = 'syncPoll_'+ Math.floor(Math.random() * 1000);
this.callbacks = [];
this.implementation = new Method({
name: 'isSyncing',
call: 'eth_syncing',
params: 0,
outputFormatter: formatters.outputSyncingFormatter
});
this.addCallback(callback);
pollSyncing(this);
return this;
};
IsSyncing.prototype.addCallback = function (callback) {
if(callback)
this.callbacks.push(callback);
return this;
};
IsSyncing.prototype.stopWatching = function () {
RequestManager.getInstance().stopPolling(this.pollId);
this.callbacks = [];
};
module.exports = IsSyncing;
},{"../utils/utils":20,"./formatters":29,"./method":35,"./requestmanager":43}],45:[function(require,module,exports){
/*
This file is part of ethereum.js.
ethereum.js is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ethereum.js is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ethereum.js. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file transfer.js
* @author Marek Kotewicz <marek@ethdev.com>
@@ -5990,9 +6173,9 @@ var deposit = function (from, to, value, client, callback) {
module.exports = transfer;
},{"../contracts/SmartExchange.json":3,"../web3":22,"./contract":25,"./iban":32,"./namereg":41}],45:[function(require,module,exports){
},{"../contracts/SmartExchange.json":3,"../web3":22,"./contract":25,"./iban":32,"./namereg":41}],46:[function(require,module,exports){
},{}],46:[function(require,module,exports){
},{}],47:[function(require,module,exports){
;(function (root, factory) {
if (typeof exports === "object") {
// CommonJS
@@ -6735,7 +6918,7 @@ module.exports = transfer;
return CryptoJS;
}));
},{}],47:[function(require,module,exports){
},{}],48:[function(require,module,exports){
;(function (root, factory, undef) {
if (typeof exports === "object") {
// CommonJS
@@ -7059,7 +7242,7 @@ module.exports = transfer;
return CryptoJS.SHA3;
}));
},{"./core":46,"./x64-core":48}],48:[function(require,module,exports){
},{"./core":47,"./x64-core":49}],49:[function(require,module,exports){
;(function (root, factory) {
if (typeof exports === "object") {
// CommonJS
@@ -7364,7 +7547,253 @@ module.exports = transfer;
return CryptoJS;
}));
},{"./core":46}],"bignumber.js":[function(require,module,exports){
},{"./core":47}],50:[function(require,module,exports){
/*! https://mths.be/utf8js v2.0.0 by @mathias */
;(function(root) {
// Detect free variables 'exports'
var freeExports = typeof exports == 'object' && exports;
// Detect free variable 'module'
var freeModule = typeof module == 'object' && module &&
module.exports == freeExports && module;
// Detect free variable 'global', from Node.js or Browserified code,
// and use it as 'root'
var freeGlobal = typeof global == 'object' && global;
if (freeGlobal.global === freeGlobal || freeGlobal.window === freeGlobal) {
root = freeGlobal;
}
/*--------------------------------------------------------------------------*/
var stringFromCharCode = String.fromCharCode;
// Taken from https://mths.be/punycode
function ucs2decode(string) {
var output = [];
var counter = 0;
var length = string.length;
var value;
var extra;
while (counter < length) {
value = string.charCodeAt(counter++);
if (value >= 0xD800 && value <= 0xDBFF && counter < length) {
// high surrogate, and there is a next character
extra = string.charCodeAt(counter++);
if ((extra & 0xFC00) == 0xDC00) { // low surrogate
output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000);
} else {
// unmatched surrogate; only append this code unit, in case the next
// code unit is the high surrogate of a surrogate pair
output.push(value);
counter--;
}
} else {
output.push(value);
}
}
return output;
}
// Taken from https://mths.be/punycode
function ucs2encode(array) {
var length = array.length;
var index = -1;
var value;
var output = '';
while (++index < length) {
value = array[index];
if (value > 0xFFFF) {
value -= 0x10000;
output += stringFromCharCode(value >>> 10 & 0x3FF | 0xD800);
value = 0xDC00 | value & 0x3FF;
}
output += stringFromCharCode(value);
}
return output;
}
function checkScalarValue(codePoint) {
if (codePoint >= 0xD800 && codePoint <= 0xDFFF) {
throw Error(
'Lone surrogate U+' + codePoint.toString(16).toUpperCase() +
' is not a scalar value'
);
}
}
/*--------------------------------------------------------------------------*/
function createByte(codePoint, shift) {
return stringFromCharCode(((codePoint >> shift) & 0x3F) | 0x80);
}
function encodeCodePoint(codePoint) {
if ((codePoint & 0xFFFFFF80) == 0) { // 1-byte sequence
return stringFromCharCode(codePoint);
}
var symbol = '';
if ((codePoint & 0xFFFFF800) == 0) { // 2-byte sequence
symbol = stringFromCharCode(((codePoint >> 6) & 0x1F) | 0xC0);
}
else if ((codePoint & 0xFFFF0000) == 0) { // 3-byte sequence
checkScalarValue(codePoint);
symbol = stringFromCharCode(((codePoint >> 12) & 0x0F) | 0xE0);
symbol += createByte(codePoint, 6);
}
else if ((codePoint & 0xFFE00000) == 0) { // 4-byte sequence
symbol = stringFromCharCode(((codePoint >> 18) & 0x07) | 0xF0);
symbol += createByte(codePoint, 12);
symbol += createByte(codePoint, 6);
}
symbol += stringFromCharCode((codePoint & 0x3F) | 0x80);
return symbol;
}
function utf8encode(string) {
var codePoints = ucs2decode(string);
var length = codePoints.length;
var index = -1;
var codePoint;
var byteString = '';
while (++index < length) {
codePoint = codePoints[index];
byteString += encodeCodePoint(codePoint);
}
return byteString;
}
/*--------------------------------------------------------------------------*/
function readContinuationByte() {
if (byteIndex >= byteCount) {
throw Error('Invalid byte index');
}
var continuationByte = byteArray[byteIndex] & 0xFF;
byteIndex++;
if ((continuationByte & 0xC0) == 0x80) {
return continuationByte & 0x3F;
}
// If we end up here, its not a continuation byte
throw Error('Invalid continuation byte');
}
function decodeSymbol() {
var byte1;
var byte2;
var byte3;
var byte4;
var codePoint;
if (byteIndex > byteCount) {
throw Error('Invalid byte index');
}
if (byteIndex == byteCount) {
return false;
}
// Read first byte
byte1 = byteArray[byteIndex] & 0xFF;
byteIndex++;
// 1-byte sequence (no continuation bytes)
if ((byte1 & 0x80) == 0) {
return byte1;
}
// 2-byte sequence
if ((byte1 & 0xE0) == 0xC0) {
var byte2 = readContinuationByte();
codePoint = ((byte1 & 0x1F) << 6) | byte2;
if (codePoint >= 0x80) {
return codePoint;
} else {
throw Error('Invalid continuation byte');
}
}
// 3-byte sequence (may include unpaired surrogates)
if ((byte1 & 0xF0) == 0xE0) {
byte2 = readContinuationByte();
byte3 = readContinuationByte();
codePoint = ((byte1 & 0x0F) << 12) | (byte2 << 6) | byte3;
if (codePoint >= 0x0800) {
checkScalarValue(codePoint);
return codePoint;
} else {
throw Error('Invalid continuation byte');
}
}
// 4-byte sequence
if ((byte1 & 0xF8) == 0xF0) {
byte2 = readContinuationByte();
byte3 = readContinuationByte();
byte4 = readContinuationByte();
codePoint = ((byte1 & 0x0F) << 0x12) | (byte2 << 0x0C) |
(byte3 << 0x06) | byte4;
if (codePoint >= 0x010000 && codePoint <= 0x10FFFF) {
return codePoint;
}
}
throw Error('Invalid UTF-8 detected');
}
var byteArray;
var byteCount;
var byteIndex;
function utf8decode(byteString) {
byteArray = ucs2decode(byteString);
byteCount = byteArray.length;
byteIndex = 0;
var codePoints = [];
var tmp;
while ((tmp = decodeSymbol()) !== false) {
codePoints.push(tmp);
}
return ucs2encode(codePoints);
}
/*--------------------------------------------------------------------------*/
var utf8 = {
'version': '2.0.0',
'encode': utf8encode,
'decode': utf8decode
};
// Some AMD build optimizers, like r.js, check for specific condition patterns
// like the following:
if (
typeof define == 'function' &&
typeof define.amd == 'object' &&
define.amd
) {
define(function() {
return utf8;
});
} else if (freeExports && !freeExports.nodeType) {
if (freeModule) { // in Node.js or RingoJS v0.8.0+
freeModule.exports = utf8;
} else { // in Narwhal or RingoJS v0.7.0-
var object = {};
var hasOwnProperty = object.hasOwnProperty;
for (var key in utf8) {
hasOwnProperty.call(utf8, key) && (freeExports[key] = utf8[key]);
}
}
} else { // in Rhino or a web browser
root.utf8 = utf8;
}
}(this));
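The vendored utf8.js above is needed because JavaScript strings are sequences of UTF-16 code units, so surrogate pairs have to be recombined into code points before UTF-8 encoding and split again after decoding. Go handles this natively; a short sketch of the equivalent round trip for comparison:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// UTF-16 code units for U+1F600 (a surrogate pair), as a JS string
	// would store them; this is what ucs2decode above has to recombine.
	units := []uint16{0xD83D, 0xDE00}

	runes := utf16.Decode(units) // -> [0x1F600]
	encoded := string(runes)     // Go strings hold UTF-8 bytes

	fmt.Printf("code point: %U\n", runes[0])
	fmt.Printf("utf-8 bytes: % x\n", []byte(encoded)) // f0 9f 98 80
}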
},{}],"bignumber.js":[function(require,module,exports){
'use strict';
module.exports = BigNumber; // jshint ignore:line
@@ -7391,6 +7820,6 @@ if (typeof window !== 'undefined' && typeof window.web3 === 'undefined') {
module.exports = web3;
},{"./lib/web3":22,"./lib/web3/contract":25,"./lib/web3/httpprovider":31,"./lib/web3/iban":32,"./lib/web3/ipcprovider":33,"./lib/web3/namereg":41,"./lib/web3/transfer":44}]},{},["web3"])
},{"./lib/web3":22,"./lib/web3/contract":25,"./lib/web3/httpprovider":31,"./lib/web3/iban":32,"./lib/web3/ipcprovider":33,"./lib/web3/namereg":41,"./lib/web3/transfer":45}]},{},["web3"])
//# sourceMappingURL=web3-light.js.map
`


@@ -154,7 +154,9 @@ loop:
if err != nil {
fmt.Println("js error:", err, arguments)
}
if timer.interval {
_, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it
if timer.interval && inreg {
timer.timer.Reset(timer.duration)
} else {
delete(registry, timer)
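The extra registry lookup above guards against a callback that calls clearInterval on its own timer: clearInterval removes the timer from the registry, so the loop must re-check membership before re-arming it. A reduced, standalone Go sketch of that hazard; the types are illustrative:

package main

import (
	"fmt"
	"time"
)

type jsTimer struct {
	timer    *time.Timer
	duration time.Duration
	interval bool
}

func main() {
	registry := map[*jsTimer]struct{}{}

	t := &jsTimer{timer: time.NewTimer(10 * time.Millisecond), duration: 10 * time.Millisecond, interval: true}
	registry[t] = struct{}{}

	for i := 0; i < 5; i++ {
		<-t.timer.C
		fmt.Println("tick", i)
		if i == 2 {
			delete(registry, t) // the callback called clearInterval on itself
		}
		// Only re-arm if the callback did not clear the interval meanwhile.
		if _, inreg := registry[t]; t.interval && inreg {
			t.timer.Reset(t.duration)
		} else {
			fmt.Println("interval cleared, not re-arming")
			break
		}
	}
}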


@@ -31,8 +31,8 @@ import (
// MetricsEnabledFlag is the CLI flag name to use to enable metrics collections.
var MetricsEnabledFlag = "metrics"
// enabled is the flag specifying if metrics are enable or not.
var enabled = false
// Enabled is the flag specifying if metrics are enable or not.
var Enabled = false
// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
@@ -41,7 +41,7 @@ func init() {
for _, arg := range os.Args {
if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
glog.V(logger.Info).Infof("Enabling metrics collection")
enabled = true
Enabled = true
}
}
}
@@ -49,7 +49,7 @@ func init() {
// NewMeter create a new metrics Meter, either a real one of a NOP stub depending
// on the metrics flag.
func NewMeter(name string) metrics.Meter {
if !enabled {
if !Enabled {
return new(metrics.NilMeter)
}
return metrics.GetOrRegisterMeter(name, metrics.DefaultRegistry)
@@ -58,7 +58,7 @@ func NewMeter(name string) metrics.Meter {
// NewTimer create a new metrics Timer, either a real one of a NOP stub depending
// on the metrics flag.
func NewTimer(name string) metrics.Timer {
if !enabled {
if !Enabled {
return new(metrics.NilTimer)
}
return metrics.GetOrRegisterTimer(name, metrics.DefaultRegistry)
@@ -68,7 +68,7 @@ func NewTimer(name string) metrics.Timer {
// process.
func CollectProcessMetrics(refresh time.Duration) {
// Short circuit if the metrics system is disabled
if !enabled {
if !Enabled {
return
}
// Create the various data collectors
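Exporting Enabled lets other packages short-circuit their own collection work instead of relying on the NOP meters. A hedged sketch of how a caller might use it; the import path assumes go-ethereum's metrics wrapper package as shown in this diff:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// NewMeter already returns a NOP stub when metrics are off, but the
	// exported flag lets callers skip expensive collection loops entirely.
	if metrics.Enabled {
		go metrics.CollectProcessMetrics(3 * time.Second)
	}

	meter := metrics.NewMeter("example/requests")
	meter.Mark(1)
}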


@@ -19,6 +19,8 @@ package miner
import (
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@@ -35,6 +37,8 @@ type CpuAgent struct {
index int
pow pow.PoW
isMining int32 // isMining indicates whether the agent is currently mining
}
func NewCpuAgent(index int, pow pow.PoW) *CpuAgent {
@@ -61,6 +65,10 @@ func (self *CpuAgent) Start() {
self.mu.Lock()
defer self.mu.Unlock()
if !atomic.CompareAndSwapInt32(&self.isMining, 0, 1) {
return // agent already started
}
self.quit = make(chan struct{})
// creating current op ch makes sure we're not closing a nil ch
// later on
@@ -99,10 +107,11 @@ done:
case <-self.workCh:
default:
close(self.workCh)
break done
}
}
atomic.StoreInt32(&self.isMining, 0)
}
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
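The CompareAndSwap on isMining makes Start idempotent: a second call returns immediately instead of re-creating channels and spawning another update loop. A stripped-down Go sketch of the guard; the agent type here is illustrative, not the CpuAgent itself:

package main

import (
	"fmt"
	"sync/atomic"
)

type agent struct {
	isMining int32
	quit     chan struct{}
}

func (a *agent) Start() bool {
	if !atomic.CompareAndSwapInt32(&a.isMining, 0, 1) {
		return false // already running
	}
	a.quit = make(chan struct{})
	go func() {
		<-a.quit
		atomic.StoreInt32(&a.isMining, 0) // mark stopped once the loop exits
	}()
	return true
}

func main() {
	a := &agent{}
	fmt.Println(a.Start()) // true: first caller wins
	fmt.Println(a.Start()) // false: the guard rejects the double start
	close(a.quit)
}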


@@ -17,6 +17,7 @@
package miner
import (
"errors"
"math/big"
"sync"
"time"
@@ -90,7 +91,7 @@ func (a *RemoteAgent) GetHashRate() (tot int64) {
return
}
func (a *RemoteAgent) GetWork() [3]string {
func (a *RemoteAgent) GetWork() ([3]string, error) {
a.mu.Lock()
defer a.mu.Unlock()
@@ -110,9 +111,9 @@ func (a *RemoteAgent) GetWork() [3]string {
res[2] = common.BytesToHash(n.Bytes()).Hex()
a.work[block.HashNoNonce()] = a.currentWork
return res, nil
}
return res
return res, errors.New("No work available yet, don't panic.")
}
// Returns true or false, but does not indicate if the PoW was correct
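GetWork now reports explicitly when no current work package exists instead of returning an empty result, so callers can distinguish "not ready yet" from a real payload. A minimal, self-contained sketch of how a caller might react; getWork here is a stand-in, and the retry policy is illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNoWork = errors.New("no work available yet")

// getWork stands in for RemoteAgent.GetWork, which now returns an error
// while the miner has not produced a current work package.
func getWork(ready bool) ([3]string, error) {
	if !ready {
		return [3]string{}, errNoWork
	}
	return [3]string{"0xheader", "0xseed", "0xtarget"}, nil
}

func main() {
	for attempt := 0; ; attempt++ {
		work, err := getWork(attempt > 1)
		if err != nil {
			fmt.Println("miner not ready, retrying:", err)
			time.Sleep(10 * time.Millisecond)
			continue
		}
		fmt.Println("got work:", work)
		return
	}
}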


@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@@ -100,7 +101,7 @@ type worker struct {
eth core.Backend
chain *core.ChainManager
proc *core.BlockProcessor
chainDb common.Database
chainDb ethdb.Database
coinbase common.Address
gasPrice *big.Int
@@ -278,12 +279,12 @@ func (self *worker) wait() {
glog.V(logger.Error).Infoln("Invalid block found during mining")
continue
}
if err := core.ValidateHeader(self.eth.BlockProcessor().Pow, block.Header(), parent, true); err != nil && err != core.BlockFutureErr {
if err := core.ValidateHeader(self.eth.BlockProcessor().Pow, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
glog.V(logger.Error).Infoln("Invalid header on mined block:", err)
continue
}
stat, err := self.chain.WriteBlock(block, false)
stat, err := self.chain.WriteBlock(block)
if err != nil {
glog.V(logger.Error).Infoln("error writing block to chain", err)
continue
@@ -434,8 +435,8 @@ func (self *worker) commitNewWork() {
tstart := time.Now()
parent := self.chain.CurrentBlock()
tstamp := tstart.Unix()
if tstamp <= int64(parent.Time()) {
tstamp = int64(parent.Time()) + 1
if parent.Time().Cmp(new(big.Int).SetInt64(tstamp)) >= 0 {
tstamp = parent.Time().Int64() + 1
}
// this will ensure we're not going off too far in the future
if now := time.Now().Unix(); tstamp > now+4 {
@@ -448,12 +449,12 @@ func (self *worker) commitNewWork() {
header := &types.Header{
ParentHash: parent.Hash(),
Number: num.Add(num, common.Big1),
Difficulty: core.CalcDifficulty(uint64(tstamp), parent.Time(), parent.Number(), parent.Difficulty()),
Difficulty: core.CalcDifficulty(uint64(tstamp), parent.Time().Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: core.CalcGasLimit(parent),
GasUsed: new(big.Int),
Coinbase: self.coinbase,
Extra: self.extra,
Time: uint64(tstamp),
Time: big.NewInt(tstamp),
}
previous := self.current
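Header timestamps are now carried as *big.Int, so the worker compares and assigns them through the math/big API rather than plain integers. A standalone sketch of the timestamp clamp used above; the parent time here is faked for illustration:

package main

import (
	"fmt"
	"math/big"
	"time"
)

func main() {
	parentTime := big.NewInt(time.Now().Unix()) // stand-in for parent.Time()

	tstamp := time.Now().Unix()
	// Never reuse or precede the parent's timestamp: bump by one second.
	if parentTime.Cmp(new(big.Int).SetInt64(tstamp)) >= 0 {
		tstamp = parentTime.Int64() + 1
	}

	headerTime := big.NewInt(tstamp) // header.Time is now a *big.Int
	fmt.Println("header time:", headerTime)
}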
@@ -533,14 +534,12 @@ func (self *worker) commitNewWork() {
// create the new block whose nonce will be mined.
work.Block = types.NewBlock(header, work.txs, uncles, work.receipts)
work.Block.Td = new(big.Int).Set(core.CalcTD(work.Block, self.chain.GetBlock(work.Block.ParentHash())))
// We only care about logging if we're actually mining.
if atomic.LoadInt32(&self.mining) == 1 {
glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart))
self.logLocalMinedBlocks(work, previous)
}
self.push(work)
}

Some files were not shown because too many files have changed in this diff.