Compare commits

...

235 Commits

Author SHA1 Message Date
465e810c66 VERSION, cmd/geth: bumped version 1.2.2 2015-10-02 12:55:57 +02:00
274f86cd86 eth/downloader: match capabilities when querying idle peers 2015-10-02 12:50:26 +02:00
b527c9c718 core: deadlock in chainmanager after posting RemovedTransactionEvent
This PR solves an issue where the chain manager posts a
`RemovedTransactionEvent` while holding its own lock: the tx pool then
tries to acquire the chain manager lock, which was locked prior to posting
the event. This results in a deadlock in the core.
2015-10-02 12:41:10 +02:00
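
The deadlock above follows a classic pattern: an event is posted while the poster's own mutex is held, and the subscriber synchronously calls back into the poster. A minimal Go sketch of that pattern (the type and method names are illustrative stand-ins, not the real core API):

```go
package main

import "sync"

// chainManager and its subscribers are illustrative stand-ins, not the real
// core types. reorg posts an "event" while holding cm.mu; the subscriber
// (think: the tx pool handling RemovedTransactionEvent) calls back into the
// chain manager, which tries to take the same mutex again and blocks forever.
type chainManager struct {
	mu   sync.Mutex
	subs []func()
}

func (cm *chainManager) reorg() {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	for _, notify := range cm.subs {
		notify() // event delivered while cm.mu is still held
	}
}

func (cm *chainManager) currentBlock() int {
	cm.mu.Lock() // deadlocks: reorg() already holds cm.mu
	defer cm.mu.Unlock()
	return 0
}

func main() {
	cm := &chainManager{}
	cm.subs = append(cm.subs, func() { _ = cm.currentBlock() })
	cm.reorg() // never returns; Go's runtime deadlock detector fires
}
```

Running the sketch blocks forever and trips Go's runtime deadlock detector.
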
9666db2a44 VERSION, cmd/geth: bumped version 1.2.1 2015-10-01 10:38:43 +02:00
e3ac56d502 Merge pull request #1859 from fjl/fix-discover-refresh-race
p2p/discover: fix race involving the seed node iterator
2015-09-30 08:21:40 -07:00
32dda97602 p2p/discover: ignore packet version numbers
The strict matching can get in the way of protocol upgrades.
2015-09-30 16:23:03 +02:00
631bf36102 p2p/discover: remove unused lastLookup field 2015-09-30 16:23:03 +02:00
b4374436f3 p2p/discover: fix race involving the seed node iterator
nodeDB.querySeeds was not safe for concurrent use but could be called
concurrently on multiple goroutines in the following case:

- the table was empty
- a timed refresh started
- a lookup was started and initiated a refresh

These conditions are unlikely to coincide during normal use, but are
much more likely to occur all at once when the user's machine just woke
from sleep. The root cause of the issue is that querySeeds reused the
same leveldb iterator until it was exhausted.

This commit moves the refresh scheduling logic into its own goroutine
(so only one refresh is ever active) and changes querySeeds to not use
a persistent iterator. The seed node selection is now more random and
ignores nodes that have not been contacted in the last 5 days.
2015-09-30 16:23:03 +02:00
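
A minimal sketch of the "single refresh goroutine" idea described above (channel shapes, interval, and function names are assumptions for illustration, not the p2p/discover internals): every refresh trigger, timed or lookup-initiated, is funneled through one goroutine, so two refreshes can never run at the same time and no iterator is shared between callers.

```go
package main

import (
	"fmt"
	"time"
)

// refreshLoop funnels every refresh trigger -- timed or lookup-initiated --
// through a single goroutine, so only one refresh is ever active.
func refreshLoop(requests <-chan chan struct{}, doRefresh func()) {
	ticker := time.NewTicker(30 * time.Minute) // periodic refresh
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			doRefresh()
		case done := <-requests: // a lookup found the table empty
			doRefresh()
			close(done) // let the waiting lookup continue
		}
	}
}

func main() {
	requests := make(chan chan struct{})
	go refreshLoop(requests, func() {
		fmt.Println("refresh: pick fresh random seed nodes, no shared iterator")
	})

	done := make(chan struct{})
	requests <- done // lookup-initiated refresh
	<-done
}
```
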
46ad5a5f5b Merge pull request #1852 from obscuren/filter-nil-fix
xeth: fixed nil pointer of filter retrieval
2015-09-30 03:06:36 -07:00
9b94076717 Merge pull request #1854 from karalabe/badhasherror-formatting-loop
core: fix a formatting loop in BadHashError
2015-09-29 02:26:01 -07:00
b8b996be74 core: fix a formatting loop in BadHashError 2015-09-29 09:11:38 +03:00
b9359981f4 xeth: fixed nil pointer of filter retrieval
This fix addresses an issue with filters that were (possibly) not yet
added to the filter queues but were expected. I've added additional nil
checks making sure it doesn't crash and swapped the installation of the
filter around so it's installed before use.

Closes #1665
2015-09-25 13:56:53 +02:00
7977e87ce1 Merge pull request #1843 from karalabe/cleanup-downloader-channel
eth/downloader: always send termination wakes, clean leftover
2015-09-25 04:34:59 -07:00
69d86442a5 Merge pull request #1803 from Gustav-Simonsson/badhashes
core: Add BadHashErr and test for BadHashes handling
2015-09-23 11:10:25 -07:00
36f46a61a7 Merge pull request #1844 from obscuren/version-file
VERSION: added version
2015-09-23 05:48:00 -07:00
6e1dc321f4 VERSION: added version 2015-09-23 14:47:20 +02:00
7a2a918067 Merge pull request #1842 from fjl/rpc-fix-unknown-block
rpc/api: don't crash for unknown blocks
2015-09-23 12:57:33 +02:00
f459a3f0ae eth/downloader: always send termination wakes, clean leftover 2015-09-23 12:39:17 +03:00
e456f27795 Merge pull request #1827 from Gustav-Simonsson/common_tests
tests: add test for StateTests/stCallCodes.json
2015-09-23 02:12:59 -07:00
90cd8ae9f2 rpc/api: don't crash for unknown blocks
Most eth RPC calls that work with blocks crashed when the block was not
found because they called Hash on a nil block. This is a regression
introduced in cdc2662c40 (#1779).

While here, remove the insane conversions in get*CountBy*. There is no
need to construct a complete BlockRes and convert
int->int64->*big.Int->[]byte->hexnum->string just to format the length of
a slice as hex.
2015-09-22 23:59:26 +02:00
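
As a hedged illustration of the simplification mentioned above (not the actual rpc/api code), formatting a slice length as a hex quantity needs only a single call:

```go
package main

import "fmt"

func main() {
	txs := make([]struct{}, 26) // stand-in for a block's transaction list

	// Equivalent of the removed int->int64->*big.Int->[]byte->hexnum->string
	// chain: format the length directly as a hex quantity.
	fmt.Printf("%#x\n", len(txs)) // prints 0x1a
}
```
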
70b6174748 cmd/geth, core: make "geth blocktest" work again
The test genesis block was not written properly, so block insertion failed
immediately.

While here, fix the panic when shutting down "geth blocktest" with
Ctrl+C. The signal handler is now installed automatically, causing
ethereum.Stop to crash because everything is already stopped.
2015-09-22 23:55:31 +02:00
bfde1a4305 core: Add BadHashErr and test for BadHashes handling 2015-09-22 18:02:26 +02:00
e56cbc225e Merge pull request #1835 from karalabe/make-cross
makefile: built in cross compilation targets
2015-09-21 11:47:10 -07:00
7bf8e949e7 Merge pull request #1669 from obscuren/tx-pool-auto-resend
core, xeth: chain reorg move missing transactions to transaction pool
2015-09-21 11:45:59 -07:00
6a05c569f2 makefile: built in cross compilation targets 2015-09-21 21:36:01 +03:00
eaa4473dbd core, core/types: readd transactions after chain re-org
Added a `Difference` method to `types.Transactions` which sets the
receiver to the difference of a to b (NOTE: not a **and** b).

The transaction pool subscribes to RemovedTransactionEvent, adding back
any transactions potentially missing from the chain.

When a chain re-org occurs, remove any transactions that were removed
from the canonical chain during the re-org, as well as the receipts that
were generated in the process.

Closes #1746
2015-09-21 20:33:28 +02:00
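
A small sketch of the set-difference idea behind the `Difference` method (transactions are modelled as plain strings here; the real method keys on transaction hashes): keep the transactions of a that do not appear in b, which is exactly what is needed to find transactions dropped by a re-org.

```go
package main

import "fmt"

// txDifference returns the elements of a that are not in b (a "difference of
// a to b", not a symmetric difference).
func txDifference(a, b []string) []string {
	remove := make(map[string]struct{}, len(b))
	for _, tx := range b {
		remove[tx] = struct{}{}
	}
	keep := make([]string, 0, len(a))
	for _, tx := range a {
		if _, ok := remove[tx]; !ok {
			keep = append(keep, tx)
		}
	}
	return keep
}

func main() {
	oldBranch := []string{"tx1", "tx2", "tx3"} // txs on the removed branch
	newBranch := []string{"tx2", "tx3", "tx4"} // txs on the new canonical branch
	// tx1 is no longer included anywhere, so it goes back into the pool.
	fmt.Println(txDifference(oldBranch, newBranch)) // [tx1]
}
```
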
be76a68aea cmd/geth: changed version number to 1.2.0
Changed the version number of geth to 1.2.0 so that dev builds are now properly built (instead of master). Note to self: increase the version number to 1.2.1 for our next actual release.
2015-09-21 16:13:07 +02:00
12c0afe4fe Merge pull request #1822 from karalabe/contain-pow
core: separate and contain POW verifier, extensive tests
2015-09-21 06:52:11 -07:00
5621308949 tests: add test for StateTests/stCallCodes.json 2015-09-21 11:34:02 +02:00
399c920380 core: separate and contain POW verifier, extensive tests 2015-09-21 10:24:49 +03:00
e40b447fea Merge pull request #1814 from Gustav-Simonsson/common_tests
tests: update common test wrappers and test files
2015-09-18 16:34:54 -07:00
b94b9b0158 Merge pull request #1817 from obscuren/nonce-fix
core: transaction nonce recovery
2015-09-18 15:56:10 -07:00
47ca6904b3 tests: use lastblockhash field to validate reorgs and block headers 2015-09-18 17:48:31 +02:00
075815e5ff tests: update common test wrappers and test files 2015-09-18 13:08:36 +02:00
b60a27627b core: transaction nonce recovery fix
When the transaction state recovery kicked in, it assigned the last
(incorrect) nonce to the pending state, which caused transactions with
the same nonce to occur.

Added a test for nonce recovery.
2015-09-18 11:59:21 +02:00
216c486a3a Merge pull request #1815 from karalabe/chain-maker-timer
core: allow modifying test-chain block times
2015-09-18 11:23:31 +02:00
ac6248ed7a Merge pull request #1793 from jeffallen/typo
common: Update README.md for the current package name
2015-09-17 19:26:49 +02:00
bdf4fd6091 Merge pull request #1813 from kobigurk/develop
cmd/geth: extradata is correctly initialized with console
2015-09-17 19:25:32 +02:00
69f48e4689 Merge pull request #1811 from bas-vk/timer-clearinterval
timer bugfix when clearInterval was called from within the callback
2015-09-17 19:21:49 +02:00
6f3cb12924 core: allow modifying test-chain block times 2015-09-17 13:43:52 +03:00
58fbcaa750 Merge pull request #1810 from karalabe/pure-header-verifications-2
core, eth, miner: use pure header validation
2015-09-16 14:21:12 -07:00
1a1a1ee4ff cmd/geth: extradata is correctly initialized with console 2015-09-16 21:01:21 +03:00
985b5f29ed Merge pull request #1801 from fjl/ethdb
all: move common.Database to ethdb and add NewBatch
2015-09-16 07:50:14 -07:00
2f65ddc501 jsre: timer bugfix when clearInterval was called from within the callback 2015-09-16 11:57:33 +02:00
1cc2f08041 Merge pull request #1784 from karalabe/standard-sync-stats
eth, rpc: standardize the chain sync progress counters
2015-09-16 02:31:58 -07:00
821619e1c3 core, eth, miner: use pure header validation 2015-09-16 10:46:28 +03:00
e9a80518c7 Merge pull request #1744 from kobigurk/develop
adds extradata flag
2015-09-15 13:56:10 -07:00
321733ab23 cmd/geth: adds extradata flag 2015-09-15 23:35:36 +03:00
d4d3fc6a70 jsre, rpc/api: pull in new web3 and use hex numbers 2015-09-15 17:05:12 +03:00
99b62f36b6 eth/downloader: header-chain order and ancestry check 2015-09-15 14:45:53 +03:00
0a7d059b6a eth, rpc: standardize the chain sync progress counters 2015-09-15 14:45:53 +03:00
55bdcfaeac Merge pull request #1806 from ethersphere/solc2
new solc api - late fixes
2015-09-15 01:08:30 -07:00
3a5e7ed9a6 new solc api:
* use legacy version matcher
* optimise just a boolean flag
* skipf for messages in tests
2015-09-15 00:35:22 +02:00
b252589960 ethdb: remove Flush 2015-09-14 23:36:30 +02:00
d581dfee5f ethdb: copy stored memdb values
Storing a value in LevelDB copies the bytes; modifying the value
afterwards does not affect the content of the database. This commit
ensures that MemDatabase satisfies the same property.
2015-09-14 23:36:30 +02:00
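
A minimal sketch of the copy-on-Put property this commit gives MemDatabase (the type below is an illustrative stand-in, not the ethdb implementation):

```go
package main

import (
	"fmt"
	"sync"
)

// memDB copies values on Put, matching LevelDB's behaviour: mutating the
// caller's slice afterwards does not change what is stored.
type memDB struct {
	mu sync.RWMutex
	kv map[string][]byte
}

func newMemDB() *memDB { return &memDB{kv: make(map[string][]byte)} }

func (m *memDB) Put(key, value []byte) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.kv[string(key)] = append([]byte(nil), value...) // defensive copy
}

func (m *memDB) Get(key []byte) []byte {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.kv[string(key)]
}

func main() {
	db := newMemDB()
	v := []byte("hello")
	db.Put([]byte("k"), v)
	v[0] = 'X'                               // mutate the caller's slice ...
	fmt.Println(string(db.Get([]byte("k")))) // ... the store still says "hello"
}
```
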
8b32f10f16 ethdb: add NewBatch 2015-09-14 23:36:30 +02:00
8c4dab77ba all: move common.Database to package ethdb 2015-09-14 23:36:30 +02:00
071e2cd08e Merge pull request #1786 from ethersphere/solc
common/compiler: new solc API
2015-09-14 23:32:40 +02:00
47b9c640f5 Merge pull request #1797 from karalabe/ensure-ipcpath-exists
rpc/comms: fix #1795, ensure IPC path exists before binding
2015-09-14 14:45:11 +02:00
a9c809b441 Merge pull request #1792 from jeffallen/uuid
Change go-uuid to use the current supported repository.
2015-09-14 12:06:59 +02:00
0d40727775 Change go-uuid to use the current supported repository. 2015-09-12 16:49:24 +06:00
17b729759b Solidity Compiler - solc new API
* adapt to new compiler versioning
* use compiler version as language version
* implement new solc API for versions >= 0.1.[2-9][0-9]* fixes #1770
* add optimize=1 to options
* backward compatibility (for now) for <= 0.1.1, and old versions (0.[2-9][0-9]*.[0-9]+)
* introduce compilerOptions to ContractInfo
* clean up flair, include full version string to version line and ContractInfo
2015-09-12 10:52:52 +02:00
55ed8d108d Merge pull request #1789 from Gustav-Simonsson/core_remove_unused_functions
core, core/vm, core/state: remove unused functions
2015-09-11 15:29:27 -07:00
f1a4b330dd Merge pull request #1796 from karalabe/ethash-android-support
godeps: pull in ethash android fix
2015-09-11 15:26:01 -07:00
0eac601b5b Merge pull request #1779 from karalabe/split-block-storage-3000
core: split the db blocks into components, move TD out top level
2015-09-11 08:10:37 -07:00
cdc2662c40 core: split out TD from database and all internals 2015-09-11 17:42:25 +03:00
2b339cbbd8 core, eth: split the db blocks into headers and bodies 2015-09-11 17:42:25 +03:00
3e6964b841 rpc/comms: fix #1795, ensure IPC path exists before binding 2015-09-11 17:03:31 +03:00
c6013725a8 godeps: pull in ethash android fix 2015-09-11 15:53:23 +03:00
4e075e4013 Merge pull request #1773 from obscuren/dev-mode
cmd/geth, cmd/utils, eth: added dev mode flag
2015-09-10 21:15:33 +02:00
b81a6e6ab8 core, core/vm, core/state: remove unused functions 2015-09-10 21:10:58 +02:00
62bbf8a09e Merge pull request #1778 from fjl/rlp-trie-changes
rlp: precursor changes for trie, p2p
2015-09-10 12:02:16 -07:00
4ce3dfe9c8 common: Update README.md for the current package name 2015-09-10 23:59:38 +06:00
fc8b246109 rlp: move ListSize to raw.go 2015-09-10 19:41:51 +02:00
24bb68e7cf rlp: add RawValue 2015-09-10 19:41:51 +02:00
bc17dba8fb rlp: add Split functions
These functions allow destructuring of raw rlp-encoded bytes
without the overhead of reflection or copying.
2015-09-10 19:41:51 +02:00
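
To illustrate destructuring raw RLP without reflection or copying, here is a small self-contained sketch that peels the first item off an encoded byte slice; it handles only short-form headers and is not the rlp package's actual Split implementation:

```go
package main

import (
	"errors"
	"fmt"
)

// splitFirst returns the payload of the first RLP item in b and the
// remaining bytes, without copying. Long-form headers are omitted to keep
// the sketch small.
func splitFirst(b []byte) (content, rest []byte, err error) {
	if len(b) == 0 {
		return nil, nil, errors.New("empty input")
	}
	switch prefix := b[0]; {
	case prefix < 0x80: // single byte, is its own content
		return b[:1], b[1:], nil
	case prefix < 0xB8: // short string: 0x80 + length
		n := int(prefix - 0x80)
		if len(b) < 1+n {
			return nil, nil, errors.New("truncated string")
		}
		return b[1 : 1+n], b[1+n:], nil
	case prefix >= 0xC0 && prefix < 0xF8: // short list: 0xC0 + length
		n := int(prefix - 0xC0)
		if len(b) < 1+n {
			return nil, nil, errors.New("truncated list")
		}
		return b[1 : 1+n], b[1+n:], nil
	default:
		return nil, nil, errors.New("long form not handled in this sketch")
	}
}

func main() {
	// RLP for the list ["cat", "dog"]: 0xC8 0x83 'c' 'a' 't' 0x83 'd' 'o' 'g'
	raw := []byte{0xC8, 0x83, 'c', 'a', 't', 0x83, 'd', 'o', 'g'}
	list, _, _ := splitFirst(raw)      // payload of the outer list
	first, rest, _ := splitFirst(list) // "cat"
	second, _, _ := splitFirst(rest)   // "dog"
	fmt.Println(string(first), string(second))
}
```
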
ac32f52ca6 rlp: fix encReader returning nil buffers to the pool
The bug can cause crashes if Read is called after EOF has been returned.
No code performs such calls right now, but hitting the bug gets more
likely as rlp.EncodeToReader gets used in more places.
2015-09-10 19:12:32 +02:00
90f1fe0ed2 Merge pull request #1781 from Gustav-Simonsson/state_object_copy
core/state: deleted field in StateObject Copy() and unit test
2015-09-09 18:42:36 +02:00
28b13a4d1e Merge pull request #1780 from bas-vk/miner-crash
agent/miner Prevent the CpuAgent from being started multiple times
2015-09-09 04:49:28 -07:00
f04b3a6f29 cmd/geth, cmd/utils, eth: added dev mode flag
Dev mode enabled some debugging flags such as:

* VM debugging mode
* Simpler proof of work
* Whisper enabled by default
* Datadir to a tmp datadir
* Maxpeers set to 0
* Gas price of 0
* Random listen port
2015-09-09 08:53:05 +02:00
bf879ef230 core/state: test formatting adhering to Go convention 2015-09-09 00:26:18 +02:00
004ed786b4 core/state: deleted field in StateObject Copy() and unit test 2015-09-08 15:56:11 +02:00
652eea71fe put unlock after lock 2015-09-08 12:42:29 +02:00
618065895b agent/miner Prevent the CpuAgent from being started multiple times 2015-09-08 11:27:55 +02:00
edaea69817 Merge pull request #1777 from hectorchu/develop
rpc/comms: fix bug attaching the console over http
2015-09-08 11:02:09 +03:00
6fe46cc743 Merge pull request #1774 from bas-vk/console-crash
cmd/geth Autocompletion bugfix which let the console crash
2015-09-08 10:33:09 +03:00
4ea81f170a rpc/comms: fix bug attaching the console over http 2015-09-07 15:09:59 +01:00
f69121357d cmd/geth Autocompletion bugfix which let the console crash 2015-09-06 16:25:55 +02:00
e2d7c1a523 Merge pull request #1752 from karalabe/fix-eth61-test
eth/downloader: fix race causing occasional test failure
2015-09-03 15:52:18 +02:00
ebbe25ee71 Merge pull request #1764 from kobigurk/honor_ipc_datadir
honors datadir when attaching
2015-09-03 10:48:23 +03:00
1a86adc5a2 cmd/geth: honor datadir when attaching 2015-09-03 10:28:30 +03:00
e98854588b Merge pull request #1761 from CJentzsch/patch-3
fix block time issue
2015-09-02 15:13:14 -07:00
0fda4c4e15 fix block time issue
currently, under normal circumstances, you always set the timestamp to previous.Time() + 1.
credits to https://www.reddit.com/r/ethereum/comments/3jcs5r/code_avg_block_time_vs_difficulty_adjustment/cuoi4op

style
2015-09-03 00:05:05 +02:00
b2c17a5a63 Merge pull request #1726 from Gustav-Simonsson/update_tests
Add TestBcForkUncle tests & update JSON files
2015-09-02 22:02:44 +02:00
e9b031b88b Merge pull request #1755 from fjl/coinbase
core: improve block gas tracking
2015-09-01 23:36:05 +02:00
00b45acb9e core: improve block gas tracking 2015-09-01 23:11:03 +02:00
1ffc5b0cfd Merge pull request #1751 from maran/fix_filters
core: Filter on addresses should work as an OR not an AND.
2015-09-01 20:10:27 +02:00
5e4cd599eb Merge pull request #1745 from mrdomino/obsd-build-master
Pull in ethash and go-isatty updates
2015-09-01 20:06:13 +02:00
1f1d73ab74 eth/downloader: fix race causing occasional test failure 2015-09-01 16:11:14 +03:00
67225de255 Filter on addresses should work as an OR not an AND. 2015-09-01 09:19:45 +02:00
540eb3d02d Pull in ethash and go-isatty updates
Fixes build on OpenBSD.
2015-08-31 12:14:32 -04:00
fe8093b71f Add TestBcForkUncleTests and update JSON files 2015-08-31 16:45:00 +02:00
9dc23ce284 Merge pull request #1742 from fjl/rpc-receipt-root
rpc: add receiptRoot to getBlock* responses
2015-08-31 14:50:21 +02:00
1801748ccd Merge pull request #1734 from fjl/ldflags-warning-go1.5
build: avoid -X separator warning with Go >= 1.5
2015-08-31 14:49:50 +02:00
8b12bcc0ac rpc: add receiptRoot to getBlock* responses
Fixes #1679
2015-08-29 11:12:01 +02:00
e1037bd0cf Merge pull request #1724 from Gustav-Simonsson/get_work
rpc: return error code for eth_getWork when no work ready
2015-08-29 10:54:10 +02:00
2d1ced8759 Merge pull request #1739 from bas-vk/empty-password
rpc/api allow empty password
2015-08-28 13:14:51 +02:00
39e9560600 rpc/api allow empty password 2015-08-28 12:49:41 +02:00
d9addf79fa Improve error string and remove unneeded else clause 2015-08-28 03:42:01 +02:00
cfd84a6ad9 build: avoid -X separator warning with Go >= 1.5 2015-08-27 13:26:13 +02:00
6ec13e7e2b Merge pull request #1701 from karalabe/eth62-sync-rebase
eth: implement eth/62 synchronization logic
2015-08-27 00:03:59 +02:00
79b644c7a3 Merge pull request #1717 from karalabe/forward-solidity-errors
common/compiler: fix #1598, expose solidity errors
2015-08-26 19:00:11 +02:00
14370a2260 Merge pull request #1718 from caktux/develop
add missing shh_getMessages to RPC mappings
2015-08-26 18:55:51 +02:00
3df6f3fc14 Merge pull request #1721 from bas-vk/console-error-parsing
Improved console error handling
2015-08-26 18:55:31 +02:00
847794a321 Merge pull request #1722 from bas-vk/remote-deleteaccount
Remove personal.deleteAccount from RPC interface
2015-08-26 18:02:51 +02:00
829201382b rpc: return error code for eth_getWork when no work ready 2015-08-26 12:46:50 +02:00
5dd2462816 rpc/api - remove personal.deleteAccount from RPC interface 2015-08-26 11:39:43 +02:00
f448310eef bugfix console error handling 2015-08-26 11:33:02 +02:00
101418b275 common/compiler: fix #1598, expose solidity errors 2015-08-26 10:04:23 +03:00
a1d8015817 add missing shh_getMessages to RPC mappings 2015-08-25 14:42:57 -04:00
17f65cd1e5 eth: update metrics collection to handle eth/62 algos 2015-08-25 17:48:47 +03:00
47a7fe5d22 eth: port the synchronisation algo to eth/62 2015-08-25 17:48:47 +03:00
abce09954b Merge pull request #1711 from Gustav-Simonsson/timestamp_big_int
Add tests for uncle timestamps and refactor timestamp type
2015-08-25 15:49:36 +02:00
a219159e7e Merge pull request #1710 from bas-vk/useragent
user agent messages were dumped in some cases
2015-08-25 12:23:25 +02:00
7324176f70 Add tests for uncle timestamps and refactor timestamp type 2015-08-25 04:46:11 +02:00
ca88e18f59 eth: kill off protocol eth/60 in preparation for eth/62 2015-08-24 17:57:28 +03:00
42f44dda54 eth, eth/downloader: handle header requests, table driven proto tests 2015-08-24 17:57:28 +03:00
d910148a96 Set ipc channel as user agent client 2015-08-24 12:41:34 +02:00
c51e153b5c eth, metrics, p2p: prepare metrics and net packets to eth/62 2015-08-21 10:30:57 +03:00
d51d0022ce cmd/geth: bumped version 1.1.0 2015-08-20 21:43:36 +02:00
3793991c0e remove 0x 2015-08-20 18:50:47 +02:00
b884d6ebaa canary update 2015-08-20 18:38:21 +02:00
36f7fe61c3 core, tests: Double SUICIDE fix 2015-08-20 18:22:50 +02:00
54088b0b8b cmd/geth: bumped version 1.0.3 2015-08-20 13:08:08 +02:00
9fb7bc7443 geth: bumped version 1.0.2 2015-08-19 23:05:39 +02:00
e2d44814a5 Merge pull request #1694 from obscuren/hide-fdtrack
fdtrack: hide message
2015-08-19 13:50:54 -07:00
bd3a44cac9 Merge pull request #1652 from bas-vk/autoreconnect
rpc/comms: reconnect ipc client after write error
2015-08-19 13:29:51 -07:00
61a6911eeb Merge pull request #1689 from fjl/discover-ignore-temp-errors
p2p, p2p/discover: small fixes
2015-08-19 12:55:40 -07:00
9bf17eb05a rpc/comms reconnect ipc client after write error 2015-08-19 21:48:56 +02:00
269c5c7107 Revert "fdtrack: temporary hack for tracking file descriptor usage"
This reverts commit 5c949d3b3b.
2015-08-19 21:46:01 +02:00
382d35bf40 Merge pull request #1688 from karalabe/fix-double-imports
eth: fix an issue with pulling and inserting blocks twice
2015-08-19 08:19:37 -07:00
dd54fef898 p2p/discover: don't attempt to replace nodes that are being replaced
PR #1621 changed Table locking so the mutex is not held while a
contested node is being pinged. If multiple nodes ping the local node
during this time window, multiple ping packets will be sent to the
contested node. The changes in this commit prevent multiple packets by
tracking whether the node is being replaced.
2015-08-19 14:57:16 +02:00
edccc7ae34 p2p: continue listening after temporary errors 2015-08-19 14:39:04 +02:00
7d5ff770e2 p2p/discover: continue reading after temporary errors
Might solve #1579
2015-08-19 14:38:55 +02:00
c6a11fe372 Merge pull request #1680 from maran/fix_removedb
cmd/geth: Fix chain purging from cmd line
2015-08-19 05:17:22 -07:00
941920f2aa eth: fix an issue with pulling and inserting blocks twice 2015-08-19 15:14:26 +03:00
3b997c3e16 Merge pull request #1454 from ethersphere/frozen-cryptoclean
crypto: remove obsolete code
2015-08-19 02:39:02 -07:00
0737cbc5c1 Merge pull request #1683 from ethereum/travis
Switch from Coveralls to Codecov code coverage service
2015-08-18 14:26:14 -07:00
227ff4d2d6 Merge pull request #1682 from obscuren/readme-improvements
Updated README, Added CONTRIBUTING
2015-08-18 14:16:42 -07:00
18d450b2d0 Updated README, Added CONTRIBUTING 2015-08-18 23:00:15 +02:00
cc87551edc Codecov integration 2015-08-18 22:46:48 +02:00
d0dc1b4a60 Merge pull request #1681 from obscuren/miner-receipt-fix
core, miner: write miner receipts
2015-08-18 12:53:26 -07:00
b4369e1015 core, miner: write miner receipts 2015-08-18 21:46:26 +02:00
4d5501c46a cmd/geth: Fix chain purging from cmd line 2015-08-18 15:56:37 +02:00
d4da2f630e crypto: remove obsolete key files 2015-08-18 01:25:04 +02:00
e1da124415 Merge pull request #1675 from obscuren/submithashrate-change
rpc/api: return boolean value for eth_submitHashrate
2015-08-17 11:49:15 -07:00
36081505c4 Merge pull request #1673 from karalabe/fix-api-xeth-responses
rpc: update the xeth over RPC API to use the success/failure messages
2015-08-17 11:47:55 -07:00
2497f28aa9 Merge pull request #1627 from zsfelfoldi/gpo
GPO update
2015-08-17 06:37:58 -07:00
49ece3155c GPO update 2015-08-17 15:20:33 +02:00
8839fee415 rpc/api: return boolean value for eth_submitHashrate 2015-08-17 15:09:30 +02:00
ff1f6fa09d Merge pull request #1649 from maran/pending_tx_response
rpc/api: format pendingTx response. Fixes #1648
2015-08-17 06:02:08 -07:00
7ea30f18f9 Merge pull request #1674 from tgerring/bootnodes
Added SG bootnode
2015-08-17 05:56:38 -07:00
12805c738c Merge pull request #1667 from fjl/pretty-printer-improvements
jsre: pretty printer improvements
2015-08-17 05:55:09 -07:00
80b294c3c7 Update CPP pubkey 2015-08-17 14:51:27 +02:00
8884f856ef Added SG bootnode 2015-08-17 14:36:57 +02:00
309788de37 rpc: update the xeth over RPC API to use the success/failure messages 2015-08-17 14:04:20 +03:00
f6367548e4 Merge pull request #1654 from obscuren/call-gas
xeth: call fix when doing 'create'-like calls
2015-08-16 16:33:29 -07:00
1c3ca3ce6a xeth: max gas limit 2015-08-16 15:27:30 +02:00
8603ec7055 rpc/api: format pendingTx response. Fixes #1648 2015-08-16 11:12:22 +02:00
1086e2f298 jsre: fix annoying indentation when printing arrays of objects
The pretty printer, dumb as it is, printed arrays of objects as

  [{
    ...
    }]

With this change, they now print as:

  [{
    ...
  }]
2015-08-16 00:35:00 +01:00
49703bea0a jsre: bind the pretty printer to "inspect" in JS 2015-08-15 23:55:42 +01:00
59b28cfa31 Merge pull request #1663 from obscuren/issue-1662
xeth: added a transact mu
2015-08-15 14:55:04 -07:00
5c5c3930b7 Merge pull request #1659 from bas-vk/exec-output
Javascript --exec output
2015-08-15 06:23:29 -07:00
7bb5ac045e xeth: added a transact mu
Added a transact mutex. The transact mutex fixes an issue where
transactions were created with the same nonce, resulting in some
transactions being dropped. This happened when two concurrent calls
invoked the `Transact` method (which is OK) and both called
`GetNonce`. While the managed state is thread safe, it does not help us
in this case.
2015-08-15 00:33:52 +02:00
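
A hedged sketch of the serialisation the transact mutex provides (names are illustrative, not the xeth API): the nonce read and the nonce bump happen under one lock, so two concurrent Transact calls can no longer observe the same pending nonce.

```go
package main

import (
	"fmt"
	"sync"
)

// txSender serialises the whole get-nonce / build / submit sequence under
// transactMu, so concurrent Transact calls cannot reuse a nonce.
type txSender struct {
	transactMu sync.Mutex
	nextNonce  uint64
}

func (s *txSender) Transact(to string) uint64 {
	s.transactMu.Lock()
	defer s.transactMu.Unlock()

	nonce := s.nextNonce // GetNonce equivalent
	// ... sign and submit the transaction here ...
	s.nextNonce++ // bump the pending nonce while still holding the lock
	return nonce
}

func main() {
	s := new(txSender)
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ { // two concurrent callers, as in the issue
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("used nonce", s.Transact("0xabc"))
		}()
	}
	wg.Wait()
}
```
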
cd81356ace Merge pull request #1658 from bas-vk/liner-ctrl-c
Clear current line on ctrl-C
2015-08-14 04:36:15 -07:00
c472b8f725 main clear current line on ctrl-C 2015-08-14 13:23:41 +02:00
3a66c4ed47 Merge pull request #1642 from ethereum/fix-js-console-windows
cmd/geth, jsre: restore command line editing on windows
2015-08-14 04:14:11 -07:00
29181003d4 Merge pull request #1655 from obscuren/db-merge-fix
eth, trie: removed key prefixing from state entries & merge db fix
2015-08-14 04:12:33 -07:00
87d1cde7e4 main print console output for js statement given by the exec argument 2015-08-14 13:06:34 +02:00
28b14d3e6d Merge pull request #1635 from bas-vk/useragent
support for user agents
2015-08-13 16:25:33 -07:00
73c4e6005c Merge pull request #1638 from obscuren/jit-fixes
core/vm: fixed jit error & added inline docs
2015-08-13 11:49:01 -07:00
b8ca0a830e eth, trie: removed key prefixing from state entries & merge db fix
Fixed the database merge strategy to use the correct database. Due to a
copy-paste fail when doing type evaluation, the same database (chain) was
being iterated and all others were ignored.

Removed state prefixing because {H(code): code} is stored in the same
database as the rest of the state.
2015-08-13 20:44:03 +02:00
a89cfe92cc Merge pull request #1470 from ebuchman/encHandshake
p2p: validate recovered ephemeral pubkey
2015-08-13 11:59:27 +02:00
0b0b31c7d2 Merge pull request #1651 from karalabe/rlp-boolean-support
rlp: boolean support
2015-08-13 11:10:26 +02:00
1d2420323c rlp: add support for boolean encoding/decoding 2015-08-13 12:05:39 +03:00
0dd6911c62 Merge pull request #1647 from fjl/fix-disc-reason
p2p: fix value of DiscSubprotocolError
2015-08-12 21:20:21 +02:00
28feafe7af Merge pull request #1646 from fjl/fix-client-identifier
cmd/geth: remove spaces in client identifier
2015-08-12 14:38:48 +02:00
0d10d5a0a5 p2p: fix value of DiscSubprotocolError
We had the wrong value (12) since forever.
2015-08-12 14:15:54 +02:00
31a2793662 cmd/geth: remove spaces in client identifier 2015-08-12 13:32:52 +02:00
f9cbd16f27 support for user agents 2015-08-12 12:22:16 +02:00
0ef80bb3d0 cmd/geth, jsre: restore command line editing on windows
PR #856 broke command line editing by wrapping stdout with a filter that
interprets ANSI escape sequences to fix colored printing on windows.
Implement the printer in Go instead so it can do its own
platform-dependent coloring.

As a nice side effect, the JS console is now noticeably more responsive
when printing results.

Fixes #1608
Fixes #1612
2015-08-12 12:04:00 +02:00
05c66529b2 Merge pull request #1621 from ethereum/fix-discover-hangs
p2p/discover: fix two major bugs in reply packet handling
2015-08-11 12:17:13 -07:00
9cacec70f9 cmd/evm, core/vm, tests: changed DisableVm to EnableVm 2015-08-11 18:43:22 +02:00
94b6f38869 Merge pull request #1641 from obscuren/web3-update
web3: updated
2015-08-11 08:55:55 -07:00
bf6ea2919d web3: updated 2015-08-11 17:17:20 +02:00
c32d6fdf74 Merge pull request #1640 from obscuren/trace-flag-ethtest
cmd/ethtest: added trace flag for debugging
2015-08-11 02:53:02 -07:00
67c8ccc309 cmd/ethtest: added trace flag for debugging 2015-08-11 11:46:52 +02:00
590c99a98f p2p/discover: fix UDP reply packet timeout handling
If the timeout fired (even just nanoseconds) before the deadline of the
next pending reply, the timer was not rescheduled. The timer would've
been rescheduled anyway once the next packet was sent, but there were
cases where no next packet could ever be sent due to the locking issue
fixed in the previous commit.

As timing-related bugs go, this issue had been present for a long time
and I could never reproduce it. The test added in this commit did
reproduce the issue on about one out of 15 runs.
2015-08-11 11:42:17 +02:00
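
The rescheduling behaviour the fix guarantees can be sketched as follows (a simplified model, not the p2p/discover code): the timeout timer is always re-armed for the earliest pending deadline, including immediately after it has fired.

```go
package main

import (
	"fmt"
	"time"
)

// pending models a reply we are waiting for, with an absolute deadline.
type pending struct {
	id       int
	deadline time.Time
}

func main() {
	// Hypothetical queue of pending replies, earliest deadline first.
	queue := []pending{
		{id: 1, deadline: time.Now().Add(50 * time.Millisecond)},
		{id: 2, deadline: time.Now().Add(120 * time.Millisecond)},
	}

	timeout := time.NewTimer(0)
	<-timeout.C // drain the initial firing

	for len(queue) > 0 {
		// Always re-arm the timer for the earliest pending deadline, even
		// when the previous timeout has just fired; this is the
		// rescheduling the fix guarantees.
		timeout.Reset(time.Until(queue[0].deadline))
		<-timeout.C
		fmt.Println("reply", queue[0].id, "timed out")
		queue = queue[1:]
	}
}
```
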
01ed3fa1a9 p2p/discover: unlock the table during ping replacement
Table.mutex was being held while waiting for a reply packet, which
effectively made many parts of the whole stack block on that packet,
including the net_peerCount RPC call.
2015-08-11 11:42:17 +02:00
32395ddb89 core/vm: fixed jit error & added inline docs
opNumber did not create a new big int which could lead to the block's
number being modified.
2015-08-11 00:16:38 +02:00
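
The opNumber bug boils down to aliasing a shared *big.Int instead of pushing a copy; a small stand-alone illustration (not the VM code itself):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Buggy pattern: push the block's own *big.Int onto the VM stack. Any
	// opcode that later modifies that stack item in place also modifies the
	// block's number.
	blockNumber := big.NewInt(1000000)
	top := blockNumber
	top.Add(top, big.NewInt(1))
	fmt.Println(blockNumber) // 1000001 -- the header value was mutated

	// Fixed pattern: push a fresh big.Int carrying a copy of the value.
	blockNumber = big.NewInt(1000000)
	top = new(big.Int).Set(blockNumber)
	top.Add(top, big.NewInt(1))
	fmt.Println(blockNumber, top) // 1000000 1000001
}
```
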
2fcf7f1241 Merge pull request #1604 from obscuren/db-merge
core, eth, trie, xeth: merged state, chain, extra databases in one
2015-08-09 05:16:37 -07:00
07cb8092e7 Merge pull request #1611 from obscuren/expdiff-olympic-fix
cmd/utils, core: disable exp diff for olympic net
2015-08-09 05:15:13 -07:00
1cbd53add8 Merge pull request #1626 from obscuren/defaults-fix
cmd/geth, core/vm: setup vm settings and defaulted JIT disabled
2015-08-08 17:11:28 -07:00
eec38c5853 cmd/geth, core/vm: setup vm settings and defaulted JIT disabled 2015-08-09 02:06:16 +02:00
c93f0b9f4b Merge pull request #1490 from obscuren/jit-vm
core/vm: jit vm
2015-08-08 06:36:26 -07:00
a23478c0be core, eth, trie, xeth: merged state, chain, extra databases in one 2015-08-07 22:29:02 +02:00
312128384b Merge pull request #1620 from caktux/develop
string version for build server
2015-08-07 11:41:41 -07:00
3ccab5a1e8 string version for build server 2015-08-07 14:13:33 -04:00
dcb23bc3ab Merge pull request #1615 from obscuren/contract-addr-fix
xeth: fixed contract addr check
2015-08-07 05:38:02 -07:00
b6c5b3b4a7 xeth: fixed contract addr check 2015-08-07 14:32:06 +02:00
d7580f21f6 Merge pull request #1595 from obscuren/extra-data
cmd/geth, eth: added canonical extra data
2015-08-07 05:00:36 -07:00
b1fac4270d Merge pull request #1614 from obscuren/web3-finite-fix
web3: regression. Fixes #1613
2015-08-07 04:59:15 -07:00
ac697326a6 core/vm: reduced big int allocations
Reduced big int allocations by making stack items modifiable. Instead of
adding items such as `common.Big0` to the stack, `new(big.Int)` is added.
One must expect that any item added to the stack might change.
2015-08-07 12:52:23 +02:00
184e9ae9a8 core, tests: reduced state copy by N calls
Reduced the number of state copies required by N calls by doing a balance
check prior to any state modifications.
2015-08-07 12:52:23 +02:00
846f34f78b core/vm, tests: implemented semi-jit vm
* changed stack and removed stack ptr. Let go decide on slice reuse.
2015-08-07 12:52:17 +02:00
a33726b7db web3: regression. Fixes #1613 2015-08-07 12:33:12 +02:00
132df860d9 miner, rpc: added length check for extra data 2015-08-07 12:24:44 +02:00
785b3e7a57 cmd/geth, eth: added canonical extra data
Implemented canonical extra data according to
https://github.com/ethereum/wiki/wiki/Extra-Data
2015-08-07 12:24:32 +02:00
e89536ca0b Merge pull request #1596 from obscuren/submit-hashrate
miner, rpc: added submit hashrate for remote agents
2015-08-07 03:08:48 -07:00
ac10c9352e Merge pull request #1610 from obscuren/address-check
xeth: added address hex check and length check
2015-08-07 02:05:54 -07:00
cf7cef4293 xeth: added address hex check and length check 2015-08-07 09:52:12 +02:00
698e98d981 Merge pull request #1600 from ethereum/fix-tests-windows
Fix tests on windows
2015-08-06 12:39:29 -07:00
a3b8169938 Merge pull request #1603 from ebuchman/trie_hex_fix
trie: hex fix
2015-08-06 12:02:55 -07:00
46c9594081 trie: run codec tests, add benchmarks, faster 2015-08-06 14:04:16 -04:00
7baa5977c8 Merge pull request #1594 from ebuchman/trie_hex_fix
faster hex-prefix codec and string -> []byte
2015-08-06 08:44:19 -07:00
803096ca0f .gitattributes: add 2015-08-06 17:18:59 +02:00
6ee908848c p2p/nat: disable UPnP test on windows 2015-08-06 17:18:59 +02:00
3832019964 common/compiler, common/docserver, jsre: fix tests on windows 2015-08-06 17:18:59 +02:00
eae1191904 cmd/utils: fix path expansion on windows 2015-08-06 17:18:59 +02:00
78b101e15d common: remove windows path functions
They were unused and their tests failed on Windows.
2015-08-06 16:43:43 +02:00
74f6d90153 cmd/utils, core: disable exp diff for olympic net 2015-08-06 13:29:06 +02:00
c32073b11f miner, rpc: added submit hashrate for remote agents 2015-08-06 12:58:54 +02:00
b23b4dbd79 p2p/discover: close Table during testing
Not closing the table used to be fine, but now the table has a database.
2015-08-06 12:27:59 +02:00
c1d516546d faster hex-prefix codec and string -> []byte 2015-08-06 03:17:59 -04:00
37efd08b42 p2p: validate recovered ephemeral pubkey against checksum in decodeAuthMsg 2015-07-14 03:06:44 +00:00
972 changed files with 38142 additions and 10295 deletions

2
.gitattributes vendored Normal file

@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto


@ -5,7 +5,7 @@ install:
# - go get code.google.com/p/go.tools/cmd/goimports
# - go get github.com/golang/lint/golint
# - go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/cover github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
before_script:
# - gofmt -l -w .
# - goimports -l -w .
@ -15,7 +15,7 @@ before_script:
script:
- make travis-test-with-coverage
after_success:
- if [ "$COVERALLS_TOKEN" ]; then goveralls -coverprofile=profile.cov -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
- bash <(curl -s https://codecov.io/bash)
env:
global:
- secure: "U2U1AmkU4NJBgKR/uUAebQY87cNL0+1JHjnLOmmXwxYYyj5ralWb1aSuSH3qSXiT93qLBmtaUkuv9fberHVqrbAeVlztVdUsKAq7JMQH+M99iFkC9UiRMqHmtjWJ0ok4COD1sRYixxi21wb/JrMe3M1iL4QJVS61iltjHhVdM64="

9
CONTRIBUTING.md Normal file

@ -0,0 +1,9 @@
If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits that do not comply with the coding standards
are ignored (use gofmt!). If you send pull requests, make absolutely sure that you
commit on the `develop` branch and that you do not merge to master.
Commits that are directly based on master are simply ignored.
See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, testing, and
dependency management.

33
Godeps/Godeps.json generated

@ -5,11 +5,6 @@
"./..."
],
"Deps": [
{
"ImportPath": "code.google.com/p/go-uuid/uuid",
"Comment": "null-12",
"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
},
{
"ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-95-g9b2bd2b",
@ -21,17 +16,21 @@
},
{
"ImportPath": "github.com/ethereum/ethash",
"Comment": "v23.1-227-g8f6ccaa",
"Rev": "8f6ccaaef9b418553807a73a95cb5f49cd3ea39f"
"Comment": "v23.1-234-g062e40a",
"Rev": "062e40a1a1671f5a5102862b56e4c56f68a732f5"
},
{
"ImportPath": "github.com/fatih/color",
"Comment": "v0.1-5-gf773d4c",
"Rev": "f773d4c806cc8e4a5749d6a35e2a4bbcd71443d6"
},
{
"ImportPath": "github.com/gizak/termui",
"Rev": "bab8dce01c193d82bc04888a0a9a7814d505f532"
},
{
"ImportPath": "github.com/howeyc/fsnotify",
"Comment": "v0.9.0-11-g6b1ef89",
"Rev": "6b1ef893dc11e0447abda6da20a5203481878dda"
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "7f9ef20a0256f494e24126014135cf893ab71e9e"
},
{
"ImportPath": "github.com/huin/goupnp",
@ -45,13 +44,9 @@
"ImportPath": "github.com/kardianos/osext",
"Rev": "ccfcd0245381f0c94c68f50626665eed3c6b726a"
},
{
"ImportPath": "github.com/mattn/go-colorable",
"Rev": "043ae16291351db8465272edf465c9f388161627"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "fdbe02a1b44e75977b2690062b83cf507d70c013"
"Rev": "7fcbc72f853b92b5720db4a6b8482be612daef24"
},
{
"ImportPath": "github.com/mattn/go-runewidth",
@ -62,6 +57,10 @@
"ImportPath": "github.com/nsf/termbox-go",
"Rev": "675ffd907b7401b8a709a5ef2249978af5616bb2"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
},
{
"ImportPath": "github.com/peterh/liner",
"Rev": "29f6a646557d83e2b6e9ba05c45fbea9c006dbe8"
@ -78,6 +77,10 @@
"ImportPath": "github.com/rs/cors",
"Rev": "6e0c3cb65fc0fdb064c743d176a620e3ca446dfb"
},
{
"ImportPath": "github.com/shiena/ansicolor",
"Rev": "a5e2b567a4dd6cc74545b8a4f27c9d63b9e7735b"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"


@ -35,10 +35,14 @@
#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
#define ethash_swap_u32(input_) bswap32(input_)
#define ethash_swap_u64(input_) bswap64(input_)
#elif defined(__OpenBSD__)
#include <endian.h>
#define ethash_swap_u32(input_) swap32(input_)
#define ethash_swap_u64(input_) swap64(input_)
#else // posix
#include <byteswap.h>
#define ethash_swap_u32(input_) __bswap_32(input_)
#define ethash_swap_u64(input_) __bswap_64(input_)
#define ethash_swap_u32(input_) bswap_32(input_)
#define ethash_swap_u64(input_) bswap_64(input_)
#endif


@ -29,6 +29,10 @@ extern "C" {
#define FNV_PRIME 0x01000193
/* The FNV-1 spec multiplies the prime with the input one byte (octet) in turn.
We instead multiply it with the full 32-bit input.
This gives a different result compared to a canonical FNV-1 implementation.
*/
static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
{
return x * FNV_PRIME ^ y;
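
For comparison with the comment above, a short Go sketch of both variants: the ethash-style hash that multiplies the full 32-bit word by the prime, and a canonical FNV-1 that folds the input in one octet at a time (using the standard 32-bit FNV offset basis and prime).

```go
package main

import "fmt"

const fnvPrime = 0x01000193

// ethashFNV mirrors the C macro above: multiply the full 32-bit word by the
// prime, then XOR with the other word.
func ethashFNV(x, y uint32) uint32 {
	return x*fnvPrime ^ y
}

// canonicalFNV1 is the spec variant for comparison: start from the offset
// basis and fold the input in one octet at a time (multiply, then XOR).
func canonicalFNV1(data []byte) uint32 {
	h := uint32(0x811C9DC5) // 32-bit FNV offset basis
	for _, b := range data {
		h *= fnvPrime
		h ^= uint32(b)
	}
	return h
}

func main() {
	fmt.Printf("ethash variant:  %#x\n", ethashFNV(0x12345678, 0x9ABCDEF0))
	fmt.Printf("canonical FNV-1: %#x\n", canonicalFNV1([]byte{0x12, 0x34, 0x56, 0x78}))
}
```
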


@ -0,0 +1,3 @@
language: go
go: 1.3


@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

151
Godeps/_workspace/src/github.com/fatih/color/README.md generated vendored Normal file

@ -0,0 +1,151 @@
# Color [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/color) [![Build Status](http://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
Color lets you use colorized outputs in terms of [ANSI Escape Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It has support for Windows too! The API can be used in several ways, pick one that suits you.
![Color](http://i.imgur.com/c1JI0lA.png)
## Install
```bash
go get github.com/fatih/color
```
## Examples
### Standard colors
```go
// Print with default helper functions
color.Cyan("Prints text in cyan.")
// A newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// These are using the default foreground colors
color.Red("We have red")
color.Magenta("And many others ..")
```
### Mix and reuse colors
```go
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")
```
### Custom print functions (PrintFunc)
```go
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("Warning")
red("Error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("Don't forget this...")
```
### Insert into noncolor strings (SprintFunc)
```go
// Create SprintXxx functions to mix strings with other non-colorized strings:
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
fmt.Printf("This %s rocks!\n", info("package"))
// Use helper functions
fmt.Printf("This", color.RedString("warning"), "should be not neglected.")
fmt.Printf(color.GreenString("Info:"), "an important message." )
// Windows supported too! Just don't forget to change the output to color.Output
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
```
### Plug into existing code
```go
// Use handy standard colors
color.Set(color.FgYellow)
fmt.Println("Existing text will now be in yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // Don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // Use it in your function
fmt.Println("All text will now be bold magenta.")
```
### Disable color
There might be a case where you want to disable color output (for example to
pipe the standard output of your app to somewhere else). `Color` has support to
disable colors both globally and for single color definition. For example
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
```go
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
```
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
```go
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
```
## Todo
* Save/Return previous values
* Evaluate fmt.Formatter interface
## Credits
* [Fatih Arslan](https://github.com/fatih)
* Windows support via @shiena: [ansicolor](https://github.com/shiena/ansicolor)
## License
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details

353
Godeps/_workspace/src/github.com/fatih/color/color.go generated vendored Normal file

@ -0,0 +1,353 @@
package color
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/mattn/go-isatty"
"github.com/shiena/ansicolor"
)
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
// Add is used to chain SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
var Output = ansicolor.NewAnsiColorWriter(os.Stdout)
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Print(a...) }
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) { c.Printf(format, a...) }
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Println(a...) }
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// string. Windows users should use this in conjuction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// string. Windows users should use this in conjuction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// string. Windows users should use this in conjuction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formated SGR sequence to be plugged into a "\x1b[...m"
// an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjuction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
// check first if we have user setted action
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
func boolPtr(v bool) *bool {
return &v
}
// Black is an convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
// Red is an convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { printColor(format, FgRed, a...) }
// Green is an convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
// Yellow is an convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
// Blue is an convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
// Magenta is an convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
// Cyan is an convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
// White is an convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
func printColor(format string, p Attribute, a ...interface{}) {
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
c := &Color{params: []Attribute{p}}
c.Printf(format, a...)
}
// BlackString is an convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string {
return New(FgBlack).SprintfFunc()(format, a...)
}
// RedString is an convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string {
return New(FgRed).SprintfFunc()(format, a...)
}
// GreenString is an convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string {
return New(FgGreen).SprintfFunc()(format, a...)
}
// YellowString is an convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string {
return New(FgYellow).SprintfFunc()(format, a...)
}
// BlueString is an convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string {
return New(FgBlue).SprintfFunc()(format, a...)
}
// MagentaString is an convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return New(FgMagenta).SprintfFunc()(format, a...)
}
// CyanString is an convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string {
return New(FgCyan).SprintfFunc()(format, a...)
}
// WhiteString is an convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string {
return New(FgWhite).SprintfFunc()(format, a...)
}


@ -0,0 +1,176 @@
package color
import (
"bytes"
"fmt"
"os"
"testing"
"github.com/shiena/ansicolor"
)
// Testing colors is kinda different. First we test for given colors and their
// escaped formatted results. Next we create some visual tests to be tested.
// Each visual test includes the color name to be compared.
func TestColor(t *testing.T) {
rb := new(bytes.Buffer)
Output = rb
testColors := []struct {
text string
code Attribute
}{
{text: "black", code: FgBlack},
{text: "red", code: FgRed},
{text: "green", code: FgGreen},
{text: "yellow", code: FgYellow},
{text: "blue", code: FgBlue},
{text: "magent", code: FgMagenta},
{text: "cyan", code: FgCyan},
{text: "white", code: FgWhite},
}
for _, c := range testColors {
New(c.code).Print(c.text)
line, _ := rb.ReadString('\n')
scannedLine := fmt.Sprintf("%q", line)
colored := fmt.Sprintf("\x1b[%dm%s\x1b[0m", c.code, c.text)
escapedForm := fmt.Sprintf("%q", colored)
fmt.Printf("%s\t: %s\n", c.text, line)
if scannedLine != escapedForm {
t.Errorf("Expecting %s, got '%s'\n", escapedForm, scannedLine)
}
}
}
func TestNoColor(t *testing.T) {
rb := new(bytes.Buffer)
Output = rb
testColors := []struct {
text string
code Attribute
}{
{text: "black", code: FgBlack},
{text: "red", code: FgRed},
{text: "green", code: FgGreen},
{text: "yellow", code: FgYellow},
{text: "blue", code: FgBlue},
{text: "magent", code: FgMagenta},
{text: "cyan", code: FgCyan},
{text: "white", code: FgWhite},
}
for _, c := range testColors {
p := New(c.code)
p.DisableColor()
p.Print(c.text)
line, _ := rb.ReadString('\n')
if line != c.text {
t.Errorf("Expecting %s, got '%s'\n", c.text, line)
}
}
// global check
NoColor = true
defer func() {
NoColor = false
}()
for _, c := range testColors {
p := New(c.code)
p.Print(c.text)
line, _ := rb.ReadString('\n')
if line != c.text {
t.Errorf("Expecting %s, got '%s'\n", c.text, line)
}
}
}
func TestColorVisual(t *testing.T) {
// First Visual Test
fmt.Println("")
Output = ansicolor.NewAnsiColorWriter(os.Stdout)
New(FgRed).Printf("red\t")
New(BgRed).Print(" ")
New(FgRed, Bold).Println(" red")
New(FgGreen).Printf("green\t")
New(BgGreen).Print(" ")
New(FgGreen, Bold).Println(" green")
New(FgYellow).Printf("yellow\t")
New(BgYellow).Print(" ")
New(FgYellow, Bold).Println(" yellow")
New(FgBlue).Printf("blue\t")
New(BgBlue).Print(" ")
New(FgBlue, Bold).Println(" blue")
New(FgMagenta).Printf("magenta\t")
New(BgMagenta).Print(" ")
New(FgMagenta, Bold).Println(" magenta")
New(FgCyan).Printf("cyan\t")
New(BgCyan).Print(" ")
New(FgCyan, Bold).Println(" cyan")
New(FgWhite).Printf("white\t")
New(BgWhite).Print(" ")
New(FgWhite, Bold).Println(" white")
fmt.Println("")
// Second Visual test
Black("black")
Red("red")
Green("green")
Yellow("yellow")
Blue("blue")
Magenta("magenta")
Cyan("cyan")
White("white")
// Third visual test
fmt.Println()
Set(FgBlue)
fmt.Println("is this blue?")
Unset()
Set(FgMagenta)
fmt.Println("and this magenta?")
Unset()
// Fourth Visual test
fmt.Println()
blue := New(FgBlue).PrintlnFunc()
blue("blue text with custom print func")
red := New(FgRed).PrintfFunc()
red("red text with a printf func: %d\n", 123)
put := New(FgYellow).SprintFunc()
warn := New(FgRed).SprintFunc()
fmt.Fprintf(Output, "this is a %s and this is %s.\n", put("warning"), warn("error"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Fprintf(Output, "this %s rocks!\n", info("package"))
// Fifth Visual Test
fmt.Println()
fmt.Fprintln(Output, BlackString("black"))
fmt.Fprintln(Output, RedString("red"))
fmt.Fprintln(Output, GreenString("green"))
fmt.Fprintln(Output, YellowString("yellow"))
fmt.Fprintln(Output, BlueString("blue"))
fmt.Fprintln(Output, MagentaString("magenta"))
fmt.Fprintln(Output, CyanString("cyan"))
fmt.Fprintln(Output, WhiteString("white"))
}

114
Godeps/_workspace/src/github.com/fatih/color/doc.go generated vendored Normal file

@ -0,0 +1,114 @@
/*
Package color is an ANSI color package to output colorized or SGR defined
output to the standard output. The API can be used in several way, pick one
that suits you.
Use simple and default helper functions with predefined foreground colors:
color.Cyan("Prints text in cyan.")
// a newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// More default foreground colors..
color.Red("We have red")
color.Yellow("Yellow color too!")
color.Magenta("And many others ..")
However there are times where custom color mixes are required. Below are some
examples to create custom color objects and use the print functions of each
separate color object.
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with White background.")
You can create PrintXxx functions to simplify even more:
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("warning")
red("error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("don't forget this...")
Or create SprintXxx functions to mix strings with other non-colorized strings:
yellow := New(FgYellow).SprintFunc()
red := New(FgRed).SprintFunc()
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Printf("this %s rocks!\n", info("package"))
Windows support is enabled by default. All Print functions work as intended.
However, for the color.SprintXXX functions, the user should use fmt.FprintXXX and
set the output to color.Output:
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
Using it with existing code is possible. Just use the Set() method to set the
standard output to the given parameters. That way a rewrite of existing
code is not required.
// Use handy standard colors.
color.Set(color.FgYellow)
fmt.Println("Existing text will be now in Yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // use it in your function
fmt.Println("All text will be now bold magenta.")
There might be a case where you want to disable color output (for example, to
pipe the standard output of your app somewhere else). `Color` has support for
disabling colors both globally and for a single color definition. For example,
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
*/
package color
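
Taken together, the helpers documented above compose into a small program. The following is a minimal sketch (not part of the vendored package) combining the default helpers, a custom Color object, the `--no-color` flag pattern, and a SprintFunc written through color.Output; it assumes the import path github.com/fatih/color shown in this vendored tree.

```go
package main

import (
	"flag"
	"fmt"

	"github.com/fatih/color" // assumed import path of the vendored package
)

func main() {
	noColor := flag.Bool("no-color", false, "disable color output")
	flag.Parse()
	if *noColor {
		color.NoColor = true // global switch described in the package doc
	}

	// Default helpers print a whole line in a single color.
	color.Cyan("Prints text in cyan.")

	// Custom color objects can mix several attributes.
	boldRed := color.New(color.FgRed, color.Bold)
	boldRed.Println("This will print text in bold red.")

	// SprintFunc mixes colorized and plain strings; writing through
	// color.Output keeps the result working on Windows as well.
	warn := color.New(color.FgYellow).SprintFunc()
	fmt.Fprintf(color.Output, "this is a %s\n", warn("warning"))
}
```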

View File

@ -9,8 +9,6 @@ import (
"net/http"
"sync"
"time"
"github.com/ethereum/go-ethereum/fdtrack"
)
// HTTPUClient is a client for dealing with HTTPU (HTTP over UDP). Its typical
@ -27,7 +25,6 @@ func NewHTTPUClient() (*HTTPUClient, error) {
if err != nil {
return nil, err
}
fdtrack.Open("upnp")
return &HTTPUClient{conn: conn}, nil
}
@ -36,7 +33,6 @@ func NewHTTPUClient() (*HTTPUClient, error) {
func (httpu *HTTPUClient) Close() error {
httpu.connLock.Lock()
defer httpu.connLock.Unlock()
fdtrack.Close("upnp")
return httpu.conn.Close()
}

View File

@ -7,12 +7,9 @@ import (
"encoding/xml"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"reflect"
"github.com/ethereum/go-ethereum/fdtrack"
)
const (
@ -29,17 +26,6 @@ type SOAPClient struct {
func NewSOAPClient(endpointURL url.URL) *SOAPClient {
return &SOAPClient{
EndpointURL: endpointURL,
HTTPClient: http.Client{
Transport: &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
c, err := net.Dial(network, addr)
if c != nil {
c = fdtrack.WrapConn("upnp", c)
}
return c, err
},
},
},
}
}

View File

@ -5,8 +5,6 @@ import (
"log"
"net"
"time"
"github.com/ethereum/go-ethereum/fdtrack"
)
// Implement the NAT-PMP protocol, typically supported by Apple routers and open source
@ -104,8 +102,6 @@ func (n *Client) rpc(msg []byte, resultSize int) (result []byte, err error) {
if err != nil {
return
}
fdtrack.Open("natpmp")
defer fdtrack.Close("natpmp")
defer conn.Close()
result = make([]byte, resultSize)

View File

@ -1,42 +0,0 @@
# go-colorable
Colorable writer for windows.
For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I don't want to.)
This package makes it possible to handle ANSI color escape sequences on Windows.
## Too Bad!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
## So Good!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
## Usage
```go
logrus.SetOutput(colorable.NewColorableStdout())
logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```
You can compile the above code on non-Windows OSes.
## Installation
```
$ go get github.com/mattn/go-colorable
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)

View File

@ -1,16 +0,0 @@
// +build !windows
package colorable
import (
"io"
"os"
)
func NewColorableStdout() io.Writer {
return os.Stdout
}
func NewColorableStderr() io.Writer {
return os.Stderr
}

View File

@ -1,594 +0,0 @@
package colorable
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/mattn/go-isatty"
)
const (
foregroundBlue = 0x1
foregroundGreen = 0x2
foregroundRed = 0x4
foregroundIntensity = 0x8
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
backgroundBlue = 0x10
backgroundGreen = 0x20
backgroundRed = 0x40
backgroundIntensity = 0x80
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
)
type wchar uint16
type short int16
type dword uint32
type word uint16
type coord struct {
x short
y short
}
type smallRect struct {
left short
top short
right short
bottom short
}
type consoleScreenBufferInfo struct {
size coord
cursorPosition coord
attributes word
window smallRect
maximumWindowSize coord
}
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
)
type Writer struct {
out io.Writer
handle syscall.Handle
lastbuf bytes.Buffer
oldattr word
}
func NewColorableStdout() io.Writer {
var csbi consoleScreenBufferInfo
out := os.Stdout
if !isatty.IsTerminal(out.Fd()) {
return out
}
handle := syscall.Handle(out.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
return &Writer{out: out, handle: handle, oldattr: csbi.attributes}
}
func NewColorableStderr() io.Writer {
var csbi consoleScreenBufferInfo
out := os.Stderr
if !isatty.IsTerminal(out.Fd()) {
return out
}
handle := syscall.Handle(out.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
return &Writer{out: out, handle: handle, oldattr: csbi.attributes}
}
var color256 = map[int]int{
0: 0x000000,
1: 0x800000,
2: 0x008000,
3: 0x808000,
4: 0x000080,
5: 0x800080,
6: 0x008080,
7: 0xc0c0c0,
8: 0x808080,
9: 0xff0000,
10: 0x00ff00,
11: 0xffff00,
12: 0x0000ff,
13: 0xff00ff,
14: 0x00ffff,
15: 0xffffff,
16: 0x000000,
17: 0x00005f,
18: 0x000087,
19: 0x0000af,
20: 0x0000d7,
21: 0x0000ff,
22: 0x005f00,
23: 0x005f5f,
24: 0x005f87,
25: 0x005faf,
26: 0x005fd7,
27: 0x005fff,
28: 0x008700,
29: 0x00875f,
30: 0x008787,
31: 0x0087af,
32: 0x0087d7,
33: 0x0087ff,
34: 0x00af00,
35: 0x00af5f,
36: 0x00af87,
37: 0x00afaf,
38: 0x00afd7,
39: 0x00afff,
40: 0x00d700,
41: 0x00d75f,
42: 0x00d787,
43: 0x00d7af,
44: 0x00d7d7,
45: 0x00d7ff,
46: 0x00ff00,
47: 0x00ff5f,
48: 0x00ff87,
49: 0x00ffaf,
50: 0x00ffd7,
51: 0x00ffff,
52: 0x5f0000,
53: 0x5f005f,
54: 0x5f0087,
55: 0x5f00af,
56: 0x5f00d7,
57: 0x5f00ff,
58: 0x5f5f00,
59: 0x5f5f5f,
60: 0x5f5f87,
61: 0x5f5faf,
62: 0x5f5fd7,
63: 0x5f5fff,
64: 0x5f8700,
65: 0x5f875f,
66: 0x5f8787,
67: 0x5f87af,
68: 0x5f87d7,
69: 0x5f87ff,
70: 0x5faf00,
71: 0x5faf5f,
72: 0x5faf87,
73: 0x5fafaf,
74: 0x5fafd7,
75: 0x5fafff,
76: 0x5fd700,
77: 0x5fd75f,
78: 0x5fd787,
79: 0x5fd7af,
80: 0x5fd7d7,
81: 0x5fd7ff,
82: 0x5fff00,
83: 0x5fff5f,
84: 0x5fff87,
85: 0x5fffaf,
86: 0x5fffd7,
87: 0x5fffff,
88: 0x870000,
89: 0x87005f,
90: 0x870087,
91: 0x8700af,
92: 0x8700d7,
93: 0x8700ff,
94: 0x875f00,
95: 0x875f5f,
96: 0x875f87,
97: 0x875faf,
98: 0x875fd7,
99: 0x875fff,
100: 0x878700,
101: 0x87875f,
102: 0x878787,
103: 0x8787af,
104: 0x8787d7,
105: 0x8787ff,
106: 0x87af00,
107: 0x87af5f,
108: 0x87af87,
109: 0x87afaf,
110: 0x87afd7,
111: 0x87afff,
112: 0x87d700,
113: 0x87d75f,
114: 0x87d787,
115: 0x87d7af,
116: 0x87d7d7,
117: 0x87d7ff,
118: 0x87ff00,
119: 0x87ff5f,
120: 0x87ff87,
121: 0x87ffaf,
122: 0x87ffd7,
123: 0x87ffff,
124: 0xaf0000,
125: 0xaf005f,
126: 0xaf0087,
127: 0xaf00af,
128: 0xaf00d7,
129: 0xaf00ff,
130: 0xaf5f00,
131: 0xaf5f5f,
132: 0xaf5f87,
133: 0xaf5faf,
134: 0xaf5fd7,
135: 0xaf5fff,
136: 0xaf8700,
137: 0xaf875f,
138: 0xaf8787,
139: 0xaf87af,
140: 0xaf87d7,
141: 0xaf87ff,
142: 0xafaf00,
143: 0xafaf5f,
144: 0xafaf87,
145: 0xafafaf,
146: 0xafafd7,
147: 0xafafff,
148: 0xafd700,
149: 0xafd75f,
150: 0xafd787,
151: 0xafd7af,
152: 0xafd7d7,
153: 0xafd7ff,
154: 0xafff00,
155: 0xafff5f,
156: 0xafff87,
157: 0xafffaf,
158: 0xafffd7,
159: 0xafffff,
160: 0xd70000,
161: 0xd7005f,
162: 0xd70087,
163: 0xd700af,
164: 0xd700d7,
165: 0xd700ff,
166: 0xd75f00,
167: 0xd75f5f,
168: 0xd75f87,
169: 0xd75faf,
170: 0xd75fd7,
171: 0xd75fff,
172: 0xd78700,
173: 0xd7875f,
174: 0xd78787,
175: 0xd787af,
176: 0xd787d7,
177: 0xd787ff,
178: 0xd7af00,
179: 0xd7af5f,
180: 0xd7af87,
181: 0xd7afaf,
182: 0xd7afd7,
183: 0xd7afff,
184: 0xd7d700,
185: 0xd7d75f,
186: 0xd7d787,
187: 0xd7d7af,
188: 0xd7d7d7,
189: 0xd7d7ff,
190: 0xd7ff00,
191: 0xd7ff5f,
192: 0xd7ff87,
193: 0xd7ffaf,
194: 0xd7ffd7,
195: 0xd7ffff,
196: 0xff0000,
197: 0xff005f,
198: 0xff0087,
199: 0xff00af,
200: 0xff00d7,
201: 0xff00ff,
202: 0xff5f00,
203: 0xff5f5f,
204: 0xff5f87,
205: 0xff5faf,
206: 0xff5fd7,
207: 0xff5fff,
208: 0xff8700,
209: 0xff875f,
210: 0xff8787,
211: 0xff87af,
212: 0xff87d7,
213: 0xff87ff,
214: 0xffaf00,
215: 0xffaf5f,
216: 0xffaf87,
217: 0xffafaf,
218: 0xffafd7,
219: 0xffafff,
220: 0xffd700,
221: 0xffd75f,
222: 0xffd787,
223: 0xffd7af,
224: 0xffd7d7,
225: 0xffd7ff,
226: 0xffff00,
227: 0xffff5f,
228: 0xffff87,
229: 0xffffaf,
230: 0xffffd7,
231: 0xffffff,
232: 0x080808,
233: 0x121212,
234: 0x1c1c1c,
235: 0x262626,
236: 0x303030,
237: 0x3a3a3a,
238: 0x444444,
239: 0x4e4e4e,
240: 0x585858,
241: 0x626262,
242: 0x6c6c6c,
243: 0x767676,
244: 0x808080,
245: 0x8a8a8a,
246: 0x949494,
247: 0x9e9e9e,
248: 0xa8a8a8,
249: 0xb2b2b2,
250: 0xbcbcbc,
251: 0xc6c6c6,
252: 0xd0d0d0,
253: 0xdadada,
254: 0xe4e4e4,
255: 0xeeeeee,
}
func (w *Writer) Write(data []byte) (n int, err error) {
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
er := bytes.NewBuffer(data)
loop:
for {
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
if r1 == 0 {
break loop
}
c1, _, err := er.ReadRune()
if err != nil {
break loop
}
if c1 != 0x1b {
fmt.Fprint(w.out, string(c1))
continue
}
c2, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
break loop
}
if c2 != 0x5b {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
continue
}
var buf bytes.Buffer
var m rune
for {
c, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
w.lastbuf.Write(buf.Bytes())
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
m = c
break
}
buf.Write([]byte(string(c)))
}
switch m {
case 'm':
attr := csbi.attributes
cs := buf.String()
if cs == "" {
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
continue
}
token := strings.Split(cs, ";")
for i, ns := range token {
if n, err = strconv.Atoi(ns); err == nil {
switch {
case n == 0 || n == 100:
attr = w.oldattr
case 1 <= n && n <= 5:
attr |= foregroundIntensity
case n == 7:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
case 22 == n || n == 25 || n == 25:
attr |= foregroundIntensity
case n == 27:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
case 30 <= n && n <= 37:
attr = (attr & backgroundMask)
if (n-30)&1 != 0 {
attr |= foregroundRed
}
if (n-30)&2 != 0 {
attr |= foregroundGreen
}
if (n-30)&4 != 0 {
attr |= foregroundBlue
}
case n == 38: // set foreground color.
if i < len(token)-2 && token[i+1] == "5" {
if n256, err := strconv.Atoi(token[i+2]); err == nil {
if n256foreAttr == nil {
n256setup()
}
attr &= backgroundMask
attr |= n256foreAttr[n256]
i += 2
}
} else {
attr = attr & (w.oldattr & backgroundMask)
}
case n == 39: // reset foreground color.
attr &= backgroundMask
attr |= w.oldattr & foregroundMask
case 40 <= n && n <= 47:
attr = (attr & foregroundMask)
if (n-40)&1 != 0 {
attr |= backgroundRed
}
if (n-40)&2 != 0 {
attr |= backgroundGreen
}
if (n-40)&4 != 0 {
attr |= backgroundBlue
}
case n == 48: // set background color.
if i < len(token)-2 && token[i+1] == "5" {
if n256, err := strconv.Atoi(token[i+2]); err == nil {
if n256backAttr == nil {
n256setup()
}
attr &= foregroundMask
attr |= n256backAttr[n256]
i += 2
}
} else {
attr = attr & (w.oldattr & foregroundMask)
}
case n == 49: // reset foreground color.
attr &= foregroundMask
attr |= w.oldattr & backgroundMask
}
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
}
}
}
}
return len(data) - w.lastbuf.Len(), nil
}
type consoleColor struct {
red bool
green bool
blue bool
intensity bool
}
func minmax3(a, b, c int) (min, max int) {
if a < b {
if b < c {
return a, c
} else if a < c {
return a, b
} else {
return c, b
}
} else {
if a < c {
return b, c
} else if b < c {
return b, a
} else {
return c, a
}
}
}
func toConsoleColor(rgb int) (c consoleColor) {
r, g, b := (rgb&0xFF0000)>>16, (rgb&0x00FF00)>>8, rgb&0x0000FF
min, max := minmax3(r, g, b)
a := (min + max) / 2
if r < 128 && g < 128 && b < 128 {
if r >= a {
c.red = true
}
if g >= a {
c.green = true
}
if b >= a {
c.blue = true
}
// non-intensed white is lighter than intensed black, so swap those.
if c.red && c.green && c.blue {
c.red, c.green, c.blue = false, false, false
c.intensity = true
}
} else {
if min < 128 {
min = 128
a = (min + max) / 2
}
if r >= a {
c.red = true
}
if g >= a {
c.green = true
}
if b >= a {
c.blue = true
}
c.intensity = true
// intensed black is darker than non-intensed white, so swap those.
if !c.red && !c.green && !c.blue {
c.red, c.green, c.blue = true, true, true
c.intensity = false
}
}
return c
}
func (c consoleColor) foregroundAttr() (attr word) {
if c.red {
attr |= foregroundRed
}
if c.green {
attr |= foregroundGreen
}
if c.blue {
attr |= foregroundBlue
}
if c.intensity {
attr |= foregroundIntensity
}
return
}
func (c consoleColor) backgroundAttr() (attr word) {
if c.red {
attr |= backgroundRed
}
if c.green {
attr |= backgroundGreen
}
if c.blue {
attr |= backgroundBlue
}
if c.intensity {
attr |= backgroundIntensity
}
return
}
var n256foreAttr []word
var n256backAttr []word
func n256setup() {
n256foreAttr = make([]word, 256)
n256backAttr = make([]word, 256)
for i, rgb := range color256 {
c := toConsoleColor(rgb)
n256foreAttr[i] = c.foregroundAttr()
n256backAttr[i] = c.backgroundAttr()
}
}

View File

@ -0,0 +1,9 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
MIT License (Expat)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,4 +1,4 @@
// +build darwin freebsd
// +build darwin freebsd openbsd netbsd
package isatty

View File

@ -0,0 +1 @@
Paul Borman <borman@google.com>

View File

@ -1,4 +1,4 @@
Copyright (c) 2009 Google Inc. All rights reserved.
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

Godeps/_workspace/src/github.com/pborman/uuid/json.go

@ -0,0 +1,30 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "errors"
func (u UUID) MarshalJSON() ([]byte, error) {
if len(u) == 0 {
return []byte(`""`), nil
}
return []byte(`"` + u.String() + `"`), nil
}
func (u *UUID) UnmarshalJSON(data []byte) error {
if len(data) == 0 || string(data) == `""` {
return nil
}
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("invalid UUID format")
}
data = data[1 : len(data)-1]
uu := Parse(string(data))
if uu == nil {
return errors.New("invalid UUID format")
}
*u = uu
return nil
}
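
For reference, here is a minimal sketch (not from the vendored sources) of round-tripping a UUID field through encoding/json using the MarshalJSON and UnmarshalJSON methods above; it assumes the import path github.com/pborman/uuid and the NewRandom constructor from the same package.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pborman/uuid" // assumed import path of the vendored package
)

type record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	r := record{ID: uuid.NewRandom()} // random (version 4) UUID

	data, err := json.Marshal(r) // MarshalJSON renders the UUID as a quoted string
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	var back record
	if err := json.Unmarshal(data, &back); err != nil { // UnmarshalJSON parses it back
		panic(err)
	}
	fmt.Println(back.ID.String())
}
```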

View File

@ -0,0 +1,32 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/json"
"reflect"
"testing"
)
var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
func TestJSON(t *testing.T) {
type S struct {
ID1 UUID
ID2 UUID
}
s1 := S{ID1: testUUID}
data, err := json.Marshal(&s1)
if err != nil {
t.Fatal(err)
}
var s2 S
if err := json.Unmarshal(data, &s2); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(&s1, &s2) {
t.Errorf("got %#v, want %#v", s2, s1)
}
}

View File

@ -0,0 +1,66 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"flag"
"runtime"
"testing"
"time"
)
// This test is only run when --regressions is passed on the go test line.
var regressions = flag.Bool("regressions", false, "run uuid regression tests")
// TestClockSeqRace tests for a particular race condition of returning two
// identical Version 1 UUIDs. The duration of 1 minute was chosen because the
// race condition, before being fixed, nearly always occurred in under 30 seconds.
func TestClockSeqRace(t *testing.T) {
if !*regressions {
t.Skip("skipping regression tests")
}
duration := time.Minute
done := make(chan struct{})
defer close(done)
ch := make(chan UUID, 10000)
ncpu := runtime.NumCPU()
switch ncpu {
case 0, 1:
// We can't run the test effectively.
t.Skip("skipping race test, only one CPU detected")
return
default:
runtime.GOMAXPROCS(ncpu)
}
for i := 0; i < ncpu; i++ {
go func() {
for {
select {
case <-done:
return
case ch <- NewUUID():
}
}
}()
}
uuids := make(map[string]bool)
cnt := 0
start := time.Now()
for u := range ch {
s := u.String()
if uuids[s] {
t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
return
}
uuids[s] = true
if time.Since(start) > duration {
return
}
cnt++
}
}

Godeps/_workspace/src/github.com/pborman/uuid/sql.go

@ -0,0 +1,40 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"errors"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src.(type) {
case string:
// see uuid.Parse for required string format
parsed := Parse(src.(string))
if parsed == nil {
return errors.New("Scan: invalid UUID format")
}
*uuid = parsed
case []byte:
// assumes a simple slice of bytes, just check validity and store
u := UUID(src.([]byte))
if u.Variant() == Invalid {
return errors.New("Scan: invalid UUID format")
}
*uuid = u
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
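
A minimal sketch of how the Scan method above is exercised through database/sql; the driver import, the DSN, and the users table are hypothetical placeholders, and the import path github.com/pborman/uuid is assumed.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // hypothetical driver; any driver returning string or []byte works

	"github.com/pborman/uuid" // assumed import path of the vendored package
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var id uuid.UUID
	// database/sql calls (*UUID).Scan with whatever the driver returns
	// (a string or []byte here), so the column reads back as a UUID.
	err = db.QueryRow(`SELECT id FROM users LIMIT 1`).Scan(&id) // hypothetical table and column
	if err != nil {
		log.Fatal(err)
	}
	log.Println("got uuid:", id.String())
}
```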

View File

@ -0,0 +1,53 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"strings"
"testing"
)
func TestScan(t *testing.T) {
var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
var byteTest []byte = Parse(stringTest)
var badTypeTest int = 6
var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4"
var invalidByteTest []byte = Parse(invalidTest)
var uuid UUID
err := (&uuid).Scan(stringTest)
if err != nil {
t.Fatal(err)
}
err = (&uuid).Scan(byteTest)
if err != nil {
t.Fatal(err)
}
err = (&uuid).Scan(badTypeTest)
if err == nil {
t.Error("int correctly parsed and shouldn't have")
}
if !strings.Contains(err.Error(), "unable to scan type") {
t.Error("attempting to parse an int returned an incorrect error message")
}
err = (&uuid).Scan(invalidTest)
if err == nil {
t.Error("invalid uuid was parsed without error")
}
if !strings.Contains(err.Error(), "invalid UUID") {
t.Error("attempting to parse an invalid UUID returned an incorrect error message")
}
err = (&uuid).Scan(invalidByteTest)
if err == nil {
t.Error("invalid byte uuid was parsed without error")
}
if !strings.Contains(err.Error(), "invalid UUID") {
t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
}
}

View File

@ -40,15 +40,15 @@ func (t Time) UnixTime() (sec, nsec int64) {
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// adjusts the clock sequence as needed. An error is returned if the current
// time cannot be determined.
func GetTime() (Time, error) {
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer mu.Unlock()
mu.Lock()
return getTime()
}
func getTime() (Time, error) {
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
@ -63,7 +63,7 @@ func getTime() (Time, error) {
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), nil
return Time(now), clock_seq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
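
Since GetTime now returns the clock sequence alongside the timestamp, a caller can obtain both in one locked call. A minimal sketch, assuming the import path github.com/pborman/uuid:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid" // assumed import path of the vendored package
)

func main() {
	// GetTime returns the 100-ns timestamp and the clock sequence together,
	// so callers such as NewUUID no longer need a separate ClockSequence call.
	t, seq, err := uuid.GetTime()
	if err != nil {
		panic(err)
	}
	sec, nsec := t.UnixTime()
	fmt.Printf("timestamp: %d.%09d, clock sequence: %d\n", sec, nsec, seq)
}
```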

View File

@ -19,7 +19,7 @@ func NewUUID() UUID {
SetNodeInterface("")
}
now, err := GetTime()
now, seq, err := GetTime()
if err != nil {
return nil
}
@ -34,7 +34,7 @@ func NewUUID() UUID {
binary.BigEndian.PutUint32(uuid[0:], time_low)
binary.BigEndian.PutUint16(uuid[4:], time_mid)
binary.BigEndian.PutUint16(uuid[6:], time_hi)
binary.BigEndian.PutUint16(uuid[8:], clock_seq)
binary.BigEndian.PutUint16(uuid[8:], seq)
copy(uuid[10:], nodeID)
return uuid

View File

@ -0,0 +1,27 @@
# Created by http://www.gitignore.io
### Go ###
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) [2014] [shiena]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,100 @@
[![GoDoc](https://godoc.org/github.com/shiena/ansicolor?status.svg)](https://godoc.org/github.com/shiena/ansicolor)
# ansicolor
The ansicolor library provides a color console on Windows, similar to ANSICON, for Go programs.
## Features
|Escape sequence|Text attributes|
|---------------|----|
|\x1b[0m|All attributes off(color at startup)|
|\x1b[1m|Bold on(enable foreground intensity)|
|\x1b[4m|Underline on|
|\x1b[5m|Blink on(enable background intensity)|
|\x1b[21m|Bold off(disable foreground intensity)|
|\x1b[24m|Underline off|
|\x1b[25m|Blink off(disable background intensity)|
|Escape sequence|Foreground colors|
|---------------|----|
|\x1b[30m|Black|
|\x1b[31m|Red|
|\x1b[32m|Green|
|\x1b[33m|Yellow|
|\x1b[34m|Blue|
|\x1b[35m|Magenta|
|\x1b[36m|Cyan|
|\x1b[37m|White|
|\x1b[39m|Default(foreground color at startup)|
|\x1b[90m|Light Gray|
|\x1b[91m|Light Red|
|\x1b[92m|Light Green|
|\x1b[93m|Light Yellow|
|\x1b[94m|Light Blue|
|\x1b[95m|Light Magenta|
|\x1b[96m|Light Cyan|
|\x1b[97m|Light White|
|Escape sequence|Background colors|
|---------------|----|
|\x1b[40m|Black|
|\x1b[41m|Red|
|\x1b[42m|Green|
|\x1b[43m|Yellow|
|\x1b[44m|Blue|
|\x1b[45m|Magenta|
|\x1b[46m|Cyan|
|\x1b[47m|White|
|\x1b[49m|Default(background color at startup)|
|\x1b[100m|Light Gray|
|\x1b[101m|Light Red|
|\x1b[102m|Light Green|
|\x1b[103m|Light Yellow|
|\x1b[104m|Light Blue|
|\x1b[105m|Light Magenta|
|\x1b[106m|Light Cyan|
|\x1b[107m|Light White|
## Example
```go
package main
import (
"fmt"
"os"
"github.com/shiena/ansicolor"
)
func main() {
w := ansicolor.NewAnsiColorWriter(os.Stdout)
text := "%sforeground %sbold%s %sbackground%s\n"
fmt.Fprintf(w, text, "\x1b[31m", "\x1b[1m", "\x1b[21m", "\x1b[41;32m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[32m", "\x1b[1m", "\x1b[21m", "\x1b[42;31m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[33m", "\x1b[1m", "\x1b[21m", "\x1b[43;34m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[34m", "\x1b[1m", "\x1b[21m", "\x1b[44;33m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[35m", "\x1b[1m", "\x1b[21m", "\x1b[45;36m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[36m", "\x1b[1m", "\x1b[21m", "\x1b[46;35m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[37m", "\x1b[1m", "\x1b[21m", "\x1b[47;30m", "\x1b[0m")
}
```
![screenshot](https://gist.githubusercontent.com/shiena/a1bada24b525314a7d5e/raw/c763aa7cda6e4fefaccf831e2617adc40b6151c7/main.png)
## See also:
- https://github.com/daviddengcn/go-colortext
- https://github.com/adoxa/ansicon
- https://github.com/aslakhellesoy/wac
- https://github.com/wsxiaoys/terminal
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request

View File

@ -0,0 +1,20 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// Package ansicolor provides a color console on Windows, similar to ANSICON.
package ansicolor
import "io"
// NewAnsiColorWriter creates and initializes a new ansiColorWriter
// that wraps the io.Writer w.
// On the Windows console it changes the foreground and background
// colors of the text according to the ANSI escape sequences it encounters.
// On other systems it writes all text to w unchanged.
func NewAnsiColorWriter(w io.Writer) io.Writer {
if _, ok := w.(*ansiColorWriter); !ok {
return &ansiColorWriter{w: w}
}
return w
}

View File

@ -0,0 +1,27 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
/*
The ansicolor command colors console text using ANSI escape sequences, like wac.
$ go get github.com/shiena/ansicolor/ansicolor
See also:
https://github.com/aslakhellesoy/wac
*/
package main
import (
"io"
"os"
"github.com/shiena/ansicolor"
)
func main() {
w := ansicolor.NewAnsiColorWriter(os.Stdout)
io.Copy(w, os.Stdin)
}

View File

@ -0,0 +1,17 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build !windows
package ansicolor
import "io"
type ansiColorWriter struct {
w io.Writer
}
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
return cw.w.Write(p)
}

View File

@ -0,0 +1,25 @@
package ansicolor_test
import (
"bytes"
"testing"
"github.com/shiena/ansicolor"
)
func TestNewAnsiColor1(t *testing.T) {
inner := bytes.NewBufferString("")
w := ansicolor.NewAnsiColorWriter(inner)
if w == inner {
t.Errorf("Get %#v, want %#v", w, inner)
}
}
func TestNewAnsiColor2(t *testing.T) {
inner := bytes.NewBufferString("")
w1 := ansicolor.NewAnsiColorWriter(inner)
w2 := ansicolor.NewAnsiColorWriter(w1)
if w1 != w2 {
t.Errorf("Get %#v, want %#v", w1, w2)
}
}

View File

@ -0,0 +1,351 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build windows
package ansicolor
import (
"bytes"
"io"
"strings"
"syscall"
"unsafe"
)
type csiState int
const (
outsideCsiCode csiState = iota
firstCsiCode
secondCsiCode
)
type ansiColorWriter struct {
w io.Writer
state csiState
paramBuf bytes.Buffer
}
const (
firstCsiChar byte = '\x1b'
secondeCsiChar byte = '['
separatorChar byte = ';'
sgrCode byte = 'm'
)
const (
foregroundBlue = uint16(0x0001)
foregroundGreen = uint16(0x0002)
foregroundRed = uint16(0x0004)
foregroundIntensity = uint16(0x0008)
backgroundBlue = uint16(0x0010)
backgroundGreen = uint16(0x0020)
backgroundRed = uint16(0x0040)
backgroundIntensity = uint16(0x0080)
underscore = uint16(0x8000)
foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity
backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity
)
const (
ansiReset = "0"
ansiIntensityOn = "1"
ansiIntensityOff = "21"
ansiUnderlineOn = "4"
ansiUnderlineOff = "24"
ansiBlinkOn = "5"
ansiBlinkOff = "25"
ansiForegroundBlack = "30"
ansiForegroundRed = "31"
ansiForegroundGreen = "32"
ansiForegroundYellow = "33"
ansiForegroundBlue = "34"
ansiForegroundMagenta = "35"
ansiForegroundCyan = "36"
ansiForegroundWhite = "37"
ansiForegroundDefault = "39"
ansiBackgroundBlack = "40"
ansiBackgroundRed = "41"
ansiBackgroundGreen = "42"
ansiBackgroundYellow = "43"
ansiBackgroundBlue = "44"
ansiBackgroundMagenta = "45"
ansiBackgroundCyan = "46"
ansiBackgroundWhite = "47"
ansiBackgroundDefault = "49"
ansiLightForegroundGray = "90"
ansiLightForegroundRed = "91"
ansiLightForegroundGreen = "92"
ansiLightForegroundYellow = "93"
ansiLightForegroundBlue = "94"
ansiLightForegroundMagenta = "95"
ansiLightForegroundCyan = "96"
ansiLightForegroundWhite = "97"
ansiLightBackgroundGray = "100"
ansiLightBackgroundRed = "101"
ansiLightBackgroundGreen = "102"
ansiLightBackgroundYellow = "103"
ansiLightBackgroundBlue = "104"
ansiLightBackgroundMagenta = "105"
ansiLightBackgroundCyan = "106"
ansiLightBackgroundWhite = "107"
)
type drawType int
const (
foreground drawType = iota
background
)
type winColor struct {
code uint16
drawType drawType
}
var colorMap = map[string]winColor{
ansiForegroundBlack: {0, foreground},
ansiForegroundRed: {foregroundRed, foreground},
ansiForegroundGreen: {foregroundGreen, foreground},
ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground},
ansiForegroundBlue: {foregroundBlue, foreground},
ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground},
ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground},
ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
ansiBackgroundBlack: {0, background},
ansiBackgroundRed: {backgroundRed, background},
ansiBackgroundGreen: {backgroundGreen, background},
ansiBackgroundYellow: {backgroundRed | backgroundGreen, background},
ansiBackgroundBlue: {backgroundBlue, background},
ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background},
ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background},
ansiBackgroundWhite: {backgroundRed | backgroundGreen | backgroundBlue, background},
ansiBackgroundDefault: {0, background},
ansiLightForegroundGray: {foregroundIntensity, foreground},
ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground},
ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground},
ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground},
ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground},
ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground},
ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground},
ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground},
ansiLightBackgroundGray: {backgroundIntensity, background},
ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background},
ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background},
ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background},
ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background},
ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background},
ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background},
ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background},
}
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
defaultAttr *textAttributes
)
func init() {
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
if screenInfo != nil {
colorMap[ansiForegroundDefault] = winColor{
screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue),
foreground,
}
colorMap[ansiBackgroundDefault] = winColor{
screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue),
background,
}
defaultAttr = convertTextAttr(screenInfo.WAttributes)
}
}
type coord struct {
X, Y int16
}
type smallRect struct {
Left, Top, Right, Bottom int16
}
type consoleScreenBufferInfo struct {
DwSize coord
DwCursorPosition coord
WAttributes uint16
SrWindow smallRect
DwMaximumWindowSize coord
}
func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo {
var csbi consoleScreenBufferInfo
ret, _, _ := procGetConsoleScreenBufferInfo.Call(
hConsoleOutput,
uintptr(unsafe.Pointer(&csbi)))
if ret == 0 {
return nil
}
return &csbi
}
func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
ret, _, _ := procSetConsoleTextAttribute.Call(
hConsoleOutput,
uintptr(wAttributes))
return ret != 0
}
type textAttributes struct {
foregroundColor uint16
backgroundColor uint16
foregroundIntensity uint16
backgroundIntensity uint16
underscore uint16
otherAttributes uint16
}
func convertTextAttr(winAttr uint16) *textAttributes {
fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue)
bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue)
fgIntensity := winAttr & foregroundIntensity
bgIntensity := winAttr & backgroundIntensity
underline := winAttr & underscore
otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore)
return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes}
}
func convertWinAttr(textAttr *textAttributes) uint16 {
var winAttr uint16 = 0
winAttr |= textAttr.foregroundColor
winAttr |= textAttr.backgroundColor
winAttr |= textAttr.foregroundIntensity
winAttr |= textAttr.backgroundIntensity
winAttr |= textAttr.underscore
winAttr |= textAttr.otherAttributes
return winAttr
}
func changeColor(param []byte) {
if defaultAttr == nil {
return
}
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
if screenInfo == nil {
return
}
winAttr := convertTextAttr(screenInfo.WAttributes)
strParam := string(param)
if len(strParam) <= 0 {
strParam = "0"
}
csiParam := strings.Split(strParam, string(separatorChar))
for _, p := range csiParam {
c, ok := colorMap[p]
switch {
case !ok:
switch p {
case ansiReset:
winAttr.foregroundColor = defaultAttr.foregroundColor
winAttr.backgroundColor = defaultAttr.backgroundColor
winAttr.foregroundIntensity = defaultAttr.foregroundIntensity
winAttr.backgroundIntensity = defaultAttr.backgroundIntensity
winAttr.underscore = 0
winAttr.otherAttributes = 0
case ansiIntensityOn:
winAttr.foregroundIntensity = foregroundIntensity
case ansiIntensityOff:
winAttr.foregroundIntensity = 0
case ansiUnderlineOn:
winAttr.underscore = underscore
case ansiUnderlineOff:
winAttr.underscore = 0
case ansiBlinkOn:
winAttr.backgroundIntensity = backgroundIntensity
case ansiBlinkOff:
winAttr.backgroundIntensity = 0
default:
// unknown code
}
case c.drawType == foreground:
winAttr.foregroundColor = c.code
case c.drawType == background:
winAttr.backgroundColor = c.code
}
}
winTextAttribute := convertWinAttr(winAttr)
setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute)
}
func parseEscapeSequence(command byte, param []byte) {
switch command {
case sgrCode:
changeColor(param)
}
}
func isParameterChar(b byte) bool {
return ('0' <= b && b <= '9') || b == separatorChar
}
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
r, nw, nc, first, last := 0, 0, 0, 0, 0
var err error
for i, ch := range p {
switch cw.state {
case outsideCsiCode:
if ch == firstCsiChar {
nc++
cw.state = firstCsiCode
}
case firstCsiCode:
switch ch {
case firstCsiChar:
nc++
break
case secondeCsiChar:
nc++
cw.state = secondCsiCode
last = i - 1
default:
cw.state = outsideCsiCode
}
case secondCsiCode:
nc++
if isParameterChar(ch) {
cw.paramBuf.WriteByte(ch)
} else {
nw, err = cw.w.Write(p[first:last])
r += nw
if err != nil {
return r, err
}
first = i + 1
param := cw.paramBuf.Bytes()
cw.paramBuf.Reset()
parseEscapeSequence(ch, param)
cw.state = outsideCsiCode
}
default:
cw.state = outsideCsiCode
}
}
if cw.state == outsideCsiCode {
nw, err = cw.w.Write(p[first:len(p)])
}
return r + nw + nc, err
}

View File

@ -0,0 +1,236 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build windows
package ansicolor_test
import (
"bytes"
"fmt"
"syscall"
"testing"
"github.com/shiena/ansicolor"
. "github.com/shiena/ansicolor"
)
func TestWritePlanText(t *testing.T) {
inner := bytes.NewBufferString("")
w := ansicolor.NewAnsiColorWriter(inner)
expected := "plain text"
fmt.Fprintf(w, expected)
actual := inner.String()
if actual != expected {
t.Errorf("Get %s, want %s", actual, expected)
}
}
func TestWriteParseText(t *testing.T) {
inner := bytes.NewBufferString("")
w := ansicolor.NewAnsiColorWriter(inner)
inputTail := "\x1b[0mtail text"
expectedTail := "tail text"
fmt.Fprintf(w, inputTail)
actualTail := inner.String()
inner.Reset()
if actualTail != expectedTail {
t.Errorf("Get %s, want %s", actualTail, expectedTail)
}
inputHead := "head text\x1b[0m"
expectedHead := "head text"
fmt.Fprintf(w, inputHead)
actualHead := inner.String()
inner.Reset()
if actualHead != expectedHead {
t.Errorf("Get %s, want %s", actualHead, expectedHead)
}
inputBothEnds := "both ends \x1b[0m text"
expectedBothEnds := "both ends text"
fmt.Fprintf(w, inputBothEnds)
actualBothEnds := inner.String()
inner.Reset()
if actualBothEnds != expectedBothEnds {
t.Errorf("Get %s, want %s", actualBothEnds, expectedBothEnds)
}
inputManyEsc := "\x1b\x1b\x1b\x1b[0m many esc"
expectedManyEsc := "\x1b\x1b\x1b many esc"
fmt.Fprintf(w, inputManyEsc)
actualManyEsc := inner.String()
inner.Reset()
if actualManyEsc != expectedManyEsc {
t.Errorf("Get %s, want %s", actualManyEsc, expectedManyEsc)
}
expectedSplit := "split text"
for _, ch := range "split \x1b[0m text" {
fmt.Fprintf(w, string(ch))
}
actualSplit := inner.String()
inner.Reset()
if actualSplit != expectedSplit {
t.Errorf("Get %s, want %s", actualSplit, expectedSplit)
}
}
type screenNotFoundError struct {
error
}
func writeAnsiColor(expectedText, colorCode string) (actualText string, actualAttributes uint16, err error) {
inner := bytes.NewBufferString("")
w := ansicolor.NewAnsiColorWriter(inner)
fmt.Fprintf(w, "\x1b[%sm%s", colorCode, expectedText)
actualText = inner.String()
screenInfo := GetConsoleScreenBufferInfo(uintptr(syscall.Stdout))
if screenInfo != nil {
actualAttributes = screenInfo.WAttributes
} else {
err = &screenNotFoundError{}
}
return
}
type testParam struct {
text string
attributes uint16
ansiColor string
}
func TestWriteAnsiColorText(t *testing.T) {
screenInfo := GetConsoleScreenBufferInfo(uintptr(syscall.Stdout))
if screenInfo == nil {
t.Fatal("Could not get ConsoleScreenBufferInfo")
}
defer ChangeColor(screenInfo.WAttributes)
defaultFgColor := screenInfo.WAttributes & uint16(0x0007)
defaultBgColor := screenInfo.WAttributes & uint16(0x0070)
defaultFgIntensity := screenInfo.WAttributes & uint16(0x0008)
defaultBgIntensity := screenInfo.WAttributes & uint16(0x0080)
fgParam := []testParam{
{"foreground black ", uint16(0x0000 | 0x0000), "30"},
{"foreground red ", uint16(0x0004 | 0x0000), "31"},
{"foreground green ", uint16(0x0002 | 0x0000), "32"},
{"foreground yellow ", uint16(0x0006 | 0x0000), "33"},
{"foreground blue ", uint16(0x0001 | 0x0000), "34"},
{"foreground magenta", uint16(0x0005 | 0x0000), "35"},
{"foreground cyan ", uint16(0x0003 | 0x0000), "36"},
{"foreground white ", uint16(0x0007 | 0x0000), "37"},
{"foreground default", defaultFgColor | 0x0000, "39"},
{"foreground light gray ", uint16(0x0000 | 0x0008 | 0x0000), "90"},
{"foreground light red ", uint16(0x0004 | 0x0008 | 0x0000), "91"},
{"foreground light green ", uint16(0x0002 | 0x0008 | 0x0000), "92"},
{"foreground light yellow ", uint16(0x0006 | 0x0008 | 0x0000), "93"},
{"foreground light blue ", uint16(0x0001 | 0x0008 | 0x0000), "94"},
{"foreground light magenta", uint16(0x0005 | 0x0008 | 0x0000), "95"},
{"foreground light cyan ", uint16(0x0003 | 0x0008 | 0x0000), "96"},
{"foreground light white ", uint16(0x0007 | 0x0008 | 0x0000), "97"},
}
bgParam := []testParam{
{"background black ", uint16(0x0007 | 0x0000), "40"},
{"background red ", uint16(0x0007 | 0x0040), "41"},
{"background green ", uint16(0x0007 | 0x0020), "42"},
{"background yellow ", uint16(0x0007 | 0x0060), "43"},
{"background blue ", uint16(0x0007 | 0x0010), "44"},
{"background magenta", uint16(0x0007 | 0x0050), "45"},
{"background cyan ", uint16(0x0007 | 0x0030), "46"},
{"background white ", uint16(0x0007 | 0x0070), "47"},
{"background default", uint16(0x0007) | defaultBgColor, "49"},
{"background light gray ", uint16(0x0007 | 0x0000 | 0x0080), "100"},
{"background light red ", uint16(0x0007 | 0x0040 | 0x0080), "101"},
{"background light green ", uint16(0x0007 | 0x0020 | 0x0080), "102"},
{"background light yellow ", uint16(0x0007 | 0x0060 | 0x0080), "103"},
{"background light blue ", uint16(0x0007 | 0x0010 | 0x0080), "104"},
{"background light magenta", uint16(0x0007 | 0x0050 | 0x0080), "105"},
{"background light cyan ", uint16(0x0007 | 0x0030 | 0x0080), "106"},
{"background light white ", uint16(0x0007 | 0x0070 | 0x0080), "107"},
}
resetParam := []testParam{
{"all reset", defaultFgColor | defaultBgColor | defaultFgIntensity | defaultBgIntensity, "0"},
{"all reset", defaultFgColor | defaultBgColor | defaultFgIntensity | defaultBgIntensity, ""},
}
boldParam := []testParam{
{"bold on", uint16(0x0007 | 0x0008), "1"},
{"bold off", uint16(0x0007), "21"},
}
underscoreParam := []testParam{
{"underscore on", uint16(0x0007 | 0x8000), "4"},
{"underscore off", uint16(0x0007), "24"},
}
blinkParam := []testParam{
{"blink on", uint16(0x0007 | 0x0080), "5"},
{"blink off", uint16(0x0007), "25"},
}
mixedParam := []testParam{
{"both black, bold, underline, blink", uint16(0x0000 | 0x0000 | 0x0008 | 0x8000 | 0x0080), "30;40;1;4;5"},
{"both red, bold, underline, blink", uint16(0x0004 | 0x0040 | 0x0008 | 0x8000 | 0x0080), "31;41;1;4;5"},
{"both green, bold, underline, blink", uint16(0x0002 | 0x0020 | 0x0008 | 0x8000 | 0x0080), "32;42;1;4;5"},
{"both yellow, bold, underline, blink", uint16(0x0006 | 0x0060 | 0x0008 | 0x8000 | 0x0080), "33;43;1;4;5"},
{"both blue, bold, underline, blink", uint16(0x0001 | 0x0010 | 0x0008 | 0x8000 | 0x0080), "34;44;1;4;5"},
{"both magenta, bold, underline, blink", uint16(0x0005 | 0x0050 | 0x0008 | 0x8000 | 0x0080), "35;45;1;4;5"},
{"both cyan, bold, underline, blink", uint16(0x0003 | 0x0030 | 0x0008 | 0x8000 | 0x0080), "36;46;1;4;5"},
{"both white, bold, underline, blink", uint16(0x0007 | 0x0070 | 0x0008 | 0x8000 | 0x0080), "37;47;1;4;5"},
{"both default, bold, underline, blink", uint16(defaultFgColor | defaultBgColor | 0x0008 | 0x8000 | 0x0080), "39;49;1;4;5"},
}
assertTextAttribute := func(expectedText string, expectedAttributes uint16, ansiColor string) {
actualText, actualAttributes, err := writeAnsiColor(expectedText, ansiColor)
if actualText != expectedText {
t.Errorf("Get %s, want %s", actualText, expectedText)
}
if err != nil {
t.Fatal("Could not get ConsoleScreenBufferInfo")
}
if actualAttributes != expectedAttributes {
t.Errorf("Text: %s, Get 0x%04x, want 0x%04x", expectedText, actualAttributes, expectedAttributes)
}
}
for _, v := range fgParam {
ResetColor()
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
for _, v := range bgParam {
ChangeColor(uint16(0x0070 | 0x0007))
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
for _, v := range resetParam {
ChangeColor(uint16(0x0000 | 0x0070 | 0x0008))
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
ResetColor()
for _, v := range boldParam {
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
ResetColor()
for _, v := range underscoreParam {
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
ResetColor()
for _, v := range blinkParam {
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
for _, v := range mixedParam {
ResetColor()
assertTextAttribute(v.text, v.attributes, v.ansiColor)
}
}

View File

@ -0,0 +1,24 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package ansicolor_test
import (
"fmt"
"os"
"github.com/shiena/ansicolor"
)
func ExampleNewAnsiColorWriter() {
w := ansicolor.NewAnsiColorWriter(os.Stdout)
text := "%sforeground %sbold%s %sbackground%s\n"
fmt.Fprintf(w, text, "\x1b[31m", "\x1b[1m", "\x1b[21m", "\x1b[41;32m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[32m", "\x1b[1m", "\x1b[21m", "\x1b[42;31m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[33m", "\x1b[1m", "\x1b[21m", "\x1b[43;34m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[34m", "\x1b[1m", "\x1b[21m", "\x1b[44;33m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[35m", "\x1b[1m", "\x1b[21m", "\x1b[45;36m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[36m", "\x1b[1m", "\x1b[21m", "\x1b[46;35m", "\x1b[0m")
fmt.Fprintf(w, text, "\x1b[37m", "\x1b[1m", "\x1b[21m", "\x1b[47;30m", "\x1b[0m")
}

View File

@ -0,0 +1,19 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build windows
package ansicolor
import "syscall"
var GetConsoleScreenBufferInfo = getConsoleScreenBufferInfo
func ChangeColor(color uint16) {
setConsoleTextAttribute(uintptr(syscall.Stdout), color)
}
func ResetColor() {
ChangeColor(uint16(0x0007))
}

View File

@ -18,7 +18,6 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/fdtrack"
"github.com/syndtr/goleveldb/leveldb/util"
)
@ -370,8 +369,6 @@ func (fw fileWrap) Close() error {
err := fw.File.Close()
if err != nil {
f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err))
} else {
fdtrack.Close("leveldb")
}
return err
}
@ -403,7 +400,6 @@ func (f *file) Open() (Reader, error) {
return nil, err
}
ok:
fdtrack.Open("leveldb")
f.open = true
f.fs.open++
return fileWrap{of, f}, nil
@ -422,7 +418,6 @@ func (f *file) Create() (Writer, error) {
if err != nil {
return nil, err
}
fdtrack.Open("leveldb")
f.open = true
f.fs.open++
return fileWrap{of, f}, nil

View File

@ -2,7 +2,7 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.
.PHONY: geth mist all test travis-test-with-coverage clean
.PHONY: geth evm mist all test travis-test-with-coverage clean
GOBIN = build/bin
geth:
@ -10,6 +10,34 @@ geth:
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
geth-cross: geth-linux geth-darwin geth-windows geth-android
@echo "Full cross compilation done:"
@ls -l $(GOBIN)/geth-*
geth-linux: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=linux/* -v ./cmd/geth
@echo "Linux cross compilation done:"
@ls -l $(GOBIN)/geth-linux-*
geth-darwin: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=darwin/* -v ./cmd/geth
@echo "Darwin cross compilation done:"
@ls -l $(GOBIN)/geth-darwin-*
geth-windows: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=windows/* -v ./cmd/geth
@echo "Windows cross compilation done:"
@ls -l $(GOBIN)/geth-windows-*
geth-android: xgo
build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=android-16/*,android-21/* -v ./cmd/geth
@echo "Android cross compilation done:"
@ls -l $(GOBIN)/geth-android-*
evm:
build/env.sh $(GOROOT)/bin/go install -v $(shell build/ldflags.sh) ./cmd/evm
@echo "Done building."
@echo "Run \"$(GOBIN)/evm to start the evm."
mist:
build/env.sh go install -v $(shell build/ldflags.sh) ./cmd/mist
@echo "Done building."
@ -24,5 +52,8 @@ test: all
travis-test-with-coverage: all
build/env.sh build/test-global-coverage.sh
xgo:
build/env.sh go get github.com/karalabe/xgo
clean:
rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*

View File

@ -1,19 +1,18 @@
## Ethereum Go
Ethereum Go Client, by Jeffrey Wilcke (and some other people).
Official golang implementation of the Ethereum protocol
| Linux | OSX | ARM | Windows | Tests
----------|---------|-----|-----|---------|------
develop | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Linux%20Go%20develop%20branch)](https://build.ethdev.com/builders/Linux%20Go%20develop%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Linux%20Go%20develop%20branch)](https://build.ethdev.com/builders/OSX%20Go%20develop%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=ARM%20Go%20develop%20branch)](https://build.ethdev.com/builders/ARM%20Go%20develop%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Windows%20Go%20develop%20branch)](https://build.ethdev.com/builders/Windows%20Go%20develop%20branch/builds/-1) | [![Buildr+Status](https://travis-ci.org/ethereum/go-ethereum.svg?branch=develop)](https://travis-ci.org/ethereum/go-ethereum) [![Coverage Status](https://coveralls.io/repos/ethereum/go-ethereum/badge.svg?branch=develop)](https://coveralls.io/r/ethereum/go-ethereum?branch=develop)
master | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Linux%20Go%20master%20branch)](https://build.ethdev.com/builders/Linux%20Go%20master%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=OSX%20Go%20master%20branch)](https://build.ethdev.com/builders/OSX%20Go%20master%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=ARM%20Go%20master%20branch)](https://build.ethdev.com/builders/ARM%20Go%20master%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Windows%20Go%20master%20branch)](https://build.ethdev.com/builders/Windows%20Go%20master%20branch/builds/-1) | [![Buildr+Status](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) [![Coverage Status](https://coveralls.io/repos/ethereum/go-ethereum/badge.svg?branch=master)](https://coveralls.io/r/ethereum/go-ethereum?branch=master)
develop | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Linux%20Go%20develop%20branch)](https://build.ethdev.com/builders/Linux%20Go%20develop%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Linux%20Go%20develop%20branch)](https://build.ethdev.com/builders/OSX%20Go%20develop%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=ARM%20Go%20develop%20branch)](https://build.ethdev.com/builders/ARM%20Go%20develop%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Windows%20Go%20develop%20branch)](https://build.ethdev.com/builders/Windows%20Go%20develop%20branch/builds/-1) | [![Buildr+Status](https://travis-ci.org/ethereum/go-ethereum.svg?branch=develop)](https://travis-ci.org/ethereum/go-ethereum) [![codecov.io](http://codecov.io/github/ethereum/go-ethereum/coverage.svg?branch=develop)](http://codecov.io/github/ethereum/go-ethereum?branch=develop)
master | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Linux%20Go%20master%20branch)](https://build.ethdev.com/builders/Linux%20Go%20master%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=OSX%20Go%20master%20branch)](https://build.ethdev.com/builders/OSX%20Go%20master%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=ARM%20Go%20master%20branch)](https://build.ethdev.com/builders/ARM%20Go%20master%20branch/builds/-1) | [![Build+Status](https://build.ethdev.com/buildstatusimage?builder=Windows%20Go%20master%20branch)](https://build.ethdev.com/builders/Windows%20Go%20master%20branch/builds/-1) | [![Buildr+Status](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) [![codecov.io](http://codecov.io/github/ethereum/go-ethereum/coverage.svg?branch=master)](http://codecov.io/github/ethereum/go-ethereum?branch=master)
[![Bugs](https://badge.waffle.io/ethereum/go-ethereum.png?label=bug&title=Bugs)](https://waffle.io/ethereum/go-ethereum)
[![Stories in Ready](https://badge.waffle.io/ethereum/go-ethereum.png?label=ready&title=Ready)](https://waffle.io/ethereum/go-ethereum)
[![Stories in Progress](https://badge.waffle.io/ethereum/go-ethereum.svg?label=in%20progress&title=In Progress)](http://waffle.io/ethereum/go-ethereum)
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/ethereum/go-ethereum)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Automated development builds
======================
## Automated development builds
The following builds are built automatically by our build servers after each push to the [develop](https://github.com/ethereum/go-ethereum/tree/develop) branch.
@ -25,8 +24,7 @@ The following builds are build automatically by our build servers after each pus
* [Windows 64-bit](https://build.ethdev.com/builds/Windows%20Go%20develop%20branch/Geth-Win64-latest.zip)
* [ARM](https://build.ethdev.com/builds/ARM%20Go%20develop%20branch/geth-ARM-latest.tar.bz2)
Building the source
===================
## Building the source
For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
@ -38,34 +36,31 @@ Once the dependencies are installed, run
make geth
Executables
===========
## Executables
Go Ethereum comes with several wrappers/executables found in
[the `cmd` directory](https://github.com/ethereum/go-ethereum/tree/develop/cmd):
* `geth` Ethereum CLI (ethereum command line interface client)
* `bootnode` runs a bootstrap node for the Discovery Protocol
* `ethtest` test tool which runs with the [tests](https://github.com/ethereum/tests) suite:
`/path/to/test.json > ethtest --test BlockTests --stdin`.
* `evm` is a generic Ethereum Virtual Machine: `evm -code 60ff60ff -gas
10000 -price 0 -dump`. See `-h` for a detailed description.
* `disasm` disassembles EVM code: `echo "6001" | disasm`
* `rlpdump` prints RLP structures
Command | Description
----------|------------
`geth` | Ethereum CLI (ethereum command line interface client) |
`bootnode` | runs a bootstrap node for the Discovery Protocol |
`ethtest` | test tool which runs with the [tests](https://github.com/ethereum/tests) suite: `ethtest --test BlockTests --stdin < /path/to/test.json` |
`evm` | generic Ethereum Virtual Machine: `evm -code 60ff60ff -gas 10000 -price 0 -dump`. See `-h` for a detailed description. |
`disasm` | disassembles EVM code: `echo "6001" \| disasm` |
`rlpdump` | prints RLP structures |
Command line options
====================
## Command line options
`geth` can be configured via command line options, environment variables and config files.
To get the options available:
geth --help
geth help
For further details on options, see the [wiki](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)
Contribution
============
## Contribution
If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits that do not comply with the coding standards

VERSION (new file)
View File

@ -0,0 +1 @@
1.2.2

View File

@ -7,7 +7,12 @@ if [ ! -f "build/env.sh" ]; then
exit 2
fi
# Since Go 1.5, the separator char for link time assignments
# is '=' and using ' ' prints a warning. However, Go < 1.5 does
# not support using '='.
sep=$(go version | awk '{ if ($3 >= "go1.5" || index($3, "devel")) print "="; else print " "; }' -)
# set gitCommit when running from a Git checkout.
if [ -f ".git/HEAD" ]; then
echo "-ldflags '-X main.gitCommit $(git rev-parse HEAD)'"
echo "-ldflags '-X main.gitCommit$sep$(git rev-parse HEAD)'"
fi

View File

@ -1,26 +1,15 @@
#!/bin/bash
# This script runs all package tests and merges the resulting coverage
# profiles. Coverage is accounted per package under test.
#!/usr/bin/env bash
set -e
echo "" > coverage.txt
if [ ! -f "build/env.sh" ]; then
echo "$0 must be run from the root of the repository."
exit 2
fi
echo "mode: count" > profile.cov
for pkg in $(go list ./...); do
# drop the namespace prefix.
dir=${pkg##github.com/ethereum/go-ethereum/}
if [[ $dir != "tests" ]]; then
go test -covermode=count -coverprofile=$dir/profile.tmp $pkg
fi
if [[ -f $dir/profile.tmp ]]; then
tail -n +2 $dir/profile.tmp >> profile.cov
rm $dir/profile.tmp
for d in $(find ./* -maxdepth 10 -type d -not -path "./build" -not -path "./Godeps/*" ); do
if ls $d/*.go &> /dev/null; then
go test -coverprofile=profile.out -covermode=atomic $d
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
echo '<<<<<< EOF' >> coverage.txt
rm profile.out
fi
fi
done

View File

@ -26,6 +26,7 @@ import (
"strings"
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/tests"
)
@ -62,6 +63,10 @@ var (
Name: "skip",
Usage: "Tests names to skip",
}
TraceFlag = cli.BoolFlag{
Name: "trace",
Usage: "Enable VM tracing",
}
)
func runTestWithReader(test string, r io.Reader) error {
@ -173,7 +178,6 @@ func runSuite(test, file string) {
glog.Fatalln(err)
}
}
}
}
}
@ -184,6 +188,7 @@ func setupApp(c *cli.Context) {
continueOnError = c.GlobalBool(ContinueOnErrorFlag.Name)
useStdIn := c.GlobalBool(ReadStdInFlag.Name)
skipTests = strings.Split(c.GlobalString(SkipTestsFlag.Name), " ")
vm.Debug = c.GlobalBool(TraceFlag.Name)
if !useStdIn {
runSuite(flagTest, flagFile)
@ -211,6 +216,7 @@ func main() {
ContinueOnErrorFlag,
ReadStdInFlag,
SkipTestsFlag,
TraceFlag,
}
if err := app.Run(os.Args); err != nil {

View File

@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
)
var (
@ -40,6 +41,14 @@ var (
Name: "debug",
Usage: "output full trace logs",
}
ForceJitFlag = cli.BoolFlag{
Name: "forcejit",
Usage: "forces jit compilation",
}
DisableJitFlag = cli.BoolFlag{
Name: "nojit",
Usage: "disabled jit compilation",
}
CodeFlag = cli.StringFlag{
Name: "code",
Usage: "EVM code",
@ -77,6 +86,8 @@ func init() {
app = utils.NewApp("0.2", "the evm command line interface")
app.Flags = []cli.Flag{
DebugFlag,
ForceJitFlag,
DisableJitFlag,
SysStatFlag,
CodeFlag,
GasFlag,
@ -90,6 +101,10 @@ func init() {
func run(ctx *cli.Context) {
vm.Debug = ctx.GlobalBool(DebugFlag.Name)
vm.ForceJit = ctx.GlobalBool(ForceJitFlag.Name)
vm.EnableJit = !ctx.GlobalBool(DisableJitFlag.Name)
glog.SetToStderr(true)
db, _ := ethdb.NewMemDatabase()
statedb := state.New(common.Hash{}, db)
@ -110,11 +125,6 @@ func run(ctx *cli.Context) {
)
vmdone := time.Since(tstart)
if e != nil {
fmt.Println(e)
os.Exit(1)
}
if ctx.GlobalBool(DumpFlag.Name) {
fmt.Println(string(statedb.Dump()))
}
@ -133,7 +143,11 @@ num gc: %d
`, mem.Alloc, mem.TotalAlloc, mem.Mallocs, mem.HeapAlloc, mem.HeapObjects, mem.NumGC)
}
fmt.Printf("OUT: 0x%x\n", ret)
fmt.Printf("OUT: 0x%x", ret)
if e != nil {
fmt.Printf(" error: %v", e)
}
fmt.Println()
}
func main() {
@ -152,7 +166,7 @@ type VMEnv struct {
depth int
Gas *big.Int
time uint64
time *big.Int
logs []vm.StructLog
}
@ -161,7 +175,7 @@ func NewEnv(state *state.StateDB, transactor common.Address, value *big.Int) *VM
state: state,
transactor: &transactor,
value: value,
time: uint64(time.Now().Unix()),
time: big.NewInt(time.Now().Unix()),
}
}
@ -169,7 +183,7 @@ func (self *VMEnv) State() *state.StateDB { return self.state }
func (self *VMEnv) Origin() common.Address { return *self.transactor }
func (self *VMEnv) BlockNumber() *big.Int { return common.Big0 }
func (self *VMEnv) Coinbase() common.Address { return *self.transactor }
func (self *VMEnv) Time() uint64 { return self.time }
func (self *VMEnv) Time() *big.Int { return self.time }
func (self *VMEnv) Difficulty() *big.Int { return common.Big1 }
func (self *VMEnv) BlockHash() []byte { return make([]byte, 32) }
func (self *VMEnv) Value() *big.Int { return self.value }
@ -192,6 +206,9 @@ func (self *VMEnv) StructLogs() []vm.StructLog {
func (self *VMEnv) AddLog(log *state.Log) {
self.state.AddLog(log)
}
func (self *VMEnv) CanTransfer(from vm.Account, balance *big.Int) bool {
return from.Balance().Cmp(balance) >= 0
}
func (self *VMEnv) Transfer(from, to vm.Account, amount *big.Int) error {
return vm.Transfer(from, to, amount)
}

View File

@ -22,7 +22,6 @@ import (
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/tests"
@ -92,7 +91,6 @@ func runBlockTest(ctx *cli.Context) {
if err != nil {
utils.Fatalf("%v", err)
}
defer ethereum.Stop()
if rpc {
fmt.Println("Block Test post state validated, starting RPC interface.")
startEth(ctx, ethereum)
@ -103,34 +101,31 @@ func runBlockTest(ctx *cli.Context) {
func runOneBlockTest(ctx *cli.Context, test *tests.BlockTest) (*eth.Ethereum, error) {
cfg := utils.MakeEthConfig(ClientIdentifier, Version, ctx)
cfg.NewDB = func(path string) (common.Database, error) { return ethdb.NewMemDatabase() }
cfg.NewDB = func(path string) (ethdb.Database, error) { return ethdb.NewMemDatabase() }
cfg.MaxPeers = 0 // disable network
cfg.Shh = false // disable whisper
cfg.NAT = nil // disable port mapping
ethereum, err := eth.New(cfg)
if err != nil {
return nil, err
}
// if err := ethereum.Start(); err != nil {
// return nil, err
// }
// import the genesis block
ethereum.ResetWithGenesisBlock(test.Genesis)
// import pre accounts
statedb, err := test.InsertPreState(ethereum)
_, err = test.InsertPreState(ethereum)
if err != nil {
return ethereum, fmt.Errorf("InsertPreState: %v", err)
}
if err := test.TryBlocksInsert(ethereum.ChainManager()); err != nil {
cm := ethereum.ChainManager()
validBlocks, err := test.TryBlocksInsert(cm)
if err != nil {
return ethereum, fmt.Errorf("Block Test load error: %v", err)
}
if err := test.ValidatePostState(statedb); err != nil {
newDB := cm.State()
if err := test.ValidatePostState(newDB); err != nil {
return ethereum, fmt.Errorf("post state validation failed: %v", err)
}
return ethereum, nil
return ethereum, test.ValidateImportedHeaders(cm, validBlocks)
}

View File

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
)
@ -74,10 +75,10 @@ func importChain(ctx *cli.Context) {
if len(ctx.Args()) != 1 {
utils.Fatalf("This command requires an argument.")
}
chain, blockDB, stateDB, extraDB := utils.MakeChain(ctx)
chain, chainDb := utils.MakeChain(ctx)
start := time.Now()
err := utils.ImportChain(chain, ctx.Args().First())
closeAll(blockDB, stateDB, extraDB)
chainDb.Close()
if err != nil {
utils.Fatalf("Import error: %v", err)
}
@ -88,7 +89,7 @@ func exportChain(ctx *cli.Context) {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
chain, _, _, _ := utils.MakeChain(ctx)
chain, _ := utils.MakeChain(ctx)
start := time.Now()
var err error
@ -115,17 +116,16 @@ func exportChain(ctx *cli.Context) {
}
func removeDB(ctx *cli.Context) {
confirm, err := utils.PromptConfirm("Remove local databases?")
confirm, err := utils.PromptConfirm("Remove local database?")
if err != nil {
utils.Fatalf("%v", err)
}
if confirm {
fmt.Println("Removing chain and state databases...")
fmt.Println("Removing chaindata...")
start := time.Now()
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "blockchain"))
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "state"))
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "chaindata"))
fmt.Printf("Removed in %v\n", time.Since(start))
} else {
@ -136,8 +136,8 @@ func removeDB(ctx *cli.Context) {
func upgradeDB(ctx *cli.Context) {
glog.Infoln("Upgrading blockchain database")
chain, blockDB, stateDB, extraDB := utils.MakeChain(ctx)
v, _ := blockDB.Get([]byte("BlockchainVersion"))
chain, chainDb := utils.MakeChain(ctx)
v, _ := chainDb.Get([]byte("BlockchainVersion"))
bcVersion := int(common.NewValue(v).Uint())
if bcVersion == 0 {
bcVersion = core.BlockChainVersion
@ -149,15 +149,14 @@ func upgradeDB(ctx *cli.Context) {
if err := utils.ExportChain(chain, exportFile); err != nil {
utils.Fatalf("Unable to export chain for reimport %s", err)
}
closeAll(blockDB, stateDB, extraDB)
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "blockchain"))
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "state"))
chainDb.Close()
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "chaindata"))
// Import the chain file.
chain, blockDB, stateDB, extraDB = utils.MakeChain(ctx)
blockDB.Put([]byte("BlockchainVersion"), common.NewValue(core.BlockChainVersion).Bytes())
chain, chainDb = utils.MakeChain(ctx)
chainDb.Put([]byte("BlockchainVersion"), common.NewValue(core.BlockChainVersion).Bytes())
err := utils.ImportChain(chain, exportFile)
closeAll(blockDB, stateDB, extraDB)
chainDb.Close()
if err != nil {
utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile)
} else {
@ -167,7 +166,7 @@ func upgradeDB(ctx *cli.Context) {
}
func dump(ctx *cli.Context) {
chain, _, stateDB, _ := utils.MakeChain(ctx)
chain, chainDb := utils.MakeChain(ctx)
for _, arg := range ctx.Args() {
var block *types.Block
if hashish(arg) {
@ -180,10 +179,11 @@ func dump(ctx *cli.Context) {
fmt.Println("{}")
utils.Fatalf("block not found")
} else {
state := state.New(block.Root(), stateDB)
state := state.New(block.Root(), chainDb)
fmt.Printf("%s\n", state.Dump())
}
}
chainDb.Close()
}
// hashish returns true for strings that look like hashes.
@ -192,7 +192,7 @@ func hashish(x string) bool {
return err != nil
}
func closeAll(dbs ...common.Database) {
func closeAll(dbs ...ethdb.Database) {
for _, db := range dbs {
db.Close()
}

View File

@ -121,7 +121,7 @@ func keywordCompleter(line string) []string {
}
func apiWordCompleter(line string, pos int) (head string, completions []string, tail string) {
if len(line) == 0 {
if len(line) == 0 || pos == 0 {
return "", nil, ""
}
@ -145,19 +145,15 @@ func apiWordCompleter(line string, pos int) (head string, completions []string,
return begin, completionWords, end
}
func newLightweightJSRE(libPath string, client comms.EthereumClient, interactive bool, f xeth.Frontend) *jsre {
func newLightweightJSRE(libPath string, client comms.EthereumClient, interactive bool) *jsre {
js := &jsre{ps1: "> "}
js.wait = make(chan *big.Int)
js.client = client
js.ds = docserver.New("/")
if f == nil {
f = js
}
// update state in separate forever blocks
js.re = re.New(libPath)
if err := js.apiBindings(f); err != nil {
if err := js.apiBindings(js); err != nil {
utils.Fatalf("Unable to initialize console - %v", err)
}
@ -232,15 +228,10 @@ func (self *jsre) loadAutoCompletion() {
}
func (self *jsre) batch(statement string) {
val, err := self.re.Run(statement)
err := self.re.EvalAndPrettyPrint(statement)
if err != nil {
fmt.Printf("error: %v", err)
} else if val.IsDefined() && val.IsObject() {
obj, _ := self.re.Get("ret_result")
fmt.Printf("%v", obj)
} else if val.IsDefined() {
fmt.Printf("%v", val)
}
if self.atexit != nil {
@ -252,22 +243,22 @@ func (self *jsre) batch(statement string) {
// show summary of current geth instance
func (self *jsre) welcome() {
self.re.Eval(`console.log('instance: ' + web3.version.client);`)
self.re.Eval(`console.log(' datadir: ' + admin.datadir);`)
self.re.Eval(`console.log("coinbase: " + eth.coinbase);`)
self.re.Eval(`var lastBlockTimestamp = 1000 * eth.getBlock(eth.blockNumber).timestamp`)
self.re.Eval(`console.log("at block: " + eth.blockNumber + " (" + new Date(lastBlockTimestamp).toLocaleDateString()
+ " " + new Date(lastBlockTimestamp).toLocaleTimeString() + ")");`)
self.re.Run(`
(function () {
console.log('instance: ' + web3.version.client);
console.log(' datadir: ' + admin.datadir);
console.log("coinbase: " + eth.coinbase);
var ts = 1000 * eth.getBlock(eth.blockNumber).timestamp;
console.log("at block: " + eth.blockNumber + " (" + new Date(ts) + ")");
})();
`)
if modules, err := self.supportedApis(); err == nil {
loadedModules := make([]string, 0)
for api, version := range modules {
loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version))
}
sort.Strings(loadedModules)
self.re.Eval(fmt.Sprintf("var modules = '%s';", strings.Join(loadedModules, " ")))
self.re.Eval(`console.log(" modules: " + modules);`)
fmt.Println("modules:", strings.Join(loadedModules, " "))
}
}
@ -291,7 +282,7 @@ func (js *jsre) apiBindings(f xeth.Frontend) error {
utils.Fatalf("Unable to determine supported api's: %v", err)
}
jeth := rpc.NewJeth(api.Merge(apiImpl...), js.re, js.client)
jeth := rpc.NewJeth(api.Merge(apiImpl...), js.re, js.client, f)
js.re.Set("jeth", struct{}{})
t, _ := js.re.Get("jeth")
jethObj := t.Object()
@ -309,12 +300,12 @@ func (js *jsre) apiBindings(f xeth.Frontend) error {
utils.Fatalf("Error loading web3.js: %v", err)
}
_, err = js.re.Eval("var web3 = require('web3');")
_, err = js.re.Run("var web3 = require('web3');")
if err != nil {
utils.Fatalf("Error requiring web3: %v", err)
}
_, err = js.re.Eval("web3.setProvider(jeth)")
_, err = js.re.Run("web3.setProvider(jeth)")
if err != nil {
utils.Fatalf("Error setting web3 provider: %v", err)
}
@ -333,13 +324,13 @@ func (js *jsre) apiBindings(f xeth.Frontend) error {
}
}
_, err = js.re.Eval(shortcuts)
_, err = js.re.Run(shortcuts)
if err != nil {
utils.Fatalf("Error setting namespaces: %v", err)
}
js.re.Eval(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
js.re.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
return nil
}
@ -387,6 +378,11 @@ func (self *jsre) interactive() {
for {
line, err := self.Prompt(<-prompt)
if err != nil {
if err == liner.ErrPromptAborted { // ctrl-C
self.resetPrompt()
inputln <- ""
continue
}
return
}
inputln <- line
@ -458,8 +454,7 @@ func (self *jsre) parseInput(code string) {
fmt.Println("[native] error", r)
}
}()
value, err := self.re.Run(code)
if err != nil {
if err := self.re.EvalAndPrettyPrint(code); err != nil {
if ottoErr, ok := err.(*otto.Error); ok {
fmt.Println(ottoErr.String())
} else {
@ -467,12 +462,17 @@ func (self *jsre) parseInput(code string) {
}
return
}
self.printValue(value)
}
var indentCount = 0
var str = ""
func (self *jsre) resetPrompt() {
indentCount = 0
str = ""
self.ps1 = "> "
}
func (self *jsre) setIndent() {
open := strings.Count(str, "{")
open += strings.Count(str, "(")
@ -486,10 +486,3 @@ func (self *jsre) setIndent() {
self.ps1 += " "
}
}
func (self *jsre) printValue(v interface{}) {
val, err := self.re.PrettyPrint(v)
if err == nil {
fmt.Printf("%v", val)
}
}

View File

@ -92,7 +92,7 @@ func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *eth
db, _ := ethdb.NewMemDatabase()
core.WriteGenesisBlockForTesting(db, common.HexToAddress(testAddress), common.String2Big(testBalance))
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
ks := crypto.NewKeyStorePlain(filepath.Join(tmp, "keystore"))
am := accounts.NewManager(ks)
conf := &eth.Config{
@ -103,7 +103,7 @@ func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *eth
Name: "test",
SolcPath: testSolcPath,
PowTest: true,
NewDB: func(path string) (common.Database, error) { return db, nil },
NewDB: func(path string) (ethdb.Database, error) { return db, nil },
}
if config != nil {
config(conf)

View File

@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
"io/ioutil"
_ "net/http/pprof"
"os"
@ -38,25 +37,32 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/fdtrack"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc/codec"
"github.com/ethereum/go-ethereum/rpc/comms"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
const (
ClientIdentifier = "Geth"
Version = "1.0.1"
Version = "1.2.2"
VersionMajor = 1
VersionMinor = 2
VersionPatch = 2
)
var (
gitCommit string // set via linker flag
nodeNameVersion string
app *cli.App
ExtraDataFlag = cli.StringFlag{
Name: "extradata",
Usage: "Extra data for the miner",
}
)
func init() {
@ -282,6 +288,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.DataDirFlag,
utils.BlockchainVersionFlag,
utils.OlympicFlag,
utils.EthVersionFlag,
utils.CacheFlag,
utils.JSpathFlag,
utils.ListenPortFlag,
@ -306,7 +313,11 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.IPCPathFlag,
utils.ExecFlag,
utils.WhisperEnabledFlag,
utils.DevModeFlag,
utils.VMDebugFlag,
utils.VMForceJitFlag,
utils.VMJitCacheFlag,
utils.VMEnableJitFlag,
utils.NetworkIdFlag,
utils.RPCCORSDomainFlag,
utils.VerbosityFlag,
@ -325,9 +336,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.GpobaseStepDownFlag,
utils.GpobaseStepUpFlag,
utils.GpobaseCorrectionFactorFlag,
ExtraDataFlag,
}
app.Before = func(ctx *cli.Context) error {
utils.SetupLogger(ctx)
utils.SetupVM(ctx)
utils.SetupEth(ctx)
if ctx.GlobalBool(utils.PProfEanbledFlag.Name) {
utils.StartPProf(ctx)
}
@ -346,6 +360,35 @@ func main() {
}
}
// makeExtra resolves extradata for the miner from a flag or returns a default.
func makeExtra(ctx *cli.Context) []byte {
if ctx.GlobalIsSet(ExtraDataFlag.Name) {
return []byte(ctx.GlobalString(ExtraDataFlag.Name))
}
return makeDefaultExtra()
}
func makeDefaultExtra() []byte {
var clientInfo = struct {
Version uint
Name string
GoVersion string
Os string
}{uint(VersionMajor<<16 | VersionMinor<<8 | VersionPatch), ClientIdentifier, runtime.Version(), runtime.GOOS}
extra, err := rlp.EncodeToBytes(clientInfo)
if err != nil {
glog.V(logger.Warn).Infoln("error setting canonical miner information:", err)
}
if uint64(len(extra)) > params.MaximumExtraDataSize.Uint64() {
glog.V(logger.Warn).Infoln("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize)
glog.V(logger.Debug).Infof("extra: %x\n", extra)
return nil
}
return extra
}
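// Editor's note, an illustrative sketch rather than part of this change: the
// default extra data above is just the RLP encoding of [version, client, go
// version, os], where the packed version for 1.2.2 is 1<<16|2<<8|2 = 66050.
// Assuming the bytes package is also imported, it can be decoded back for
// inspection:
//
//	var info struct {
//		Version             uint
//		Name, GoVersion, Os string
//	}
//	if err := rlp.Decode(bytes.NewReader(makeDefaultExtra()), &info); err == nil {
//		fmt.Printf("%d %s %s %s\n", info.Version, info.Name, info.GoVersion, info.Os)
//	}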
func run(ctx *cli.Context) {
utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))
if ctx.GlobalBool(utils.OlympicFlag.Name) {
@ -353,6 +396,8 @@ func run(ctx *cli.Context) {
}
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
cfg.ExtraData = makeExtra(ctx)
ethereum, err := eth.New(cfg)
if err != nil {
utils.Fatalf("%v", err)
@ -366,21 +411,13 @@ func run(ctx *cli.Context) {
func attach(ctx *cli.Context) {
utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))
// Wrap the standard output with a colorified stream (windows)
if isatty.IsTerminal(os.Stdout.Fd()) {
if pr, pw, err := os.Pipe(); err == nil {
go io.Copy(colorable.NewColorableStdout(), pr)
os.Stdout = pw
}
}
var client comms.EthereumClient
var err error
if ctx.Args().Present() {
client, err = comms.ClientFromEndpoint(ctx.Args().First(), codec.JSON)
} else {
cfg := comms.IpcConfig{
Endpoint: ctx.GlobalString(utils.IPCPathFlag.Name),
Endpoint: utils.IpcSocketPath(ctx),
}
client, err = comms.NewIpcClient(cfg, codec.JSON)
}
@ -393,7 +430,7 @@ func attach(ctx *cli.Context) {
ctx.GlobalString(utils.JSpathFlag.Name),
client,
true,
nil)
)
if ctx.GlobalString(utils.ExecFlag.Name) != "" {
repl.batch(ctx.GlobalString(utils.ExecFlag.Name))
@ -406,15 +443,9 @@ func attach(ctx *cli.Context) {
func console(ctx *cli.Context) {
utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))
// Wrap the standard output with a colorified stream (windows)
if isatty.IsTerminal(os.Stdout.Fd()) {
if pr, pw, err := os.Pipe(); err == nil {
go io.Copy(colorable.NewColorableStdout(), pr)
os.Stdout = pw
}
}
cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
cfg.ExtraData = makeExtra(ctx)
ethereum, err := eth.New(cfg)
if err != nil {
utils.Fatalf("%v", err)
@ -513,17 +544,16 @@ func blockRecovery(ctx *cli.Context) {
var block *types.Block
if arg[0] == '#' {
block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
block = core.GetBlock(blockDb, core.GetCanonicalHash(blockDb, common.String2Big(arg[1:]).Uint64()))
} else {
block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
block = core.GetBlock(blockDb, common.HexToHash(arg))
}
if block == nil {
glog.Fatalln("block not found. Recovery failed")
}
err = core.WriteHead(blockDb, block)
if err != nil {
if err = core.WriteHeadBlockHash(blockDb, block.Hash()); err != nil {
glog.Fatalln("block write err", err)
}
glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
@ -533,9 +563,6 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) {
// Start Ethereum itself
utils.StartEthereum(eth)
// Start logging file descriptor stats.
fdtrack.Start()
am := eth.AccountManager()
account := ctx.GlobalString(utils.UnlockedAccountFlag.Name)
accounts := strings.Split(account, " ")

View File

@ -289,7 +289,7 @@ func updateChart(metric string, data []float64, base *int, chart *termui.LineCha
}
}
unit, scale := 0, 1.0
for high >= 1000 {
for high >= 1000 && unit+1 < len(dataUnits) {
high, unit, scale = high/1000, unit+1, scale*1000
}
// If the unit changes, re-create the chart (hack to set max height...)

View File

@ -21,6 +21,7 @@ import (
"bufio"
"fmt"
"io"
"math"
"math/big"
"os"
"os/signal"
@ -152,6 +153,7 @@ func InitOlympic() {
params.MaximumExtraDataSize = big.NewInt(1024)
NetworkIdFlag.Value = 0
core.BlockReward = big.NewInt(1.5e+18)
core.ExpDiffPeriod = big.NewInt(math.MaxInt64)
}
func FormatTransactionData(data string) []byte {

View File

@ -21,7 +21,7 @@ import (
"fmt"
"os"
"os/user"
"path/filepath"
"path"
"strings"
"github.com/codegangsta/cli"
@ -138,11 +138,8 @@ func (self *DirectoryFlag) Set(value string) {
func expandPath(p string) string {
if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
if user, err := user.Current(); err == nil {
if err == nil {
p = strings.Replace(p, "~", user.HomeDir, 1)
}
p = user.HomeDir + p[1:]
}
}
return filepath.Clean(os.ExpandEnv(p))
return path.Clean(os.ExpandEnv(p))
}

View File

@ -23,18 +23,15 @@ import (
)
func TestPathExpansion(t *testing.T) {
user, _ := user.Current()
tests := map[string]string{
"/home/someuser/tmp": "/home/someuser/tmp",
"~/tmp": user.HomeDir + "/tmp",
"~thisOtherUser/b/": "~thisOtherUser/b",
"$DDDXXX/a/b": "/tmp/a/b",
"/a/b/": "/a/b",
}
os.Setenv("DDDXXX", "/tmp")
for test, expected := range tests {
got := expandPath(test)
if got != expected {

View File

@ -21,29 +21,32 @@ import (
"fmt"
"log"
"math/big"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"github.com/ethereum/go-ethereum/metrics"
"github.com/codegangsta/cli"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/rpc/api"
"github.com/ethereum/go-ethereum/rpc/codec"
"github.com/ethereum/go-ethereum/rpc/comms"
"github.com/ethereum/go-ethereum/rpc/shared"
"github.com/ethereum/go-ethereum/rpc/useragent"
"github.com/ethereum/go-ethereum/xeth"
)
@ -118,6 +121,10 @@ var (
Name: "genesis",
Usage: "Inserts/Overwrites the genesis block (json format)",
}
DevModeFlag = cli.BoolFlag{
Name: "dev",
Usage: "Developer mode. This mode creates a private network and sets several debugging flags",
}
IdentityFlag = cli.StringFlag{
Name: "identity",
Usage: "Custom node name",
@ -135,6 +142,11 @@ var (
Name: "olympic",
Usage: "Use olympic style protocol",
}
EthVersionFlag = cli.IntFlag{
Name: "eth",
Value: 62,
Usage: "Highest eth protocol to advertise (temporary, dev option)",
}
// miner settings
MinerThreadsFlag = cli.IntFlag{
@ -172,6 +184,25 @@ var (
Value: "",
}
// vm flags
VMDebugFlag = cli.BoolFlag{
Name: "vmdebug",
Usage: "Virtual Machine debug output",
}
VMForceJitFlag = cli.BoolFlag{
Name: "forcejit",
Usage: "Force the JIT VM to take precedence",
}
VMJitCacheFlag = cli.IntFlag{
Name: "jitcache",
Usage: "Amount of cached JIT VM programs",
Value: 64,
}
VMEnableJitFlag = cli.BoolFlag{
Name: "jitvm",
Usage: "Enable the JIT VM",
}
// logging and debug settings
LogFileFlag = cli.StringFlag{
Name: "logfile",
@ -196,10 +227,6 @@ var (
Usage: "The syntax of the argument is a comma-separated list of pattern=N, where pattern is a literal file name (minus the \".go\" suffix) or \"glob\" pattern and N is a log verbosity level.",
Value: glog.GetVModule(),
}
VMDebugFlag = cli.BoolFlag{
Name: "vmdebug",
Usage: "Virtual Machine debug output",
}
BacktraceAtFlag = cli.GenericFlag{
Name: "backtrace_at",
Usage: "If set to a file and line number (e.g., \"block.go:271\") holding a logging statement, a stack trace will be logged",
@ -387,7 +414,7 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
}
return &eth.Config{
cfg := &eth.Config{
Name: common.MakeName(clientID, version),
DataDir: ctx.GlobalString(DataDirFlag.Name),
GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name),
@ -424,6 +451,33 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
SolcPath: ctx.GlobalString(SolcPathFlag.Name),
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
}
if ctx.GlobalBool(DevModeFlag.Name) {
if !ctx.GlobalIsSet(VMDebugFlag.Name) {
cfg.VmDebug = true
}
if !ctx.GlobalIsSet(MaxPeersFlag.Name) {
cfg.MaxPeers = 0
}
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = new(big.Int)
}
if !ctx.GlobalIsSet(ListenPortFlag.Name) {
cfg.Port = "0" // auto port
}
if !ctx.GlobalIsSet(WhisperEnabledFlag.Name) {
cfg.Shh = true
}
if !ctx.GlobalIsSet(DataDirFlag.Name) {
cfg.DataDir = os.TempDir() + "/ethereum_dev_mode"
}
cfg.PowTest = true
cfg.DevMode = true
glog.V(logger.Info).Infoln("dev mode enabled")
}
return cfg
}
// SetupLogger configures glog from the logging-related command line flags.
@ -434,24 +488,37 @@ func SetupLogger(ctx *cli.Context) {
glog.SetLogDir(ctx.GlobalString(LogFileFlag.Name))
}
// SetupVM configures the VM package's global settings
func SetupVM(ctx *cli.Context) {
vm.EnableJit = ctx.GlobalBool(VMEnableJitFlag.Name)
vm.ForceJit = ctx.GlobalBool(VMForceJitFlag.Name)
vm.SetJITCacheSize(ctx.GlobalInt(VMJitCacheFlag.Name))
}
// SetupEth configures the eth package's global settings
func SetupEth(ctx *cli.Context) {
version := ctx.GlobalInt(EthVersionFlag.Name)
for len(eth.ProtocolVersions) > 0 && eth.ProtocolVersions[0] > uint(version) {
eth.ProtocolVersions = eth.ProtocolVersions[1:]
eth.ProtocolLengths = eth.ProtocolLengths[1:]
}
if len(eth.ProtocolVersions) == 0 {
Fatalf("No valid eth protocols remaining")
}
}
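// Editor's note, illustrative only and not part of this change: the loop above
// drops advertised protocol versions newer than the --eth cap, trimming the
// parallel length table in lockstep. With stand-in values (not the real eth
// package tables):
//
//	versions, lengths := []uint{63, 62, 61}, []uint64{17, 8, 9}
//	max := uint(62) // e.g. --eth 62
//	for len(versions) > 0 && versions[0] > max {
//		versions, lengths = versions[1:], lengths[1:]
//	}
//	// versions == [62 61], lengths == [8 9]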
// MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context) (chain *core.ChainManager, blockDB, stateDB, extraDB common.Database) {
func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb ethdb.Database) {
datadir := ctx.GlobalString(DataDirFlag.Name)
cache := ctx.GlobalInt(CacheFlag.Name)
var err error
if blockDB, err = ethdb.NewLDBDatabase(filepath.Join(datadir, "blockchain"), cache); err != nil {
Fatalf("Could not open database: %v", err)
}
if stateDB, err = ethdb.NewLDBDatabase(filepath.Join(datadir, "state"), cache); err != nil {
Fatalf("Could not open database: %v", err)
}
if extraDB, err = ethdb.NewLDBDatabase(filepath.Join(datadir, "extra"), cache); err != nil {
if chainDb, err = ethdb.NewLDBDatabase(filepath.Join(datadir, "chaindata"), cache); err != nil {
Fatalf("Could not open database: %v", err)
}
if ctx.GlobalBool(OlympicFlag.Name) {
InitOlympic()
_, err := core.WriteTestNetGenesisBlock(stateDB, blockDB, 42)
_, err := core.WriteTestNetGenesisBlock(chainDb, 42)
if err != nil {
glog.Fatalln(err)
}
@ -460,14 +527,14 @@ func MakeChain(ctx *cli.Context) (chain *core.ChainManager, blockDB, stateDB, ex
eventMux := new(event.TypeMux)
pow := ethash.New()
//genesis := core.GenesisBlock(uint64(ctx.GlobalInt(GenesisNonceFlag.Name)), blockDB)
chain, err = core.NewChainManager(blockDB, stateDB, extraDB, pow, eventMux)
chain, err = core.NewChainManager(chainDb, pow, eventMux)
if err != nil {
Fatalf("Could not start chainmanager: %v", err)
}
proc := core.NewBlockProcessor(stateDB, extraDB, pow, chain, eventMux)
proc := core.NewBlockProcessor(chainDb, pow, chain, eventMux)
chain.SetProcessor(proc)
return chain, blockDB, stateDB, extraDB
return chain, chainDb
}
// MakeAccountManager creates an account manager from set command line flags.
@ -478,7 +545,7 @@ func MakeAccountManager(ctx *cli.Context) *accounts.Manager {
}
func IpcSocketPath(ctx *cli.Context) (ipcpath string) {
if common.IsWindows() {
if runtime.GOOS == "windows" {
ipcpath = common.DefaultIpcPath()
if ctx.GlobalIsSet(IPCPathFlag.Name) {
ipcpath = ctx.GlobalString(IPCPathFlag.Name)
@ -501,15 +568,20 @@ func StartIPC(eth *eth.Ethereum, ctx *cli.Context) error {
Endpoint: IpcSocketPath(ctx),
}
xeth := xeth.New(eth, nil)
codec := codec.JSON
initializer := func(conn net.Conn) (shared.EthereumApi, error) {
fe := useragent.NewRemoteFrontend(conn, eth.AccountManager())
xeth := xeth.New(eth, fe)
codec := codec.JSON
apis, err := api.ParseApiString(ctx.GlobalString(IPCApiFlag.Name), codec, xeth, eth)
if err != nil {
return err
apis, err := api.ParseApiString(ctx.GlobalString(IPCApiFlag.Name), codec, xeth, eth)
if err != nil {
return nil, err
}
return api.Merge(apis...), nil
}
return comms.StartIpc(config, codec, api.Merge(apis...))
return comms.StartIpc(config, codec.JSON, initializer)
}
func StartRPC(eth *eth.Ethereum, ctx *cli.Context) error {

View File

@ -1,49 +1,50 @@
# ethutil
# common
[![Build
Status](https://travis-ci.org/ethereum/go-ethereum.png?branch=master)](https://travis-ci.org/ethereum/go-ethereum)
The ethutil package contains the ethereum utility library.
The common package contains the ethereum utility library.
# Installation
`go get github.com/ethereum/ethutil-go`
As a subdirectory of the main go-ethereum repository, you get it with
`go get github.com/ethereum/go-ethereum`.
# Usage
## RLP (Recursive Linear Prefix) Encoding
RLP Encoding is an encoding scheme utilized by the Ethereum project. It
encodes any native value or list to string.
RLP Encoding is an encoding scheme used by the Ethereum project. It
encodes any native value or list to a string.
More in depth information about the Encoding scheme see the [Wiki](http://wiki.ethereum.org/index.php/RLP)
article.
For more in-depth information about the encoding scheme, see the
[Wiki](http://wiki.ethereum.org/index.php/RLP) article.
```go
rlp := ethutil.Encode("doge")
rlp := common.Encode("doge")
fmt.Printf("%q\n", rlp) // => "\0x83dog"
rlp = ethutil.Encode([]interface{}{"dog", "cat"})
rlp = common.Encode([]interface{}{"dog", "cat"})
fmt.Printf("%q\n", rlp) // => "\0xc8\0x83dog\0x83cat"
decoded := ethutil.Decode(rlp)
decoded := common.Decode(rlp)
fmt.Println(decoded) // => ["dog" "cat"]
```
## Patricia Trie
Patricie Tree is a merkle trie utilized by the Ethereum project.
Patricia Tree is a merkle trie used by the Ethereum project.
More in depth information about the (modified) Patricia Trie can be
found on the [Wiki](http://wiki.ethereum.org/index.php/Patricia_Tree).
The Patricia trie uses a database backend, which can be anything as long as
it satisfies the Database interface found in `ethutil/db.go`.
it satisfies the Database interface found in `common/db.go`.
```go
db := NewDatabase()
// db, root
trie := ethutil.NewTrie(db, "")
trie := common.NewTrie(db, "")
trie.Put("puppy", "dog")
trie.Put("horse", "stallion")
@ -65,7 +66,7 @@ all (key, value) bindings.
// ... Create db/trie
// Note that RLP uses interface slices as list
value := ethutil.Encode([]interface{}{"one", 2, "three", []interface{}{42}})
value := common.Encode([]interface{}{"one", 2, "three", []interface{}{42}})
// Store the RLP encoded value of the list
trie.Put("mykey", value)
```
@ -89,7 +90,7 @@ type (e.g. `Slice()` returns []interface{}, `Uint()` return 0, etc).
`Append(v)` appends the value (v) to the current value/list.
```go
val := ethutil.NewEmptyValue().Append(1).Append("2")
val := common.NewEmptyValue().Append(1).Append("2")
val.AppendList().Append(3)
```
@ -110,7 +111,7 @@ val.AppendList().Append(3)
`Byte()` returns the value as a single byte.
```go
val := ethutil.NewValue([]interface{}{1,"2",[]interface{}{3}})
val := common.NewValue([]interface{}{1,"2",[]interface{}{3}})
val.Get(0).Uint() // => 1
val.Get(1).Str() // => "2"
s := val.Get(2) // => Value([]interface{}{3})
@ -122,7 +123,7 @@ s.Get(0).Uint() // => 3
Decoding streams of RLP data is simplified
```go
val := ethutil.NewValueFromBytes(rlpData)
val := common.NewValueFromBytes(rlpData)
val.Get(0).Uint()
```
@ -132,7 +133,7 @@ Encoding from Value to RLP is done with the `Encode` method. The
underlying value can be anything RLP can encode (int, str, lists, bytes)
```go
val := ethutil.NewValue([]interface{}{1,"2",[]interface{}{3}})
val := common.NewValue([]interface{}{1,"2",[]interface{}{3}})
rlp := val.Encode()
// Store the rlp data
Store(rlp)

View File

@ -19,6 +19,7 @@ package compiler
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
@ -33,15 +34,10 @@ import (
"github.com/ethereum/go-ethereum/logger/glog"
)
const (
// flair = "Christian <c@ethdev.com> and Lefteris <lefteris@ethdev.com> (c) 2014-2015"
flair = ""
languageVersion = "0"
)
var (
versionRegExp = regexp.MustCompile("[0-9]+.[0-9]+.[0-9]+")
params = []string{
versionRegexp = regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+")
legacyRegexp = regexp.MustCompile("0\\.(9\\..*|1\\.[01])")
paramsLegacy = []string{
"--binary", // Request to output the contract in binary (hexadecimal).
"file", //
"--json-abi", // Request to output the contract's JSON ABI interface.
@ -53,6 +49,15 @@ var (
"--add-std",
"1",
}
paramsNew = []string{
"--bin", // Request to output the contract in binary (hexadecimal).
"--abi", // Request to output the contract's JSON ABI interface.
"--userdoc", // Request to output the contract's Natspec user documentation.
"--devdoc", // Request to output the contract's Natspec developer documentation.
"--add-std", // include standard lib contracts
"--optimize", // code optimizer switched on
"-o", // output directory
}
)
type Contract struct {
@ -65,14 +70,17 @@ type ContractInfo struct {
Language string `json:"language"`
LanguageVersion string `json:"languageVersion"`
CompilerVersion string `json:"compilerVersion"`
CompilerOptions string `json:"compilerOptions"`
AbiDefinition interface{} `json:"abiDefinition"`
UserDoc interface{} `json:"userDoc"`
DeveloperDoc interface{} `json:"developerDoc"`
}
type Solidity struct {
solcPath string
version string
solcPath string
version string
fullVersion string
legacy bool
}
func New(solcPath string) (sol *Solidity, err error) {
@ -93,112 +101,118 @@ func New(solcPath string) (sol *Solidity, err error) {
return
}
version := versionRegExp.FindString(out.String())
fullVersion := out.String()
version := versionRegexp.FindString(fullVersion)
legacy := legacyRegexp.MatchString(version)
sol = &Solidity{
solcPath: solcPath,
version: version,
solcPath: solcPath,
version: version,
fullVersion: fullVersion,
legacy: legacy,
}
glog.V(logger.Info).Infoln(sol.Info())
return
}
func (sol *Solidity) Info() string {
return fmt.Sprintf("solc v%s\nSolidity Compiler: %s\n%s", sol.version, sol.solcPath, flair)
return fmt.Sprintf("%s\npath: %s", sol.fullVersion, sol.solcPath)
}
func (sol *Solidity) Version() string {
return sol.version
}
func (sol *Solidity) Compile(source string) (contracts map[string]*Contract, err error) {
// Compile builds and returns all the contracts contained within a source string.
func (sol *Solidity) Compile(source string) (map[string]*Contract, error) {
// Short circuit if no source code was specified
if len(source) == 0 {
err = fmt.Errorf("empty source")
return
return nil, errors.New("solc: empty source string")
}
// Create a safe place to dump compilation output
wd, err := ioutil.TempDir("", "solc")
if err != nil {
return
return nil, fmt.Errorf("solc: failed to create temporary build folder: %v", err)
}
defer os.RemoveAll(wd)
in := strings.NewReader(source)
var out bytes.Buffer
// cwd set to temp dir
// Assemble the compiler command, change to the temp folder and capture any errors
stderr := new(bytes.Buffer)
var params []string
if sol.legacy {
params = paramsLegacy
} else {
params = paramsNew
params = append(params, wd)
}
compilerOptions := strings.Join(params, " ")
cmd := exec.Command(sol.solcPath, params...)
cmd.Dir = wd
cmd.Stdin = in
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
err = fmt.Errorf("solc error: %v", err)
return
}
cmd.Stdin = strings.NewReader(source)
cmd.Stderr = stderr
matches, _ := filepath.Glob(wd + "/*.binary")
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("solc: %v\n%s", err, string(stderr.Bytes()))
}
// Sanity check that something was actually built
matches, _ := filepath.Glob(wd + "/*\\.bin*")
if len(matches) < 1 {
err = fmt.Errorf("solc error: missing code output")
return
return nil, fmt.Errorf("solc: no build results found")
}
contracts = make(map[string]*Contract)
// Compilation succeeded, assemble and return the contracts
contracts := make(map[string]*Contract)
for _, path := range matches {
_, file := filepath.Split(path)
base := strings.Split(file, ".")[0]
codeFile := filepath.Join(wd, base+".binary")
abiDefinitionFile := filepath.Join(wd, base+".abi")
userDocFile := filepath.Join(wd, base+".docuser")
developerDocFile := filepath.Join(wd, base+".docdev")
var code, abiDefinitionJson, userDocJson, developerDocJson []byte
code, err = ioutil.ReadFile(codeFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for code: %v", err)
return
// Parse the individual compilation results (code binary, ABI definitions, user and dev docs)
var binary []byte
binext := ".bin"
if sol.legacy {
binext = ".binary"
}
abiDefinitionJson, err = ioutil.ReadFile(abiDefinitionFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for abiDefinition: %v", err)
return
if binary, err = ioutil.ReadFile(filepath.Join(wd, base+binext)); err != nil {
return nil, fmt.Errorf("solc: error reading compiler output for code: %v", err)
}
var abiDefinition interface{}
err = json.Unmarshal(abiDefinitionJson, &abiDefinition)
userDocJson, err = ioutil.ReadFile(userDocFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for userDoc: %v", err)
return
var abi interface{}
if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".abi")); err != nil {
return nil, fmt.Errorf("solc: error reading abi definition: %v", err)
} else if err = json.Unmarshal(blob, &abi); err != nil {
return nil, fmt.Errorf("solc: error parsing abi definition: %v", err)
}
var userDoc interface{}
err = json.Unmarshal(userDocJson, &userDoc)
developerDocJson, err = ioutil.ReadFile(developerDocFile)
if err != nil {
err = fmt.Errorf("error reading compiler output for developerDoc: %v", err)
return
var userdoc interface{}
if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".docuser")); err != nil {
return nil, fmt.Errorf("solc: error reading user doc: %v", err)
} else if err = json.Unmarshal(blob, &userdoc); err != nil {
return nil, fmt.Errorf("solc: error parsing user doc: %v", err)
}
var developerDoc interface{}
err = json.Unmarshal(developerDocJson, &developerDoc)
contract := &Contract{
Code: "0x" + string(code),
var devdoc interface{}
if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".docdev")); err != nil {
return nil, fmt.Errorf("solc: error reading dev doc: %v", err)
} else if err = json.Unmarshal(blob, &devdoc); err != nil {
return nil, fmt.Errorf("solc: error parsing dev doc: %v", err)
}
// Assemble the final contract
contracts[base] = &Contract{
Code: "0x" + string(binary),
Info: ContractInfo{
Source: source,
Language: "Solidity",
LanguageVersion: languageVersion,
LanguageVersion: sol.version,
CompilerVersion: sol.version,
AbiDefinition: abiDefinition,
UserDoc: userDoc,
DeveloperDoc: developerDoc,
CompilerOptions: compilerOptions,
AbiDefinition: abi,
UserDoc: userdoc,
DeveloperDoc: devdoc,
},
}
contracts[base] = contract
}
return
return contracts, nil
}
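// Editor's note, a usage sketch that is not part of this change (the log/fmt
// calls and error texts are assumptions): the rewrite above selects the legacy
// flag set via legacyRegexp, which matches solc 0.9.x and 0.1.0/0.1.1, and the
// new flag set otherwise. Typical use, with New("") falling back to the solc
// binary found on the system (as the package tests do):
//
//	sol, err := compiler.New("")
//	if err != nil {
//		log.Fatalf("solc not available: %v", err)
//	}
//	contracts, err := sol.Compile(source)
//	if err != nil {
//		log.Fatalf("compilation failed: %v", err)
//	}
//	for name, c := range contracts {
//		fmt.Println(name, c.Code, c.Info.CompilerVersion)
//	}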
func SaveInfo(info *ContractInfo, filename string) (contenthash common.Hash, err error) {

View File

@ -20,12 +20,13 @@ import (
"encoding/json"
"io/ioutil"
"os"
"path"
"testing"
"github.com/ethereum/go-ethereum/common"
)
const solcVersion = "0.9.23"
const solcVersion = "0.1.1"
var (
source = `
@ -36,18 +37,18 @@ contract test {
}
}
`
code = "0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"
info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0","compilerVersion":"0.9.23","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
code = "0x6060604052606d8060116000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa1146037576035565b005b6046600480359060200150605c565b6040518082815260200191505060405180910390f35b60006007820290506068565b91905056"
info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0.1.1","compilerVersion":"0.1.1","compilerOptions":"--binary file --json-abi file --natspec-user file --natspec-dev file --add-std 1","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
infohash = common.HexToHash("0xea782f674eb898e477c20e8a7cf11c2c28b09fa68b5278732104f7a101aed255")
infohash = common.HexToHash("0x9f3803735e7f16120c5a140ab3f02121fd3533a9655c69b33a10e78752cc49b0")
)
func TestCompiler(t *testing.T) {
sol, err := New("")
if err != nil {
t.Skip("solc not found: skip")
t.Skipf("solc not found: %v", err)
} else if sol.Version() != solcVersion {
t.Skip("WARNING: skipping due to a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
t.Skipf("WARNING: a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
}
contracts, err := sol.Compile(source)
if err != nil {
@ -82,7 +83,7 @@ func TestCompileError(t *testing.T) {
func TestNoCompiler(t *testing.T) {
_, err := New("/path/to/solc")
if err != nil {
t.Log("solidity quits with error: %v", err)
t.Logf("solidity quits with error: %v", err)
} else {
t.Errorf("no solc installed, but got no error")
}
@ -94,7 +95,7 @@ func TestSaveInfo(t *testing.T) {
if err != nil {
t.Errorf("%v", err)
}
filename := "/tmp/solctest.info.json"
filename := path.Join(os.TempDir(), "solctest.info.json")
os.Remove(filename)
cinfohash, err := SaveInfo(&cinfo, filename)
if err != nil {

View File

@ -38,7 +38,6 @@ func New(docRoot string) (self *DocServer) {
DocRoot: docRoot,
schemes: []string{"file"},
}
self.DocRoot = "/tmp/"
self.RegisterProtocol("file", http.NewFileTransport(http.Dir(self.DocRoot)))
return
}

View File

@ -20,6 +20,7 @@ import (
"io/ioutil"
"net/http"
"os"
"path"
"testing"
"github.com/ethereum/go-ethereum/common"
@ -27,12 +28,18 @@ import (
)
func TestGetAuthContent(t *testing.T) {
text := "test"
hash := common.Hash{}
copy(hash[:], crypto.Sha3([]byte(text)))
ioutil.WriteFile("/tmp/test.content", []byte(text), os.ModePerm)
dir, err := ioutil.TempDir("", "docserver-test")
if err != nil {
t.Fatal("cannot create temporary directory:", err)
}
defer os.RemoveAll(dir)
ds := New(dir)
ds := New("/tmp/")
text := "test"
hash := crypto.Sha3Hash([]byte(text))
if err := ioutil.WriteFile(path.Join(dir, "test.content"), []byte(text), os.ModePerm); err != nil {
t.Fatal("could not write test file", err)
}
content, err := ds.GetAuthContent("file:///test.content", hash)
if err != nil {
t.Errorf("no error expected, got %v", err)
@ -67,4 +74,4 @@ func TestRegisterScheme(t *testing.T) {
if !ds.HasScheme("scheme") {
t.Errorf("expected scheme to be registered")
}
}
}

View File

@ -134,7 +134,7 @@ func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
db, _ := ethdb.NewMemDatabase()
// set up mock genesis with balance on the testAddress
core.WriteGenesisBlockForTesting(db, common.HexToAddress(testAddress), common.String2Big(testBalance))
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
// only use minimalistic stack with no networking
ethereum, err = eth.New(&eth.Config{
@ -143,7 +143,7 @@ func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
MaxPeers: 0,
PowTest: true,
Etherbase: common.HexToAddress(testAddress),
NewDB: func(path string) (common.Database, error) { return db, nil },
NewDB: func(path string) (ethdb.Database, error) { return db, nil },
})
if err != nil {

View File

@ -116,14 +116,3 @@ func DefaultIpcPath() string {
}
return filepath.Join(DefaultDataDir(), "geth.ipc")
}
func IsWindows() bool {
return runtime.GOOS == "windows"
}
func WindonizePath(path string) string {
if string(path[0]) == "/" && IsWindows() {
path = path[1:]
}
return path
}

View File

@ -1,52 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"os"
// "testing"
checker "gopkg.in/check.v1"
)
type CommonSuite struct{}
var _ = checker.Suite(&CommonSuite{})
func (s *CommonSuite) TestOS(c *checker.C) {
expwin := (os.PathSeparator == '\\' && os.PathListSeparator == ';')
res := IsWindows()
if !expwin {
c.Assert(res, checker.Equals, expwin, checker.Commentf("IsWindows is", res, "but path is", os.PathSeparator))
} else {
c.Assert(res, checker.Not(checker.Equals), expwin, checker.Commentf("IsWindows is", res, "but path is", os.PathSeparator))
}
}
func (s *CommonSuite) TestWindonziePath(c *checker.C) {
iswindowspath := os.PathSeparator == '\\'
path := "/opt/eth/test/file.ext"
res := WindonizePath(path)
ressep := string(res[0])
if !iswindowspath {
c.Assert(ressep, checker.Equals, "/")
} else {
c.Assert(ressep, checker.Not(checker.Equals), "/")
}
}

View File

@ -40,7 +40,7 @@ func (s *SizeSuite) TestStorageSizeString(c *checker.C) {
c.Assert(StorageSize(data3).String(), checker.Equals, exp3)
}
func (s *CommonSuite) TestCommon(c *checker.C) {
func (s *SizeSuite) TestCommon(c *checker.C) {
ether := CurrencyToString(BigPow(10, 19))
finney := CurrencyToString(BigPow(10, 16))
szabo := CurrencyToString(BigPow(10, 13))

View File

@ -144,7 +144,7 @@ func genUncles(i int, gen *BlockGen) {
func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Create the database in memory or in a temporary directory.
var db common.Database
var db ethdb.Database
if !disk {
db, _ = ethdb.NewMemDatabase()
} else {
@ -162,14 +162,14 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Generate a chain of b.N blocks using the supplied block
// generator function.
genesis := WriteGenesisBlockForTesting(db, benchRootAddr, benchRootFunds)
genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
chain := GenerateChain(genesis, db, b.N, gen)
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
evmux := new(event.TypeMux)
chainman, _ := NewChainManager(db, db, db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, db, FakePow{}, chainman, evmux))
chainman, _ := NewChainManager(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()

View File

@ -1,120 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// BlockCache implements a caching mechanism specifically for blocks and uses FILO to pop
type BlockCache struct {
size int
hashes []common.Hash
blocks map[common.Hash]*types.Block
mu sync.RWMutex
}
// Creates and returns a `BlockCache` with `size`. If `size` is smaller than 1 it will panic
func NewBlockCache(size int) *BlockCache {
if size < 1 {
panic("block cache size not allowed to be smaller than 1")
}
bc := &BlockCache{size: size}
bc.Clear()
return bc
}
func (bc *BlockCache) Clear() {
bc.blocks = make(map[common.Hash]*types.Block)
bc.hashes = nil
}
func (bc *BlockCache) Push(block *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
if len(bc.hashes) == bc.size {
delete(bc.blocks, bc.hashes[0])
// XXX There are a few other options on solving this
// 1) use a poller / GC like mechanism to clean up untracked objects
// 2) copy as below
// re-use the slice and remove the reference to bc.hashes[0]
// this will allow the element to be garbage collected.
copy(bc.hashes, bc.hashes[1:])
} else {
bc.hashes = append(bc.hashes, common.Hash{})
}
hash := block.Hash()
bc.blocks[hash] = block
bc.hashes[len(bc.hashes)-1] = hash
}
func (bc *BlockCache) Delete(hash common.Hash) {
bc.mu.Lock()
defer bc.mu.Unlock()
if _, ok := bc.blocks[hash]; ok {
delete(bc.blocks, hash)
for i, h := range bc.hashes {
if hash == h {
bc.hashes = bc.hashes[:i+copy(bc.hashes[i:], bc.hashes[i+1:])]
// or ? => bc.hashes = append(bc.hashes[:i], bc.hashes[i+1]...)
break
}
}
}
}
func (bc *BlockCache) Get(hash common.Hash) *types.Block {
bc.mu.RLock()
defer bc.mu.RUnlock()
if block, haz := bc.blocks[hash]; haz {
return block
}
return nil
}
func (bc *BlockCache) Has(hash common.Hash) bool {
bc.mu.RLock()
defer bc.mu.RUnlock()
_, ok := bc.blocks[hash]
return ok
}
func (bc *BlockCache) Each(cb func(int, *types.Block)) {
bc.mu.Lock()
defer bc.mu.Unlock()
i := 0
for _, block := range bc.blocks {
cb(i, block)
i++
}
}

View File

@ -1,76 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
func newChain(size int) (chain []*types.Block) {
var parentHash common.Hash
for i := 0; i < size; i++ {
head := &types.Header{ParentHash: parentHash, Number: big.NewInt(int64(i))}
block := types.NewBlock(head, nil, nil, nil)
chain = append(chain, block)
parentHash = block.Hash()
}
return chain
}
func insertChainCache(cache *BlockCache, chain []*types.Block) {
for _, block := range chain {
cache.Push(block)
}
}
func TestNewBlockCache(t *testing.T) {
chain := newChain(3)
cache := NewBlockCache(2)
insertChainCache(cache, chain)
if cache.hashes[0] != chain[1].Hash() {
t.Error("oldest block incorrect")
}
}
func TestInclusion(t *testing.T) {
chain := newChain(3)
cache := NewBlockCache(3)
insertChainCache(cache, chain)
for _, block := range chain {
if b := cache.Get(block.Hash()); b == nil {
t.Errorf("getting %x failed", block.Hash())
}
}
}
func TestDeletion(t *testing.T) {
chain := newChain(3)
cache := NewBlockCache(3)
insertChainCache(cache, chain)
cache.Delete(chain[1].Hash())
if cache.Has(chain[1].Hash()) {
t.Errorf("expected %x not to be included")
}
}

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@ -41,8 +42,7 @@ const (
)
type BlockProcessor struct {
db common.Database
extraDb common.Database
chainDb ethdb.Database
// Mutex for locking the block processor. Blocks can only be handled one at a time
mutex sync.Mutex
// Canonical block chain
@ -57,25 +57,35 @@ type BlockProcessor struct {
eventMux *event.TypeMux
}
func NewBlockProcessor(db, extra common.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
// TODO: type GasPool big.Int
//
// GasPool is implemented by state.StateObject. This is a historical
// coincidence. Gas tracking should move out of StateObject.
// GasPool tracks the amount of gas available during
// execution of the transactions in a block.
type GasPool interface {
AddGas(gas, price *big.Int)
SubGas(gas, price *big.Int) error
}
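// Editor's sketch (not part of this diff): the TODO above suggests a big.Int-backed
// GasPool once gas tracking moves out of StateObject. A minimal, hypothetical
// implementation satisfying the interface could look like the following (it assumes
// the package's existing "fmt" and "math/big" imports; the price argument is ignored,
// since the ether bookkeeping stays with the state object).
type bigIntGasPool big.Int

func (gp *bigIntGasPool) AddGas(gas, price *big.Int) {
	// Refund gas back into the pool.
	(*big.Int)(gp).Add((*big.Int)(gp), gas)
}

func (gp *bigIntGasPool) SubGas(gas, price *big.Int) error {
	// Reject the transaction if the block has less gas left than requested.
	if (*big.Int)(gp).Cmp(gas) < 0 {
		return fmt.Errorf("gas pool exhausted: have %v, want %v", (*big.Int)(gp), gas)
	}
	(*big.Int)(gp).Sub((*big.Int)(gp), gas)
	return nil
}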
func NewBlockProcessor(db ethdb.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
sm := &BlockProcessor{
db: db,
extraDb: extra,
chainDb: db,
mem: make(map[string]*big.Int),
Pow: pow,
bc: chainManager,
eventMux: eventMux,
}
return sm
}
func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block *types.Block, transientProcess bool) (receipts types.Receipts, err error) {
coinbase := statedb.GetOrNewStateObject(block.Coinbase())
coinbase.SetGasLimit(block.GasLimit())
gp := statedb.GetOrNewStateObject(block.Coinbase())
gp.SetGasLimit(block.GasLimit())
// Process the transactions on to parent state
receipts, err = sm.ApplyTransactions(coinbase, statedb, block, block.Transactions(), transientProcess)
receipts, err = sm.ApplyTransactions(gp, statedb, block, block.Transactions(), transientProcess)
if err != nil {
return nil, err
}
@ -83,11 +93,8 @@ func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block
return receipts, nil
}
func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
// If we are mining this block and validating we want to set the logs back to 0
cb := statedb.GetStateObject(coinbase.Address())
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, cb)
func (self *BlockProcessor) ApplyTransaction(gp GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, gp)
if err != nil {
return nil, nil, err
}
@ -122,7 +129,7 @@ func (self *BlockProcessor) ChainManager() *ChainManager {
return self.bc
}
func (self *BlockProcessor) ApplyTransactions(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
func (self *BlockProcessor) ApplyTransactions(gp GasPool, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
@ -134,7 +141,7 @@ func (self *BlockProcessor) ApplyTransactions(coinbase *state.StateObject, state
for i, tx := range txs {
statedb.StartRecord(tx.Hash(), block.Hash(), i)
receipt, txGas, err := self.ApplyTransaction(coinbase, statedb, header, tx, totalUsedGas, transientProcess)
receipt, txGas, err := self.ApplyTransaction(gp, statedb, header, tx, totalUsedGas, transientProcess)
if err != nil {
return nil, err
}
@ -201,13 +208,13 @@ func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, receipts
func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs state.Logs, receipts types.Receipts, err error) {
// Create a new state based on the parent's root (e.g., create copy)
state := state.New(parent.Root(), sm.db)
state := state.New(parent.Root(), sm.chainDb)
header := block.Header()
uncles := block.Uncles()
txs := block.Transactions()
// Block validation
if err = ValidateHeader(sm.Pow, header, parent, false); err != nil {
if err = ValidateHeader(sm.Pow, header, parent.Header(), false, false); err != nil {
return
}
@ -331,7 +338,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty
return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
}
if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash], true); err != nil {
if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
}
}
@ -342,7 +349,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty
// GetBlockReceipts returns the receipts belonging to the block hash
func (sm *BlockProcessor) GetBlockReceipts(bhash common.Hash) types.Receipts {
if block := sm.ChainManager().GetBlock(bhash); block != nil {
return GetBlockReceipts(sm.extraDb, block.Hash())
return GetBlockReceipts(sm.chainDb, block.Hash())
}
return nil
@ -352,67 +359,59 @@ func (sm *BlockProcessor) GetBlockReceipts(bhash common.Hash) types.Receipts {
// where it tries to get the logs from the (updated) receipts store, or falls back to
// the deprecated way of re-processing the block.
func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err error) {
receipts := GetBlockReceipts(sm.extraDb, block.Hash())
if len(receipts) > 0 {
// coalesce logs
for _, receipt := range receipts {
logs = append(logs, receipt.Logs()...)
}
return
receipts := GetBlockReceipts(sm.chainDb, block.Hash())
// coalesce logs
for _, receipt := range receipts {
logs = append(logs, receipt.Logs()...)
}
// TODO: remove backward compatibility
var (
parent = sm.bc.GetBlock(block.ParentHash())
state = state.New(parent.Root(), sm.db)
)
sm.TransitionState(state, parent, block, true)
return state.Logs(), nil
return logs, nil
}
// See YP section 4.3.4. "Block Header Validity"
// Validates a block. Returns an error if the block is invalid.
func ValidateHeader(pow pow.PoW, block *types.Header, parent *types.Block, checkPow bool) error {
if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Block extra data too long (%d)", len(block.Extra))
// Validates a header. Returns an error if the header is invalid.
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
}
if block.Time > uint64(time.Now().Unix()) {
return BlockFutureErr
if uncle {
if header.Time.Cmp(common.MaxBig) == 1 {
return BlockTSTooBigErr
}
} else {
if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
return BlockFutureErr
}
}
if block.Time <= parent.Time() {
if header.Time.Cmp(parent.Time) != 1 {
return BlockEqualTSErr
}
expd := CalcDifficulty(block.Time, parent.Time(), parent.Number(), parent.Difficulty())
if expd.Cmp(block.Difficulty) != 0 {
return fmt.Errorf("Difficulty check failed for block %v, %v", block.Difficulty, expd)
expd := CalcDifficulty(header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
if expd.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("Difficulty check failed for header %v, %v", header.Difficulty, expd)
}
var a, b *big.Int
a = parent.GasLimit()
a = a.Sub(a, block.GasLimit)
a := new(big.Int).Set(parent.GasLimit)
a = a.Sub(a, header.GasLimit)
a.Abs(a)
b = parent.GasLimit()
b := new(big.Int).Set(parent.GasLimit)
b = b.Div(b, params.GasLimitBoundDivisor)
if !(a.Cmp(b) < 0) || (block.GasLimit.Cmp(params.MinGasLimit) == -1) {
return fmt.Errorf("GasLimit check failed for block %v (%v > %v)", block.GasLimit, a, b)
if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
return fmt.Errorf("GasLimit check failed for header %v (%v > %v)", header.GasLimit, a, b)
}
num := parent.Number()
num.Sub(block.Number, num)
num := new(big.Int).Set(parent.Number)
num.Sub(header.Number, num)
if num.Cmp(big.NewInt(1)) != 0 {
return BlockNumberErr
}
if checkPow {
// Verify the nonce of the block. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(block)) {
return ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
// Verify the nonce of the header. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(header)) {
return ValidationError("Header's nonce is invalid (= %x)", header.Nonce)
}
}
return nil
}
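// Editor's note (illustrative, not part of this diff): the gas limit rule above requires
// |parent.GasLimit - header.GasLimit| to be strictly smaller than
// parent.GasLimit / params.GasLimitBoundDivisor, and header.GasLimit >= params.MinGasLimit.
// As a worked example, assuming GasLimitBoundDivisor = 1024 and a parent gas limit of
// 3141592 (the genesis gas limit used in the tests below), the bound b is 3141592/1024 = 3067,
// so a child gas limit of 3144658 passes (delta 3066 < 3067) while 3144659 is rejected.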

View File

@ -33,28 +33,28 @@ func proc() (*BlockProcessor, *ChainManager) {
db, _ := ethdb.NewMemDatabase()
var mux event.TypeMux
WriteTestNetGenesisBlock(db, db, 0)
chainMan, err := NewChainManager(db, db, db, thePow(), &mux)
WriteTestNetGenesisBlock(db, 0)
chainMan, err := NewChainManager(db, thePow(), &mux)
if err != nil {
fmt.Println(err)
}
return NewBlockProcessor(db, db, ezp.New(), chainMan, &mux), chainMan
return NewBlockProcessor(db, ezp.New(), chainMan, &mux), chainMan
}
func TestNumber(t *testing.T) {
pow := ezp.New()
_, chain := proc()
statedb := state.New(chain.Genesis().Root(), chain.stateDb)
statedb := state.New(chain.Genesis().Root(), chain.chainDb)
header := makeHeader(chain.Genesis(), statedb)
header.Number = big.NewInt(3)
err := ValidateHeader(pow, header, chain.Genesis(), false)
err := ValidateHeader(pow, header, chain.Genesis().Header(), false, false)
if err != BlockNumberErr {
t.Errorf("expected block number error, got %q", err)
}
header = makeHeader(chain.Genesis(), statedb)
err = ValidateHeader(pow, header, chain.Genesis(), false)
err = ValidateHeader(pow, header, chain.Genesis().Header(), false, false)
if err == BlockNumberErr {
t.Errorf("didn't expect block number error")
}

View File

@ -20,8 +20,5 @@ import "github.com/ethereum/go-ethereum/common"
// Set of manually tracked bad hashes (usually hard forks)
var BadHashes = map[common.Hash]bool{
common.HexToHash("f269c503aed286caaa0d114d6a5320e70abbc2febe37953207e76a2873f2ba79"): true,
common.HexToHash("38f5bbbffd74804820ffa4bab0cd540e9de229725afb98c1a7e57936f4a714bc"): true,
common.HexToHash("7064455b364775a16afbdecd75370e912c6e2879f202eda85b9beae547fff3ac"): true,
common.HexToHash("5b7c80070a6eff35f3eb3181edb023465c776d40af2885571e1bc4689f3a44d8"): true,
common.HexToHash("05bef30ef572270f654746da22639a7a0c97dd97a7050b9e252391996aaeb689"): true,
}

View File

@ -24,10 +24,10 @@ import (
)
var (
jeff = common.HexToAddress("a8edb1ac2c86d3d9d78f96cd18001f60df29e52c")
vitalik = common.HexToAddress("1baf27b88c48dd02b744999cf3522766929d2b2a")
christoph = common.HexToAddress("60d11b58744784dc97f878f7e3749c0f1381a004")
gav = common.HexToAddress("4bb7e8ae99b645c2b7860b8f3a2328aae28bd80a")
jeff = common.HexToAddress("959c33de5961820567930eccce51ea715c496f85")
vitalik = common.HexToAddress("c8158da0b567a8cc898991c2c2a073af67dc03a9")
christoph = common.HexToAddress("7a19a893f91d5b6e2cdf941b6acbba2cbcf431ee")
gav = common.HexToAddress("539dd9aaf45c3feb03f9c004f4098bd3268fef6b")
)
// Canary will check the 0'd address of the 4 contracts above.

View File

@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/pow"
)
@ -130,6 +131,17 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
return b.chain[index]
}
// OffsetTime modifies the time instance of a block, implicitly changing its
// associated difficulty. It's useful to test scenarios where forking is not
// tied to chain length directly.
func (b *BlockGen) OffsetTime(seconds int64) {
b.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}
b.header.Difficulty = CalcDifficulty(b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
}
// GenerateChain creates a chain of n blocks. The first block's
// parent will be the provided parent. db is used to store
// intermediate states and should contain the parent's state trie.
@ -142,7 +154,7 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
// Blocks created by GenerateChain do not contain valid proof of work
// values. Inserting them into ChainManager requires use of FakePow or
// a similar non-validating proof of work implementation.
func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int, *BlockGen)) []*types.Block {
func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block {
statedb := state.New(parent.Root(), db)
blocks := make(types.Blocks, n)
genblock := func(i int, h *types.Header) *types.Block {
@ -158,7 +170,6 @@ func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int,
for i := 0; i < n; i++ {
header := makeHeader(parent, statedb)
block := genblock(i, header)
block.Td = CalcTD(block, parent)
blocks[i] = block
parent = block
}
@ -166,27 +177,32 @@ func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int,
}
func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
time := parent.Time() + 10 // block time is fixed at 10 seconds
var time *big.Int
if parent.Time() == nil {
time = big.NewInt(10)
} else {
time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
}
return &types.Header{
Root: state.Root(),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
Difficulty: CalcDifficulty(time, parent.Time(), parent.Number(), parent.Difficulty()),
Difficulty: CalcDifficulty(time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: uint64(time),
Time: time,
}
}
// newCanonical creates a new deterministic canonical chain by running
// InsertChain on the result of makeChain.
func newCanonical(n int, db common.Database) (*BlockProcessor, error) {
func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
evmux := &event.TypeMux{}
WriteTestNetGenesisBlock(db, db, 0)
chainman, _ := NewChainManager(db, db, db, FakePow{}, evmux)
bman := NewBlockProcessor(db, db, FakePow{}, chainman, evmux)
WriteTestNetGenesisBlock(db, 0)
chainman, _ := NewChainManager(db, FakePow{}, evmux)
bman := NewBlockProcessor(db, FakePow{}, chainman, evmux)
bman.bc.SetProcessor(bman)
parent := bman.bc.CurrentBlock()
if n == 0 {
@ -197,7 +213,7 @@ func newCanonical(n int, db common.Database) (*BlockProcessor, error) {
return bman, err
}
func makeChain(parent *types.Block, n int, db common.Database, seed int) []*types.Block {
func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
})

View File

@ -42,7 +42,7 @@ func ExampleGenerateChain() {
)
// Ensure that key1 has some funds in the genesis block.
genesis := WriteGenesisBlockForTesting(db, addr1, big.NewInt(1000000))
genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr1, big.NewInt(1000000)})
// This call generates a chain of 5 blocks. The function runs for
// each block and adds different features to gen based on the
@ -77,8 +77,8 @@ func ExampleGenerateChain() {
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
chainman, _ := NewChainManager(db, db, db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, db, FakePow{}, chainman, evmux))
chainman, _ := NewChainManager(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
if i, err := chainman.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", i, err)
return

View File

@ -22,7 +22,6 @@ import (
"fmt"
"io"
"math/big"
"runtime"
"sync"
"sync/atomic"
"time"
@ -30,11 +29,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/hashicorp/golang-lru"
)
@ -48,6 +49,9 @@ var (
)
const (
headerCacheLimit = 512
bodyCacheLimit = 256
tdCacheLimit = 1024
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
@ -56,9 +60,7 @@ const (
type ChainManager struct {
//eth EthManager
blockDb common.Database
stateDb common.Database
extraDb common.Database
chainDb ethdb.Database
processor types.BlockProcessor
eventMux *event.TypeMux
genesisBlock *types.Block
@ -70,10 +72,13 @@ type ChainManager struct {
checkpoint int // checkpoint counts towards the new checkpoint
td *big.Int
currentBlock *types.Block
lastBlockHash common.Hash
currentGasLimit *big.Int
cache *lru.Cache // cache is the LRU caching
headerCache *lru.Cache // Cache for the most recent block headers
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
tdCache *lru.Cache // Cache for the most recent block total difficulties
blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
quit chan struct{}
@ -85,16 +90,25 @@ type ChainManager struct {
pow pow.PoW
}
func NewChainManager(blockDb, stateDb, extraDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
cache, _ := lru.New(blockCacheLimit)
func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
headerCache, _ := lru.New(headerCacheLimit)
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks)
bc := &ChainManager{
blockDb: blockDb,
stateDb: stateDb,
extraDb: extraDb,
eventMux: mux,
quit: make(chan struct{}),
cache: cache,
pow: pow,
chainDb: chainDb,
eventMux: mux,
quit: make(chan struct{}),
headerCache: headerCache,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
tdCache: tdCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
pow: pow,
}
bc.genesisBlock = bc.GetBlockByNumber(0)
@ -103,17 +117,15 @@ func NewChainManager(blockDb, stateDb, extraDb common.Database, pow pow.PoW, mux
if err != nil {
return nil, err
}
bc.genesisBlock, err = WriteGenesisBlock(stateDb, blockDb, reader)
bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader)
if err != nil {
return nil, err
}
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
}
if err := bc.setLastState(); err != nil {
return nil, err
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash, _ := range BadHashes {
if block := bc.GetBlock(hash); block != nil {
@ -127,14 +139,8 @@ func NewChainManager(blockDb, stateDb, extraDb common.Database, pow pow.PoW, mux
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
}
}
// Take ownership of this particular state
bc.futureBlocks, _ = lru.New(maxFutureBlocks)
bc.makeCache()
go bc.update()
return bc, nil
}
@ -143,14 +149,16 @@ func (bc *ChainManager) SetHead(head *types.Block) {
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
bc.removeBlock(block)
DeleteBlock(bc.chainDb, block.Hash())
}
bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
bc.cache, _ = lru.New(blockCacheLimit)
bc.currentBlock = head
bc.makeCache()
bc.setTotalDifficulty(head.Td)
bc.setTotalDifficulty(bc.GetTd(head.Hash()))
bc.insert(head)
bc.setLastState()
}
@ -173,7 +181,7 @@ func (self *ChainManager) LastBlockHash() common.Hash {
self.mu.RLock()
defer self.mu.RUnlock()
return self.lastBlockHash
return self.currentBlock.Hash()
}
func (self *ChainManager) CurrentBlock() *types.Block {
@ -195,21 +203,21 @@ func (self *ChainManager) SetProcessor(proc types.BlockProcessor) {
}
func (self *ChainManager) State() *state.StateDB {
return state.New(self.CurrentBlock().Root(), self.stateDb)
return state.New(self.CurrentBlock().Root(), self.chainDb)
}
func (bc *ChainManager) recover() bool {
data, _ := bc.blockDb.Get([]byte("checkpoint"))
data, _ := bc.chainDb.Get([]byte("checkpoint"))
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
if block != nil {
err := bc.blockDb.Put([]byte("LastBlock"), block.Hash().Bytes())
if err != nil {
glog.Fatalln("db write err:", err)
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
glog.Fatalf("failed to write database head number: %v", err)
}
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to write database head hash: %v", err)
}
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
return true
}
}
@ -217,14 +225,13 @@ func (bc *ChainManager) recover() bool {
}
func (bc *ChainManager) setLastState() error {
data, _ := bc.blockDb.Get([]byte("LastBlock"))
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
head := GetHeadBlockHash(bc.chainDb)
if head != (common.Hash{}) {
block := bc.GetBlock(head)
if block != nil {
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
} else {
glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
if bc.recover() {
glog.Infof("Recover successful")
} else {
@ -234,7 +241,7 @@ func (bc *ChainManager) setLastState() error {
} else {
bc.Reset()
}
bc.td = bc.currentBlock.Td
bc.td = bc.GetTd(bc.currentBlock.Hash())
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
if glog.V(logger.Info) {
@ -244,63 +251,38 @@ func (bc *ChainManager) setLastState() error {
return nil
}
func (bc *ChainManager) makeCache() {
bc.cache, _ = lru.New(blockCacheLimit)
// load in last `blockCacheLimit` - 1 blocks. Last block is the current.
bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
bc.cache.Add(block.Hash(), block)
}
}
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *ChainManager) Reset() {
bc.ResetWithGenesisBlock(bc.genesisBlock)
}
// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
// Dump the entire block chain and purge the caches
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
bc.removeBlock(block)
DeleteBlock(bc.chainDb, block.Hash())
}
bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
bc.cache, _ = lru.New(blockCacheLimit)
// Prepare the genesis block
err := WriteBlock(bc.blockDb, bc.genesisBlock)
if err != nil {
glog.Fatalln("db err:", err)
// Prepare the genesis block and reinitialize the chain
if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err)
}
if err := WriteBlock(bc.chainDb, genesis); err != nil {
glog.Fatalf("failed to write genesis block: %v", err)
}
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.makeCache()
bc.setTotalDifficulty(common.Big("0"))
}
func (bc *ChainManager) removeBlock(block *types.Block) {
bc.blockDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
}
func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
bc.removeBlock(block)
}
// Prepare the genesis block
gb.Td = gb.Difficulty()
bc.genesisBlock = gb
err := WriteBlock(bc.blockDb, bc.genesisBlock)
if err != nil {
glog.Fatalln("db err:", err)
}
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.makeCache()
bc.td = gb.Difficulty()
bc.setTotalDifficulty(genesis.Difficulty())
}
// Export writes the active chain to the given writer.
@ -339,23 +321,23 @@ func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error
// insert injects a block into the current block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
err := WriteHead(bc.blockDb, block)
if err != nil {
glog.Fatal("db write fail:", err)
// Add the block to the canonical chain number scheme and mark as the head
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
glog.Fatalf("failed to insert block number: %v", err)
}
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert block number: %v", err)
}
// Add a new restore point if we reached some limit
bc.checkpoint++
if bc.checkpoint > checkpointLimit {
err = bc.blockDb.Put([]byte("checkpoint"), block.Hash().Bytes())
if err != nil {
glog.Fatal("db write fail:", err)
if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil {
glog.Fatalf("failed to create checkpoint: %v", err)
}
bc.checkpoint = 0
}
// Update the internal state with the head block
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
}
// Accessors
@ -363,61 +345,141 @@ func (bc *ChainManager) Genesis() *types.Block {
return bc.genesisBlock
}
// Block fetching methods
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *ChainManager) HasHeader(hash common.Hash) bool {
return bc.GetHeader(hash) != nil
}
// GetHeader retrieves a block header from the database by hash, caching it if
// found.
func (self *ChainManager) GetHeader(hash common.Hash) *types.Header {
// Short circuit if the header's already in the cache, retrieve otherwise
if header, ok := self.headerCache.Get(hash); ok {
return header.(*types.Header)
}
header := GetHeader(self.chainDb, hash)
if header == nil {
return nil
}
// Cache the found header for next time and return
self.headerCache.Add(header.Hash(), header)
return header
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header {
hash := GetCanonicalHash(self.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
return self.GetHeader(hash)
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (self *ChainManager) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := self.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
body := GetBody(self.chainDb, hash)
if body == nil {
return nil
}
// Cache the found body for next time and return
self.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := self.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
body := GetBodyRLP(self.chainDb, hash)
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
self.bodyRLPCache.Add(hash, body)
return body
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *ChainManager) GetTd(hash common.Hash) *big.Int {
// Short circuit if the td's already in the cache, retrieve otherwise
if cached, ok := self.tdCache.Get(hash); ok {
return cached.(*big.Int)
}
td := GetTd(self.chainDb, hash)
if td == nil {
return nil
}
// Cache the found td for next time and return
self.tdCache.Add(hash, td)
return td
}
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *ChainManager) HasBlock(hash common.Hash) bool {
if bc.cache.Contains(hash) {
return true
}
data, _ := bc.blockDb.Get(append(blockHashPre, hash[:]...))
return len(data) != 0
}
func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
block := self.GetBlock(hash)
if block == nil {
return
}
// XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
for i := uint64(0); i < max; i++ {
block = self.GetBlock(block.ParentHash())
if block == nil {
break
}
chain = append(chain, block.Hash())
if block.Number().Cmp(common.Big0) <= 0 {
break
}
}
return
return bc.GetBlock(hash) != nil
}
// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
if block, ok := self.cache.Get(hash); ok {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := self.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := GetBlockByHash(self.blockDb, hash)
block := GetBlock(self.chainDb, hash)
if block == nil {
return nil
}
// Add the block to the cache
self.cache.Add(hash, (*types.Block)(block))
return (*types.Block)(block)
// Cache the found block for next time and return
self.blockCache.Add(block.Hash(), block)
return block
}
func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
self.mu.RLock()
defer self.mu.RUnlock()
return self.getBlockByNumber(num)
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block {
hash := GetCanonicalHash(self.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
return self.GetBlock(hash)
}
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
// Get the origin header from which to fetch
header := self.GetHeader(hash)
if header == nil {
return nil
}
// Iterate the headers until enough is collected or the genesis reached
chain := make([]common.Hash, 0, max)
for i := uint64(0); i < max; i++ {
if header = self.GetHeader(header.ParentHash); header == nil {
break
}
chain = append(chain, header.Hash())
if header.Number.Cmp(common.Big0) == 0 {
break
}
}
return chain
}
// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
for i := 0; i < n; i++ {
@ -431,11 +493,6 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*
return
}
// non blocking version
func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
return GetBlockByNumber(self.blockDb, num)
}
func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
@ -491,39 +548,48 @@ const (
SideStatTy
)
// WriteBlock writes the block to the chain (or pending queue)
func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
// WriteBlock writes the block to the chain.
func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) {
self.wg.Add(1)
defer self.wg.Done()
// Calculate the total difficulty of the block
ptd := self.GetTd(block.ParentHash())
if ptd == nil {
return NonStatTy, ParentError(block.ParentHash())
}
td := new(big.Int).Add(block.Difficulty(), ptd)
self.mu.RLock()
cblock := self.currentBlock
self.mu.RUnlock()
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
if block.Td.Cmp(self.Td()) > 0 {
if td.Cmp(self.Td()) > 0 {
// chain fork
if block.ParentHash() != cblock.Hash() {
// during split we merge two different chains and create the new canonical chain
err := self.merge(cblock, block)
err := self.reorg(cblock, block)
if err != nil {
return NonStatTy, err
}
status = SplitStatTy
}
status = CanonStatTy
self.mu.Lock()
self.setTotalDifficulty(block.Td)
self.setTotalDifficulty(td)
self.insert(block)
self.mu.Unlock()
status = CanonStatTy
} else {
status = SideStatTy
}
err = WriteBlock(self.blockDb, block)
if err != nil {
glog.Fatalln("db err:", err)
if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
glog.Fatalf("failed to write block total difficulty: %v", err)
}
if err := WriteBlock(self.chainDb, block); err != nil {
glog.Fatalf("filed to write block contents: %v", err)
}
// Delete from future blocks
self.futureBlocks.Remove(block.Hash())
@ -549,14 +615,12 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
stats struct{ queued, processed, ignored int }
tstart = time.Now()
nonceDone = make(chan nonceResult, len(chain))
nonceQuit = make(chan struct{})
nonceChecked = make([]bool, len(chain))
)
// Start the parallel nonce verifier.
go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
defer close(nonceQuit)
nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
defer close(nonceAbort)
txcount := 0
for i, block := range chain {
@ -569,24 +633,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
// Wait for block i's nonce to be verified before processing
// its state transition.
for !nonceChecked[i] {
r := <-nonceDone
nonceChecked[r.i] = true
r := <-nonceResults
nonceChecked[r.index] = true
if !r.valid {
block := chain[r.i]
return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
block := chain[r.index]
return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
}
}
if BadHashes[block.Hash()] {
err := fmt.Errorf("Found known bad hash in chain %x", block.Hash())
err := BadHashError(block.Hash())
blockErr(block, err)
return i, err
}
// Setting block.Td regardless of error (known for example) prevents errors down the line
// in the protocol handler
block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
// Call in to the block processor and check for errors. It's likely that if one block fails
// all others will fail too (unless a known block is returned).
logs, receipts, err := self.processor.Process(block)
@ -600,7 +659,8 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
// Allow up to MaxFuture second in the future blocks. If this limit
// is exceeded the chain is discarded and processed at a later time
// if given.
if max := uint64(time.Now().Unix()) + maxTimeFutureBlocks; block.Time() > max {
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) == 1 {
return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
}
@ -621,26 +681,28 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
return i, err
}
if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
}
txcount += len(block.Transactions())
// write the block to the chain and get the status
status, err := self.WriteBlock(block, true)
status, err := self.WriteBlock(block)
if err != nil {
return i, err
}
switch status {
case CanonStatTy:
if glog.V(logger.Debug) {
glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
}
queue[i] = ChainEvent{block, block.Hash(), logs}
queueEvent.canonicalCount++
// This puts transactions in an extra db for rpc
PutTransactions(self.extraDb, block, block.Transactions())
PutTransactions(self.chainDb, block, block.Transactions())
// store the receipts
PutReceipts(self.extraDb, receipts)
PutReceipts(self.chainDb, receipts)
case SideStatTy:
if glog.V(logger.Detail) {
glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
@ -651,8 +713,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
queue[i] = ChainSplitEvent{block, logs}
queueEvent.splitCount++
}
PutBlockReceipts(self.extraDb, block, receipts)
stats.processed++
}
@ -667,20 +727,26 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
return 0, nil
}
// diff takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
// reorg takes two blocks, an old chain and a new chain, and reconstructs the blocks to be part
// of the new canonical chain. It also accumulates potentially missing transactions and posts
// an event about them.
func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
self.mu.Lock()
defer self.mu.Unlock()
var (
newChain types.Blocks
commonBlock *types.Block
oldStart = oldBlock
newStart = newBlock
deletedTxs types.Transactions
)
// first reduce whichever chain is higher
if oldBlock.NumberU64() > newBlock.NumberU64() {
// reduce old chain
for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
}
} else {
// reduce new chain and append new chain blocks for inserting later on
@ -689,10 +755,10 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
}
}
if oldBlock == nil {
return nil, fmt.Errorf("Invalid old chain")
return fmt.Errorf("Invalid old chain")
}
if newBlock == nil {
return nil, fmt.Errorf("Invalid new chain")
return fmt.Errorf("Invalid new chain")
}
numSplit := newBlock.Number()
@ -702,13 +768,14 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
break
}
newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
if oldBlock == nil {
return nil, fmt.Errorf("Invalid old chain")
return fmt.Errorf("Invalid old chain")
}
if newBlock == nil {
return nil, fmt.Errorf("Invalid new chain")
return fmt.Errorf("Invalid new chain")
}
}
@ -717,27 +784,29 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
}
return newChain, nil
}
// merge merges two different chain to the new canonical chain
func (self *ChainManager) merge(oldBlock, newBlock *types.Block) error {
newChain, err := self.diff(oldBlock, newBlock)
if err != nil {
return fmt.Errorf("chain reorg failed: %v", err)
}
var addedTxs types.Transactions
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
self.mu.Lock()
for _, block := range newChain {
// insert the block in the canonical way, re-writing history
self.insert(block)
// write canonical receipts and transactions
PutTransactions(self.extraDb, block, block.Transactions())
PutReceipts(self.extraDb, GetBlockReceipts(self.extraDb, block.Hash()))
PutTransactions(self.chainDb, block, block.Transactions())
PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash()))
addedTxs = append(addedTxs, block.Transactions()...)
}
self.mu.Unlock()
// calculate the difference between deleted and added transactions
diff := types.TxDifference(deletedTxs, addedTxs)
// When transactions get deleted from the database that means the
// receipts that were created in the fork must also be deleted
for _, tx := range diff {
DeleteReceipt(self.chainDb, tx.Hash())
DeleteTransaction(self.chainDb, tx.Hash())
}
// Must be posted in a goroutine because of the transaction pool trying
// to acquire the chain manager lock
go self.eventMux.Post(RemovedTransactionEvent{diff})
return nil
}
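// Editor's sketch (not part of this diff): the types.TxDifference call above boils down to a
// hash-keyed set difference, i.e. "transactions in the deleted chain that do not also appear
// in the added chain". A stand-alone equivalent, assuming only the tx.Hash() accessor:
func txDifferenceSketch(deleted, added types.Transactions) types.Transactions {
	// Index the transactions that survive in the new chain by hash.
	kept := make(map[common.Hash]struct{}, len(added))
	for _, tx := range added {
		kept[tx.Hash()] = struct{}{}
	}
	// Collect the deleted transactions that are not re-included.
	var diff types.Transactions
	for _, tx := range deleted {
		if _, ok := kept[tx.Hash()]; !ok {
			diff = append(diff, tx)
		}
	}
	return diff
}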
@ -756,12 +825,11 @@ out:
case ChainEvent:
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create a new block takes too long
// and in most cases isn't even necessary.
if self.lastBlockHash == event.Hash {
if self.currentBlock.Hash() == event.Hash {
self.currentGasLimit = CalcGasLimit(event.Block)
self.eventMux.Post(ChainHeadEvent{event.Block})
}
}
self.eventMux.Post(event)
}
}
@ -779,40 +847,3 @@ func blockErr(block *types.Block, err error) {
glog.V(logger.Error).Infoln(err)
glog.V(logger.Debug).Infoln(verifyNonces)
}
type nonceResult struct {
i int
valid bool
}
// block verifies nonces of the given blocks in parallel and returns
// an error if one of the blocks nonce verifications failed.
func verifyNonces(pow pow.PoW, blocks []*types.Block, quit <-chan struct{}, done chan<- nonceResult) {
// Spawn a few workers. They listen for blocks on the in channel
// and send results on done. The workers will exit in the
// background when in is closed.
var (
in = make(chan int)
nworkers = runtime.GOMAXPROCS(0)
)
defer close(in)
if len(blocks) < nworkers {
nworkers = len(blocks)
}
for i := 0; i < nworkers; i++ {
go func() {
for i := range in {
done <- nonceResult{i: i, valid: pow.Verify(blocks[i])}
}
}()
}
// Feed block indices to the workers.
for i := range blocks {
select {
case in <- i:
continue
case <-quit:
return
}
}
}

View File

@ -30,8 +30,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/hashicorp/golang-lru"
@ -46,16 +48,16 @@ func thePow() pow.PoW {
return pow
}
func theChainManager(db common.Database, t *testing.T) *ChainManager {
func theChainManager(db ethdb.Database, t *testing.T) *ChainManager {
var eventMux event.TypeMux
WriteTestNetGenesisBlock(db, db, 0)
chainMan, err := NewChainManager(db, db, db, thePow(), &eventMux)
WriteTestNetGenesisBlock(db, 0)
chainMan, err := NewChainManager(db, thePow(), &eventMux)
if err != nil {
t.Error("failed creating chainmanager:", err)
t.FailNow()
return nil
}
blockMan := NewBlockProcessor(db, db, nil, chainMan, &eventMux)
blockMan := NewBlockProcessor(db, nil, chainMan, &eventMux)
chainMan.SetProcessor(blockMan)
return chainMan
@ -73,10 +75,11 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
// asert the bmans have the same block at i
// assert the bmans have the same block at i
bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
if bi1 != bi2 {
fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
t.Fatal("chains do not have the same hash at height", i)
}
bman2.bc.SetProcessor(bman2)
@ -110,7 +113,6 @@ func printChain(bc *ChainManager) {
// process blocks against a chain
func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
td := new(big.Int)
for _, block := range chainB {
_, _, err := bman.bc.processor.Process(block)
if err != nil {
@ -119,17 +121,12 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
}
return nil, err
}
parent := bman.bc.GetBlock(block.ParentHash())
block.Td = CalcTD(block, parent)
td = block.Td
bman.bc.mu.Lock()
{
WriteBlock(bman.bc.blockDb, block)
}
WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
WriteBlock(bman.bc.chainDb, block)
bman.bc.mu.Unlock()
}
return td, nil
return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
}
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
@ -385,10 +382,14 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
return chain
}
func chm(genesis *types.Block, db common.Database) *ChainManager {
func chm(genesis *types.Block, db ethdb.Database) *ChainManager {
var eventMux event.TypeMux
bc := &ChainManager{extraDb: db, blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
bc.cache, _ = lru.New(100)
bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
bc.headerCache, _ = lru.New(100)
bc.bodyCache, _ = lru.New(100)
bc.bodyRLPCache, _ = lru.New(100)
bc.tdCache, _ = lru.New(100)
bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.processor = bproc{}
bc.ResetWithGenesisBlock(genesis)
@ -399,7 +400,7 @@ func chm(genesis *types.Block, db common.Database) *ChainManager {
func TestReorgLongest(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, db, 0)
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
@ -420,9 +421,62 @@ func TestReorgLongest(t *testing.T) {
}
}
func TestBadHashes(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
bc := chm(genesis, db)
chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
BadHashes[chain[2].Header().Hash()] = true
_, err = bc.InsertChain(chain)
if !IsBadHashError(err) {
t.Errorf("error mismatch: want: BadHashError, have: %v", err)
}
}
func TestReorgBadHashes(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
bc := chm(genesis, db)
chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
bc.InsertChain(chain)
if chain[3].Header().Hash() != bc.LastBlockHash() {
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash())
}
// NewChainManager should check BadHashes when loading its db
BadHashes[chain[3].Header().Hash()] = true
var eventMux event.TypeMux
ncm, err := NewChainManager(db, FakePow{}, &eventMux)
if err != nil {
t.Errorf("NewChainManager err: %s", err)
}
// check that it set the head to the (valid) parent of the bad-hash block
if chain[2].Header().Hash() != ncm.LastBlockHash() {
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash())
}
if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit())
}
}
func TestReorgShortest(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, db, 0)
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
@ -446,18 +500,18 @@ func TestReorgShortest(t *testing.T) {
func TestInsertNonceError(t *testing.T) {
for i := 1; i < 25 && !t.Failed(); i++ {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, db, 0)
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
bc := chm(genesis, db)
bc.processor = NewBlockProcessor(db, db, bc.pow, bc, bc.eventMux)
bc.processor = NewBlockProcessor(db, bc.pow, bc, bc.eventMux)
blocks := makeChain(bc.currentBlock, i, db, 0)
fail := rand.Int() % len(blocks)
failblock := blocks[fail]
bc.pow = failpow{failblock.NumberU64()}
bc.pow = failPow{failblock.NumberU64()}
n, err := bc.InsertChain(blocks)
// Check that the returned error indicates the nonce failure.
@ -484,34 +538,115 @@ func TestInsertNonceError(t *testing.T) {
}
}
/*
func TestGenesisMismatch(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
var mux event.TypeMux
genesis := GenesisBlock(0, db)
_, err := NewChainManager(genesis, db, db, db, thePow(), &mux)
if err != nil {
t.Error(err)
// Tests that chain reorganizations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be.
params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
db, _ = ethdb.NewMemDatabase()
)
genesis := WriteGenesisBlockForTesting(db,
GenesisAccount{addr1, big.NewInt(1000000)},
GenesisAccount{addr2, big.NewInt(1000000)},
GenesisAccount{addr3, big.NewInt(1000000)},
)
// Create two transactions shared between the chains:
// - postponed: transaction included at a later block in the forked chain
// - swapped: transaction included at the same block number in the forked chain
postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
// Create two transactions that will be dropped by the forked chain:
// - pastDrop: transaction dropped retroactively from a past block
// - freshDrop: transaction dropped exactly at the block where the reorg is detected
var pastDrop, freshDrop *types.Transaction
// Create three transactions that will be added in the forked chain:
// - pastAdd: transaction added before the reorganization is detected
// - freshAdd: transaction added at the exact block the reorg is detected
// - futureAdd: transaction added after the reorg has already finished
var pastAdd, freshAdd, futureAdd *types.Transaction
chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
switch i {
case 0:
pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point
gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork
case 2:
freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
gen.AddTx(swapped) // This transaction will be swapped out at the exact height
gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
}
})
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
chainman, _ := NewChainManager(db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
if i, err := chainman.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
genesis = GenesisBlock(1, db)
_, err = NewChainManager(genesis, db, db, db, thePow(), &mux)
if err == nil {
t.Error("expected genesis mismatch error")
// overwrite the old chain
chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
switch i {
case 0:
pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
gen.AddTx(pastAdd) // This transaction needs to be injected during reorg
case 2:
gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain
freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time
case 3:
futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
gen.AddTx(futureAdd) // This transaction will be added after a full reorg
}
})
if _, err := chainman.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// removed tx
for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
if GetTransaction(db, tx.Hash()) != nil {
t.Errorf("drop %d: tx found while shouldn't have been", i)
}
if GetReceipt(db, tx.Hash()) != nil {
t.Errorf("drop %d: receipt found while shouldn't have been", i)
}
}
// added tx
for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
if GetTransaction(db, tx.Hash()) == nil {
t.Errorf("add %d: expected tx to be found", i)
}
if GetReceipt(db, tx.Hash()) == nil {
t.Errorf("add %d: expected receipt to be found", i)
}
}
// shared tx
for i, tx := range (types.Transactions{postponed, swapped}) {
if GetTransaction(db, tx.Hash()) == nil {
t.Errorf("share %d: expected tx to be found", i)
}
if GetReceipt(db, tx.Hash()) == nil {
t.Errorf("share %d: expected receipt to be found", i)
}
}
}
*/
// failpow returns false from Verify for a certain block number.
type failpow struct{ num uint64 }
func (pow failpow) Search(pow.Block, <-chan struct{}) (nonce uint64, mixHash []byte) {
return 0, nil
}
func (pow failpow) Verify(b pow.Block) bool {
return b.NumberU64() != pow.num
}
func (pow failpow) GetHashrate() int64 {
return 0
}
func (pow failpow) Turbo(bool) {
}

core/chain_pow.go (new file, 87 lines)
View File

@ -0,0 +1,87 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"runtime"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/pow"
)
// nonceCheckResult contains the result of a nonce verification.
type nonceCheckResult struct {
index int // Index of the item verified from an input array
valid bool // Result of the nonce verification
}
// verifyNoncesFromHeaders starts a concurrent header nonce verification,
// returning a quit channel to abort the operations and a results channel
// to retrieve the async verifications.
func verifyNoncesFromHeaders(checker pow.PoW, headers []*types.Header) (chan<- struct{}, <-chan nonceCheckResult) {
items := make([]pow.Block, len(headers))
for i, header := range headers {
items[i] = types.NewBlockWithHeader(header)
}
return verifyNonces(checker, items)
}
// verifyNoncesFromBlocks starts a concurrent block nonce verification,
// returning a quit channel to abort the operations and a results channel
// to retrieve the async verifications.
func verifyNoncesFromBlocks(checker pow.PoW, blocks []*types.Block) (chan<- struct{}, <-chan nonceCheckResult) {
items := make([]pow.Block, len(blocks))
for i, block := range blocks {
items[i] = block
}
return verifyNonces(checker, items)
}
// verifyNonces starts a concurrent nonce verification, returning a quit channel
// to abort the operations and a results channel to retrieve the async checks.
func verifyNonces(checker pow.PoW, items []pow.Block) (chan<- struct{}, <-chan nonceCheckResult) {
// Spawn as many workers as allowed threads
workers := runtime.GOMAXPROCS(0)
if len(items) < workers {
workers = len(items)
}
// Create a task channel and spawn the verifiers
tasks := make(chan int, workers)
results := make(chan nonceCheckResult, len(items)) // Buffered to make sure all workers stop
for i := 0; i < workers; i++ {
go func() {
for index := range tasks {
results <- nonceCheckResult{index: index, valid: checker.Verify(items[index])}
}
}()
}
// Feed item indices to the workers until done or aborted
abort := make(chan struct{})
go func() {
defer close(tasks)
for i := range items {
select {
case tasks <- i:
continue
case <-abort:
return
}
}
}()
return abort, results
}
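The file above is self-contained: a bounded worker pool fed from a task channel, a results channel buffered to the full item count so workers can always deliver, and an abort channel that only stops the feeder (in-flight checks still complete). Below is a minimal, standalone sketch of the same pattern; check, verifyAll and main are illustrative names, not part of package core.

package main

import (
	"fmt"
	"runtime"
)

// check stands in for a PoW nonce verification; here it simply flags odd indices.
func check(i int) bool { return i%2 == 0 }

// verifyAll mirrors verifyNonces: spawn at most GOMAXPROCS workers, buffer the
// results channel to the item count so no worker ever blocks on delivery, and
// stop feeding new work (but not in-flight checks) when abort is closed.
func verifyAll(n int) (chan<- struct{}, <-chan bool) {
	workers := runtime.GOMAXPROCS(0)
	if n < workers {
		workers = n
	}
	tasks := make(chan int, workers)
	results := make(chan bool, n)
	for i := 0; i < workers; i++ {
		go func() {
			for index := range tasks {
				results <- check(index)
			}
		}()
	}
	abort := make(chan struct{})
	go func() {
		defer close(tasks)
		for i := 0; i < n; i++ {
			select {
			case tasks <- i:
			case <-abort:
				return
			}
		}
	}()
	return abort, results
}

func main() {
	abort, results := verifyAll(8)
	defer close(abort)
	for i := 0; i < 8; i++ {
		fmt.Println(<-results)
	}
}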

core/chain_pow_test.go (new file, 233 lines)

@ -0,0 +1,233 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"runtime"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/pow"
)
// failPow is a non-validating proof of work implementation that returns true
// from Verify for all but one block.
type failPow struct {
failing uint64
}
func (pow failPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
return 0, nil
}
func (pow failPow) Verify(block pow.Block) bool { return block.NumberU64() != pow.failing }
func (pow failPow) GetHashrate() int64 { return 0 }
func (pow failPow) Turbo(bool) {}
// delayedPow is a non-validating proof of work implementation that returns true
// from Verify for all blocks, but delays each verification by the configured
// amount of time.
type delayedPow struct {
delay time.Duration
}
func (pow delayedPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
return 0, nil
}
func (pow delayedPow) Verify(block pow.Block) bool { time.Sleep(pow.delay); return true }
func (pow delayedPow) GetHashrate() int64 { return 0 }
func (pow delayedPow) Turbo(bool) {}
// Tests that simple POW verification works, for both good and bad blocks.
func TestPowVerification(t *testing.T) {
// Create a simple chain to verify
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 8, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Run the POW checker for blocks one-by-one, checking for both valid and invalid nonces
for i := 0; i < len(blocks); i++ {
for j, full := range []bool{true, false} {
for k, valid := range []bool{true, false} {
var results <-chan nonceCheckResult
switch {
case full && valid:
_, results = verifyNoncesFromBlocks(FakePow{}, []*types.Block{blocks[i]})
case full && !valid:
_, results = verifyNoncesFromBlocks(failPow{blocks[i].NumberU64()}, []*types.Block{blocks[i]})
case !full && valid:
_, results = verifyNoncesFromHeaders(FakePow{}, []*types.Header{headers[i]})
case !full && !valid:
_, results = verifyNoncesFromHeaders(failPow{headers[i].Number.Uint64()}, []*types.Header{headers[i]})
}
// Wait for the verification result
select {
case result := <-results:
if result.index != 0 {
t.Errorf("test %d.%d.%d: invalid index: have %d, want 0", i, j, k, result.index)
}
if result.valid != valid {
t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, result.valid, valid)
}
case <-time.After(time.Second):
t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d.%d.%d: unexpected result returned: %v", i, j, k, result)
case <-time.After(25 * time.Millisecond):
}
}
}
}
}
// Tests that concurrent POW verification works, for both good and bad blocks.
func TestPowConcurrentVerification2(t *testing.T) { testPowConcurrentVerification(t, 2) }
func TestPowConcurrentVerification8(t *testing.T) { testPowConcurrentVerification(t, 8) }
func TestPowConcurrentVerification32(t *testing.T) { testPowConcurrentVerification(t, 32) }
func testPowConcurrentVerification(t *testing.T, threads int) {
// Create a simple chain to verify
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 8, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Set the number of threads to verify on
old := runtime.GOMAXPROCS(threads)
defer runtime.GOMAXPROCS(old)
// Run the POW checker for the entire block chain at once, both for a valid and
// an invalid chain (one bad nonce suffices; the last-but-one block is chosen arbitrarily).
for i, full := range []bool{true, false} {
for j, valid := range []bool{true, false} {
var results <-chan nonceCheckResult
switch {
case full && valid:
_, results = verifyNoncesFromBlocks(FakePow{}, blocks)
case full && !valid:
_, results = verifyNoncesFromBlocks(failPow{uint64(len(blocks) - 1)}, blocks)
case !full && valid:
_, results = verifyNoncesFromHeaders(FakePow{}, headers)
case !full && !valid:
_, results = verifyNoncesFromHeaders(failPow{uint64(len(headers) - 1)}, headers)
}
// Wait for all the verification results
checks := make(map[int]bool)
for k := 0; k < len(blocks); k++ {
select {
case result := <-results:
if _, ok := checks[result.index]; ok {
t.Fatalf("test %d.%d.%d: duplicate results for %d", i, j, k, result.index)
}
if result.index < 0 || result.index >= len(blocks) {
t.Fatalf("test %d.%d.%d: result %d out of bounds [%d, %d]", i, j, k, result.index, 0, len(blocks)-1)
}
checks[result.index] = result.valid
case <-time.After(time.Second):
t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
}
}
// Check nonce check validity
for k := 0; k < len(blocks); k++ {
want := valid || (k != len(blocks)-2) // We chose the last but one nonce in the chain to fail
if checks[k] != want {
t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, checks[k], want)
}
}
// Make sure no more data is returned
select {
case result := <-results:
t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
case <-time.After(25 * time.Millisecond):
}
}
}
}
// Tests that aborting a POW validation indeed prevents further checks from being
// run, and that no left-over goroutines are leaked.
func TestPowConcurrentAbortion2(t *testing.T) { testPowConcurrentAbortion(t, 2) }
func TestPowConcurrentAbortion8(t *testing.T) { testPowConcurrentAbortion(t, 8) }
func TestPowConcurrentAbortion32(t *testing.T) { testPowConcurrentAbortion(t, 32) }
func testPowConcurrentAbortion(t *testing.T, threads int) {
// Create a simple chain to verify
var (
testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 1024, nil)
)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
// Set the number of threads to verify on
old := runtime.GOMAXPROCS(threads)
defer runtime.GOMAXPROCS(old)
// Run the POW checker for the entire block chain at once
for i, full := range []bool{true, false} {
var abort chan<- struct{}
var results <-chan nonceCheckResult
// Start the verifications and immediately abort
if full {
abort, results = verifyNoncesFromBlocks(delayedPow{time.Millisecond}, blocks)
} else {
abort, results = verifyNoncesFromHeaders(delayedPow{time.Millisecond}, headers)
}
close(abort)
// Deplete the results channel
verified := make(map[int]struct{})
for depleted := false; !depleted; {
select {
case result := <-results:
verified[result.index] = struct{}{}
case <-time.After(50 * time.Millisecond):
depleted = true
}
}
// Check that abortion was honored by not processing too many POWs
if len(verified) > 2*threads {
t.Errorf("test %d: verification count too large: have %d, want below %d", i, len(verified), 2*threads)
}
// Check that there are no gaps in the results
for j := 0; j < len(verified); j++ {
if _, ok := verified[j]; !ok {
t.Errorf("test %d.%d: gap found in verification results", i, j)
}
}
}
}

View File

@ -19,10 +19,10 @@ package core
import (
"bytes"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
@ -30,9 +30,18 @@ import (
)
var (
blockHashPre = []byte("block-hash-")
blockNumPre = []byte("block-num-")
expDiffPeriod = big.NewInt(100000)
headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock")
blockPrefix = []byte("block-")
blockNumPrefix = []byte("block-num-")
headerSuffix = []byte("-header")
bodySuffix = []byte("-body")
tdSuffix = []byte("-td")
ExpDiffPeriod = big.NewInt(100000)
blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
)
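The prefixes and suffixes above are concatenated with a block hash or number to form the database keys used by the accessor functions further down this file. A standalone sketch of the key layout, using local copies of the byte slices and a four-byte stand-in for a real 32-byte hash:

package main

import (
	"fmt"
	"math/big"
)

// Local copies of the prefixes/suffixes above, for illustration only.
var (
	blockPrefix    = []byte("block-")
	blockNumPrefix = []byte("block-num-")
	headerSuffix   = []byte("-header")
	bodySuffix     = []byte("-body")
)

func main() {
	hash := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for a block hash

	headerKey := append(append(append([]byte{}, blockPrefix...), hash...), headerSuffix...)
	bodyKey := append(append(append([]byte{}, blockPrefix...), hash...), bodySuffix...)
	canonKey := append(append([]byte{}, blockNumPrefix...), big.NewInt(314).Bytes()...)

	fmt.Printf("header key:    %q\n", headerKey) // "block-\xde\xad\xbe\xef-header"
	fmt.Printf("body key:      %q\n", bodyKey)   // "block-\xde\xad\xbe\xef-body"
	fmt.Printf("canonical key: %q\n", canonKey)  // "block-num-" + big-endian 314
}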
// CalcDifficulty is the difficulty adjustment algorithm. It returns
@ -57,7 +66,7 @@ func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int)
}
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount.Div(periodCount, expDiffPeriod)
periodCount.Div(periodCount, ExpDiffPeriod)
if periodCount.Cmp(common.Big1) > 0 {
// diff = diff + 2^(periodCount - 2)
expDiff := periodCount.Sub(periodCount, common.Big2)
@ -69,16 +78,6 @@ func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int)
return diff
}
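The hunk above keeps the exponential component of the difficulty adjustment (the "difficulty bomb"): once periodCount = (parentNumber+1)/ExpDiffPeriod exceeds one, 2^(periodCount-2) is added to the difficulty. A standalone sketch of just that component, assuming ExpDiffPeriod = 100000 as declared above; the timestamp-driven part of CalcDifficulty is not reproduced here.

package main

import (
	"fmt"
	"math/big"
)

// diffBomb computes only the exponential term: periodCount = (parentNumber+1)/100000,
// and 2^(periodCount-2) once periodCount exceeds one, otherwise zero.
func diffBomb(parentNumber *big.Int) *big.Int {
	expDiffPeriod := big.NewInt(100000)
	periodCount := new(big.Int).Add(parentNumber, big.NewInt(1))
	periodCount.Div(periodCount, expDiffPeriod)
	if periodCount.Cmp(big.NewInt(1)) <= 0 {
		return new(big.Int)
	}
	exp := new(big.Int).Sub(periodCount, big.NewInt(2))
	return new(big.Int).Exp(big.NewInt(2), exp, nil)
}

func main() {
	for _, n := range []int64{99999, 200000, 500000, 1000000} {
		fmt.Printf("parent #%d -> bomb component %v\n", n, diffBomb(big.NewInt(n)))
	}
}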
// CalcTD computes the total difficulty of block.
func CalcTD(block, parent *types.Block) *big.Int {
if parent == nil {
return block.Difficulty()
}
d := block.Difficulty()
d.Add(d, parent.Td)
return d
}
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
@ -112,8 +111,230 @@ func CalcGasLimit(parent *types.Block) *big.Int {
return gl
}
// GetBlockByHash returns the block corresponding to the hash or nil if not found
func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeadHeaderHash retrieves the hash of the current canonical head block's
// header. The difference between this and GetHeadBlockHash is that whereas the
// last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the
// fast synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash {
data, _ := db.Get(headHeaderKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeadBlockHash retrieves the hash of the current canonical head block.
func GetHeadBlockHash(db ethdb.Database) common.Hash {
data, _ := db.Get(headBlockKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...))
return data
}
// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
func GetHeader(db ethdb.Database, hash common.Hash) *types.Header {
data := GetHeaderRLP(db, hash)
if len(data) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
return nil
}
return header
}
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...))
return data
}
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash) *types.Body {
data := GetBodyRLP(db, hash)
if len(data) == 0 {
return nil
}
body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
return nil
}
return body
}
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
if len(data) == 0 {
return nil
}
td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
return nil
}
return td
}
// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
// Retrieve the block header and body contents
header := GetHeader(db, hash)
if header == nil {
return nil
}
body := GetBody(db, hash)
if body == nil {
return nil
}
// Reassemble the block and return
return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
if err := db.Put(key, hash.Bytes()); err != nil {
glog.Fatalf("failed to store number to hash mapping into database: %v", err)
return err
}
return nil
}
// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last header's hash into database: %v", err)
return err
}
return nil
}
// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last block's hash into database: %v", err)
return err
}
return nil
}
// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
data, err := rlp.EncodeToBytes(header)
if err != nil {
return err
}
key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store header into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
return nil
}
// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
}
key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store block body into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
return nil
}
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return err
}
key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store block total difficulty into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
return nil
}
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error {
// Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
return err
}
// Store the header too, signaling full block ownership
if err := WriteHeader(db, block.Header()); err != nil {
return err
}
return nil
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
}
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...))
}
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...))
}
// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
}
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash) {
DeleteHeader(db, hash)
DeleteBody(db, hash)
DeleteTd(db, hash)
}
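Taken together, the helpers above form a simple write/read/delete cycle: WriteBlock persists the body before the header so a half-written block is never seen as complete, and GetBlock only succeeds once both parts are present. A minimal sketch of that round trip, assuming the exported functions behave as exercised by the storage tests further down:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("example block")})

	// Body is written first, then the header, signaling full block ownership.
	if err := core.WriteBlock(db, block); err != nil {
		panic(err)
	}
	if stored := core.GetBlock(db, block.Hash()); stored == nil || stored.Hash() != block.Hash() {
		panic("stored block missing or mismatched")
	}

	core.DeleteBlock(db, block.Hash())
	fmt.Println("deleted:", core.GetBlock(db, block.Hash()) == nil) // true
}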
// [deprecated by eth/63]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
data, _ := db.Get(append(blockHashPre, hash[:]...))
if len(data) == 0 {
return nil
@ -125,55 +346,3 @@ func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
}
return (*types.Block)(&block)
}
// GetBlockByNumber returns the canonical block by number or nil if not found
func GetBlockByNumber(db common.Database, number uint64) *types.Block {
key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
if len(key) == 0 {
return nil
}
return GetBlockByHash(db, common.BytesToHash(key))
}
// WriteCanonNumber writes the canonical hash for the given block
func WriteCanonNumber(db common.Database, block *types.Block) error {
key := append(blockNumPre, block.Number().Bytes()...)
err := db.Put(key, block.Hash().Bytes())
if err != nil {
return err
}
return nil
}
// WriteHead force writes the current head
func WriteHead(db common.Database, block *types.Block) error {
err := WriteCanonNumber(db, block)
if err != nil {
return err
}
err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
if err != nil {
return err
}
return nil
}
// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
tstart := time.Now()
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
key := append(blockHashPre, block.Hash().Bytes()...)
err := db.Put(key, enc)
if err != nil {
glog.Fatal("db write fail:", err)
return err
}
if glog.V(logger.Debug) {
glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
}
return nil
}

View File

@ -23,6 +23,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
)
type diffTest struct {
@ -75,3 +79,242 @@ func TestDifficulty(t *testing.T) {
}
}
}
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test header to move around the database and make sure it's really new
header := &types.Header{Extra: []byte("test header")}
if entry := GetHeader(db, header.Hash()); entry != nil {
t.Fatalf("Non existent header returned: %v", entry)
}
// Write and verify the header in the database
if err := WriteHeader(db, header); err != nil {
t.Fatalf("Failed to write header into database: %v", err)
}
if entry := GetHeader(db, header.Hash()); entry == nil {
t.Fatalf("Stored header not found")
} else if entry.Hash() != header.Hash() {
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
}
if entry := GetHeaderRLP(db, header.Hash()); entry == nil {
t.Fatalf("Stored header RLP not found")
} else {
hasher := sha3.NewKeccak256()
hasher.Write(entry)
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
}
}
// Delete the header and verify the execution
DeleteHeader(db, header.Hash())
if entry := GetHeader(db, header.Hash()); entry != nil {
t.Fatalf("Deleted header returned: %v", entry)
}
}
// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test body to move around the database and make sure it's really new
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
hasher := sha3.NewKeccak256()
rlp.Encode(hasher, body)
hash := common.BytesToHash(hasher.Sum(nil))
if entry := GetBody(db, hash); entry != nil {
t.Fatalf("Non existent body returned: %v", entry)
}
// Write and verify the body in the database
if err := WriteBody(db, hash, body); err != nil {
t.Fatalf("Failed to write body into database: %v", err)
}
if entry := GetBody(db, hash); entry == nil {
t.Fatalf("Stored body not found")
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
}
if entry := GetBodyRLP(db, hash); entry == nil {
t.Fatalf("Stored body RLP not found")
} else {
hasher := sha3.NewKeccak256()
hasher.Write(entry)
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
}
}
// Delete the body and verify the execution
DeleteBody(db, hash)
if entry := GetBody(db, hash); entry != nil {
t.Fatalf("Deleted body returned: %v", entry)
}
}
// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test block to move around the database and make sure it's really new
block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
if entry := GetHeader(db, block.Hash()); entry != nil {
t.Fatalf("Non existent header returned: %v", entry)
}
if entry := GetBody(db, block.Hash()); entry != nil {
t.Fatalf("Non existent body returned: %v", entry)
}
// Write and verify the block in the database
if err := WriteBlock(db, block); err != nil {
t.Fatalf("Failed to write block into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
}
if entry := GetHeader(db, block.Hash()); entry == nil {
t.Fatalf("Stored header not found")
} else if entry.Hash() != block.Header().Hash() {
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
}
if entry := GetBody(db, block.Hash()); entry == nil {
t.Fatalf("Stored body not found")
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()})
}
// Delete the block and verify the execution
DeleteBlock(db, block.Hash())
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Deleted block returned: %v", entry)
}
if entry := GetHeader(db, block.Hash()); entry != nil {
t.Fatalf("Deleted header returned: %v", entry)
}
if entry := GetBody(db, block.Hash()); entry != nil {
t.Fatalf("Deleted body returned: %v", entry)
}
}
// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
// Store a header and check that it's not recognized as a block
if err := WriteHeader(db, block.Header()); err != nil {
t.Fatalf("Failed to write header into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
DeleteHeader(db, block.Hash())
// Store a body and check that it's not recognized as a block
if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
t.Fatalf("Failed to write body into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
DeleteBody(db, block.Hash())
// Store a header and a body separately and check reassembly
if err := WriteHeader(db, block.Header()); err != nil {
t.Fatalf("Failed to write header into database: %v", err)
}
if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
t.Fatalf("Failed to write body into database: %v", err)
}
if entry := GetBlock(db, block.Hash()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
}
}
// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test TD to move around the database and make sure it's really new
hash, td := common.Hash{}, big.NewInt(314)
if entry := GetTd(db, hash); entry != nil {
t.Fatalf("Non existent TD returned: %v", entry)
}
// Write and verify the TD in the database
if err := WriteTd(db, hash, td); err != nil {
t.Fatalf("Failed to write TD into database: %v", err)
}
if entry := GetTd(db, hash); entry == nil {
t.Fatalf("Stored TD not found")
} else if entry.Cmp(td) != 0 {
t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
}
// Delete the TD and verify the execution
DeleteTd(db, hash)
if entry := GetTd(db, hash); entry != nil {
t.Fatalf("Deleted TD returned: %v", entry)
}
}
// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Create a test canonical number and assigned hash to move around
hash, number := common.Hash{0: 0xff}, uint64(314)
if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
t.Fatalf("Non existent canonical mapping returned: %v", entry)
}
// Write and verify the canonical mapping in the database
if err := WriteCanonicalHash(db, hash, number); err != nil {
t.Fatalf("Failed to write canonical mapping into database: %v", err)
}
if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
t.Fatalf("Stored canonical mapping not found")
} else if entry != hash {
t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
}
// Delete the canonical mapping and verify the execution
DeleteCanonicalHash(db, number)
if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
t.Fatalf("Deleted canonical mapping returned: %v", entry)
}
}
// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
// Check that no head entries are in a pristine database
if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
t.Fatalf("Non head header entry returned: %v", entry)
}
if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
t.Fatalf("Non head block entry returned: %v", entry)
}
// Assign separate entries for the head header and block
if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
t.Fatalf("Failed to write head header hash: %v", err)
}
if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
t.Fatalf("Failed to write head block hash: %v", err)
}
// Check that both heads are present, and different (i.e. two heads maintained)
if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
}
if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
}
}

View File

@ -25,9 +25,10 @@ import (
)
var (
BlockNumberErr = errors.New("block number invalid")
BlockFutureErr = errors.New("block time is in the future")
BlockEqualTSErr = errors.New("block time stamp equal to previous")
BlockNumberErr = errors.New("block number invalid")
BlockFutureErr = errors.New("block time is in the future")
BlockTSTooBigErr = errors.New("block time too big")
BlockEqualTSErr = errors.New("block time stamp equal to previous")
)
// Parent error. In case a parent is unknown this error will be thrown
@ -176,3 +177,14 @@ func IsValueTransferErr(e error) bool {
_, ok := e.(*ValueTransferError)
return ok
}
type BadHashError common.Hash
func (h BadHashError) Error() string {
return fmt.Sprintf("Found known bad hash in chain %x", h[:])
}
func IsBadHashError(err error) bool {
_, ok := err.(BadHashError)
return ok
}
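A short sketch of how a caller might branch on this error type. Constructing the error by conversion here is purely illustrative; in practice it would be returned by block validation when a blacklisted hash is encountered.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// BadHashError is just a common.Hash with an Error method, so it can be
	// built by conversion for this example.
	var err error = core.BadHashError(common.HexToHash("0xdeadbeef"))

	if core.IsBadHashError(err) {
		fmt.Println("refusing to import chain segment:", err)
	}
}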

View File

@ -36,6 +36,9 @@ type NewBlockEvent struct{ Block *types.Block }
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }
// ChainSplit is posted when a new head is detected
type ChainSplitEvent struct {
Block *types.Block

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/params"
)
// Execution is the execution environment for the given call or create action.
type Execution struct {
env vm.Environment
address *common.Address
@ -35,12 +36,15 @@ type Execution struct {
Gas, price, value *big.Int
}
// NewExecution returns a new execution environment that handles all calling
// and creation logic defined by the YP.
func NewExecution(env vm.Environment, address *common.Address, input []byte, gas, gasPrice, value *big.Int) *Execution {
exe := &Execution{env: env, address: address, input: input, Gas: gas, price: gasPrice, value: value}
exe.evm = vm.NewVm(env)
return exe
}
// Call executes within the given context
func (self *Execution) Call(codeAddr common.Address, caller vm.ContextRef) ([]byte, error) {
// Retrieve the executing code
code := self.env.State().GetCode(codeAddr)
@ -48,6 +52,9 @@ func (self *Execution) Call(codeAddr common.Address, caller vm.ContextRef) ([]by
return self.exec(&codeAddr, code, caller)
}
// Create creates a new contract and runs the initialisation procedure of the
// contract. It returns the code produced by the initialisation, which is
// stored elsewhere.
func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, account *state.StateObject) {
// Input must be nil for create
code := self.input
@ -63,16 +70,24 @@ func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, acco
return
}
// exec executes the given code and executes within the contextAddr context.
func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm.ContextRef) (ret []byte, err error) {
env := self.env
evm := self.evm
// Depth check execution. Fail if we're trying to execute above the
// limit.
if env.Depth() > int(params.CallCreateDepth.Int64()) {
caller.ReturnGas(self.Gas, self.price)
return nil, vm.DepthError
}
vsnapshot := env.State().Copy()
if !env.CanTransfer(env.State().GetStateObject(caller.Address()), self.value) {
caller.ReturnGas(self.Gas, self.price)
return nil, ValueTransferErr("insufficient funds to transfer value. Req %v, has %v", self.value, env.State().GetBalance(caller.Address()))
}
var createAccount bool
if self.address == nil {
// Generate a new address
@ -95,15 +110,7 @@ func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm.
} else {
to = env.State().GetOrNewStateObject(*self.address)
}
err = env.Transfer(from, to, self.value)
if err != nil {
env.State().Set(vsnapshot)
caller.ReturnGas(self.Gas, self.price)
return nil, ValueTransferErr("insufficient funds to transfer value. Req %v, has %v", self.value, from.Balance())
}
vm.Transfer(from, to, self.value)
context := vm.NewContext(caller, to, self.value, self.Gas, self.price)
context.SetCallCode(contextAddr, code)

View File

@ -22,6 +22,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
type AccountChange struct {
@ -111,7 +113,7 @@ done:
// Get the logs of the block
unfiltered, err := self.eth.BlockProcessor().GetLogs(block)
if err != nil {
chainlogger.Warnln("err: filter get logs ", err)
glog.V(logger.Warn).Infoln("err: filter get logs ", err)
break
}
@ -129,12 +131,12 @@ done:
func includes(addresses []common.Address, a common.Address) bool {
for _, addr := range addresses {
if addr != a {
return false
if addr == a {
return true
}
}
return true
return false
}
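The corrected includes above reports membership rather than requiring every element to match. A standalone restatement against plain strings, purely to illustrate the changed semantics:

package main

import "fmt"

// contains mirrors the fixed includes: true as soon as a match is found,
// false only after every element has been checked.
func contains(addresses []string, a string) bool {
	for _, addr := range addresses {
		if addr == a {
			return true
		}
	}
	return false
}

func main() {
	addrs := []string{"0xaa", "0xbb"}
	fmt.Println(contains(addrs, "0xbb")) // true
	fmt.Println(contains(addrs, "0xcc")) // false
	// The previous version returned false as soon as the first element
	// differed, so filters with more than one address never matched the rest.
}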
func (self *Filter) FilterLogs(logs state.Logs) state.Logs {

View File

@ -27,13 +27,14 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
)
// WriteGenesisBlock writes the genesis block to the database as block number 0
func WriteGenesisBlock(stateDb, blockDb common.Database, reader io.Reader) (*types.Block, error) {
func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, error) {
contents, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
@ -59,7 +60,7 @@ func WriteGenesisBlock(stateDb, blockDb common.Database, reader io.Reader) (*typ
return nil, err
}
statedb := state.New(common.Hash{}, stateDb)
statedb := state.New(common.Hash{}, chainDb)
for addr, account := range genesis.Alloc {
address := common.HexToAddress(addr)
statedb.AddBalance(address, common.String2Big(account.Balance))
@ -73,7 +74,7 @@ func WriteGenesisBlock(stateDb, blockDb common.Database, reader io.Reader) (*typ
difficulty := common.String2Big(genesis.Difficulty)
block := types.NewBlock(&types.Header{
Nonce: types.EncodeNonce(common.String2Big(genesis.Nonce).Uint64()),
Time: common.String2Big(genesis.Timestamp).Uint64(),
Time: common.String2Big(genesis.Timestamp),
ParentHash: common.HexToHash(genesis.ParentHash),
Extra: common.FromHex(genesis.ExtraData),
GasLimit: common.String2Big(genesis.GasLimit),
@ -82,34 +83,35 @@ func WriteGenesisBlock(stateDb, blockDb common.Database, reader io.Reader) (*typ
Coinbase: common.HexToAddress(genesis.Coinbase),
Root: statedb.Root(),
}, nil, nil, nil)
block.Td = difficulty
if block := GetBlockByHash(blockDb, block.Hash()); block != nil {
if block := GetBlock(chainDb, block.Hash()); block != nil {
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
err := WriteCanonNumber(blockDb, block)
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
if err != nil {
return nil, err
}
return block, nil
}
statedb.Sync()
err = WriteBlock(blockDb, block)
if err != nil {
if err := WriteTd(chainDb, block.Hash(), difficulty); err != nil {
return nil, err
}
err = WriteHead(blockDb, block)
if err != nil {
if err := WriteBlock(chainDb, block); err != nil {
return nil, err
}
if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
return nil, err
}
if err := WriteHeadBlockHash(chainDb, block.Hash()); err != nil {
return nil, err
}
return block, nil
}
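WriteGenesisBlock reads the genesis specification as JSON from an io.Reader. A minimal sketch of feeding it a custom spec, limited to the fields exercised by the test templates below (nonce, gasLimit, difficulty, alloc); the address and values are arbitrary examples.

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	// Example spec: one pre-funded account, hex-encoded values as in the
	// testing templates further down.
	spec := `{
		"nonce":      "0x0000000000000042",
		"gasLimit":   "0x47e7c4",
		"difficulty": "0x020000",
		"alloc": {
			"0x0000000000000000000000000000000000000001": {"balance": "0x0de0b6b3a7640000"}
		}
	}`
	block, err := core.WriteGenesisBlock(db, strings.NewReader(spec))
	if err != nil {
		panic(err)
	}
	fmt.Printf("genesis block %x written\n", block.Hash())
}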
// GenesisBlockForTesting creates a block in which addr has the given wei balance.
// The state trie of the block is written to db.
func GenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
statedb := state.New(common.Hash{}, db)
obj := statedb.GetOrNewStateObject(addr)
obj.SetBalance(balance)
@ -120,24 +122,35 @@ func GenesisBlockForTesting(db common.Database, addr common.Address, balance *bi
GasLimit: params.GenesisGasLimit,
Root: statedb.Root(),
}, nil, nil, nil)
block.Td = params.GenesisDifficulty
return block
}
func WriteGenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
type GenesisAccount struct {
Address common.Address
Balance *big.Int
}
func WriteGenesisBlockForTesting(db ethdb.Database, accounts ...GenesisAccount) *types.Block {
accountJson := "{"
for i, account := range accounts {
if i != 0 {
accountJson += ","
}
accountJson += fmt.Sprintf(`"0x%x":{"balance":"0x%x"}`, account.Address, account.Balance.Bytes())
}
accountJson += "}"
testGenesis := fmt.Sprintf(`{
"nonce":"0x%x",
"gasLimit":"0x%x",
"difficulty":"0x%x",
"alloc": {
"0x%x":{"balance":"0x%x"}
}
}`, types.EncodeNonce(0), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes(), addr, balance.Bytes())
block, _ := WriteGenesisBlock(db, db, strings.NewReader(testGenesis))
"alloc": %s
}`, types.EncodeNonce(0), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes(), accountJson)
block, _ := WriteGenesisBlock(db, strings.NewReader(testGenesis))
return block
}
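The reworked testing helper accepts any number of pre-funded accounts instead of a single address/balance pair. A hedged usage sketch with two arbitrary accounts:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	// Pre-fund two accounts in a throwaway genesis; addresses and balances
	// are arbitrary examples.
	genesis := core.WriteGenesisBlockForTesting(db,
		core.GenesisAccount{Address: common.HexToAddress("0x0000000000000000000000000000000000000001"), Balance: big.NewInt(1000000)},
		core.GenesisAccount{Address: common.HexToAddress("0x0000000000000000000000000000000000000002"), Balance: big.NewInt(2000000)},
	)
	fmt.Printf("testing genesis %x with two pre-funded accounts\n", genesis.Hash())
}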
func WriteTestNetGenesisBlock(stateDb, blockDb common.Database, nonce uint64) (*types.Block, error) {
func WriteTestNetGenesisBlock(chainDb ethdb.Database, nonce uint64) (*types.Block, error) {
testGenesis := fmt.Sprintf(`{
"nonce":"0x%x",
"gasLimit":"0x%x",
@ -157,5 +170,5 @@ func WriteTestNetGenesisBlock(stateDb, blockDb common.Database, nonce uint64) (*
"1a26338f0d905e295fccb71fa9ea849ffa12aaf4": {"balance": "1606938044258990275541962092341162602522202993782792835301376"}
}
}`, types.EncodeNonce(nonce), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes())
return WriteGenesisBlock(stateDb, blockDb, strings.NewReader(testGenesis))
return WriteGenesisBlock(chainDb, strings.NewReader(testGenesis))
}

View File

@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
// "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
@ -32,7 +32,7 @@ type TestManager struct {
// stateManager *StateManager
eventMux *event.TypeMux
db common.Database
db ethdb.Database
txPool *TxPool
blockChain *ChainManager
Blocks []*types.Block
@ -74,7 +74,7 @@ func (tm *TestManager) EventMux() *event.TypeMux {
// return nil
// }
func (tm *TestManager) Db() common.Database {
func (tm *TestManager) Db() ethdb.Database {
return tm.db
}

View File

@ -18,7 +18,7 @@ package core
import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
@ -28,8 +28,7 @@ type Backend interface {
BlockProcessor() *BlockProcessor
ChainManager() *ChainManager
TxPool() *TxPool
BlockDb() common.Database
StateDb() common.Database
ExtraDb() common.Database
ChainDb() ethdb.Database
DappDb() ethdb.Database
EventMux() *event.TypeMux
}

View File

@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
@ -56,7 +57,7 @@ func (self Storage) Copy() Storage {
type StateObject struct {
// State database for storing state changes
db common.Database
db ethdb.Database
trie *trie.SecureTrie
// Address belonging to this account
@ -82,15 +83,12 @@ type StateObject struct {
// Mark for deletion
// When an object is marked for deletion it will be deleted from the trie
// during the "update" phase of the state transition
remove bool
dirty bool
remove bool
deleted bool
dirty bool
}
func (self *StateObject) Reset() {
self.storage = make(Storage)
}
func NewStateObject(address common.Address, db common.Database) *StateObject {
func NewStateObject(address common.Address, db ethdb.Database) *StateObject {
object := &StateObject{db: db, address: address, balance: new(big.Int), gasPool: new(big.Int), dirty: true}
object.trie = trie.NewSecure((common.Hash{}).Bytes(), db)
object.storage = make(Storage)
@ -99,7 +97,7 @@ func NewStateObject(address common.Address, db common.Database) *StateObject {
return object
}
func NewStateObjectFromBytes(address common.Address, data []byte, db common.Database) *StateObject {
func NewStateObjectFromBytes(address common.Address, data []byte, db ethdb.Database) *StateObject {
// TODO clean me up
var extobject struct {
Nonce uint64
@ -183,14 +181,6 @@ func (self *StateObject) Update() {
}
}
func (c *StateObject) GetInstr(pc *big.Int) *common.Value {
if int64(len(c.code)-1) < pc.Int64() {
return common.NewValue(0)
}
return common.NewValueFromBytes([]byte{c.code[pc.Int64()]})
}
func (c *StateObject) AddBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Add(c.balance, amount))
@ -262,14 +252,11 @@ func (self *StateObject) Copy() *StateObject {
stateObject.gasPool.Set(self.gasPool)
stateObject.remove = self.remove
stateObject.dirty = self.dirty
stateObject.deleted = self.deleted
return stateObject
}
func (self *StateObject) Set(stateObject *StateObject) {
*self = *stateObject
}
//
// Attribute accessors
//
@ -278,20 +265,11 @@ func (self *StateObject) Balance() *big.Int {
return self.balance
}
func (c *StateObject) N() *big.Int {
return big.NewInt(int64(c.nonce))
}
// Returns the address of the contract/account
func (c *StateObject) Address() common.Address {
return c.address
}
// Returns the initialization Code
func (c *StateObject) Init() Code {
return c.initCode
}
func (self *StateObject) Trie() *trie.SecureTrie {
return self.trie
}
@ -309,11 +287,6 @@ func (self *StateObject) SetCode(code []byte) {
self.dirty = true
}
func (self *StateObject) SetInitCode(code []byte) {
self.initCode = code
self.dirty = true
}
func (self *StateObject) SetNonce(nonce uint64) {
self.nonce = nonce
self.dirty = true
@ -352,19 +325,6 @@ func (c *StateObject) CodeHash() common.Bytes {
return crypto.Sha3(c.code)
}
func (c *StateObject) RlpDecode(data []byte) {
decoder := common.NewValueFromBytes(data)
c.nonce = decoder.Get(0).Uint()
c.balance = decoder.Get(1).BigInt()
c.trie = trie.NewSecure(decoder.Get(2).Bytes(), c.db)
c.storage = make(map[string]common.Hash)
c.gasPool = new(big.Int)
c.codeHash = decoder.Get(3).Bytes()
c.code, _ = c.db.Get(c.codeHash)
}
// Storage change object. Used by the manifest for notifying changes to
// the sub channels.
type StorageState struct {

View File

@ -17,6 +17,7 @@
package state
import (
"bytes"
"math/big"
"testing"
@ -117,3 +118,106 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {
c.Assert(data1, checker.DeepEquals, res)
}
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
state := New(common.Hash{}, db)
stateobjaddr0 := toAddr([]byte("so0"))
stateobjaddr1 := toAddr([]byte("so1"))
var storageaddr common.Hash
data0 := common.BytesToHash([]byte{17})
data1 := common.BytesToHash([]byte{18})
state.SetState(stateobjaddr0, storageaddr, data0)
state.SetState(stateobjaddr1, storageaddr, data1)
// db, trie are already non-empty values
so0 := state.GetStateObject(stateobjaddr0)
so0.balance = big.NewInt(42)
so0.nonce = 43
so0.gasPool = big.NewInt(44)
so0.code = []byte{'c', 'a', 'f', 'e'}
so0.codeHash = so0.CodeHash()
so0.remove = true
so0.deleted = false
so0.dirty = false
state.SetStateObject(so0)
// and one with deleted == true
so1 := state.GetStateObject(stateobjaddr1)
so1.balance = big.NewInt(52)
so1.nonce = 53
so1.gasPool = big.NewInt(54)
so1.code = []byte{'c', 'a', 'f', 'e', '2'}
so1.codeHash = so1.CodeHash()
so1.remove = true
so1.deleted = true
so1.dirty = true
state.SetStateObject(so1)
so1 = state.GetStateObject(stateobjaddr1)
if so1 != nil {
t.Fatalf("deleted object not nil when getting")
}
snapshot := state.Copy()
state.Set(snapshot)
so0Restored := state.GetStateObject(stateobjaddr0)
so1Restored := state.GetStateObject(stateobjaddr1)
// non-deleted is equal (restored)
compareStateObjects(so0Restored, so0, t)
// deleted should be nil, both before and after restore of state copy
if so1Restored != nil {
t.Fatalf("deleted object not nil after restoring snapshot")
}
}
func compareStateObjects(so0, so1 *StateObject, t *testing.T) {
if so0.address != so1.address {
t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
}
if so0.balance.Cmp(so1.balance) != 0 {
t.Fatalf("Balance mismatch: have %v, want %v", so0.balance, so1.balance)
}
if so0.nonce != so1.nonce {
t.Fatalf("Nonce mismatch: have %v, want %v", so0.nonce, so1.nonce)
}
if !bytes.Equal(so0.codeHash, so1.codeHash) {
t.Fatalf("CodeHash mismatch: have %v, want %v", so0.codeHash, so1.codeHash)
}
if !bytes.Equal(so0.code, so1.code) {
t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
}
if !bytes.Equal(so0.initCode, so1.initCode) {
t.Fatalf("InitCode mismatch: have %v, want %v", so0.initCode, so1.initCode)
}
for k, v := range so1.storage {
if so0.storage[k] != v {
t.Fatalf("Storage key %s mismatch: have %v, want %v", k, so0.storage[k], v)
}
}
for k, v := range so0.storage {
if so1.storage[k] != v {
t.Fatalf("Storage key %s mismatch: have %v, want none.", k, v)
}
}
if so0.gasPool.Cmp(so1.gasPool) != 0 {
t.Fatalf("GasPool mismatch: have %v, want %v", so0.gasPool, so1.gasPool)
}
if so0.remove != so1.remove {
t.Fatalf("Remove mismatch: have %v, want %v", so0.remove, so1.remove)
}
if so0.deleted != so1.deleted {
t.Fatalf("Deleted mismatch: have %v, want %v", so0.deleted, so1.deleted)
}
if so0.dirty != so1.dirty {
t.Fatalf("Dirty mismatch: have %v, want %v", so0.dirty, so1.dirty)
}
}

View File

@ -18,10 +18,10 @@
package state
import (
"bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/trie"
@ -33,7 +33,7 @@ import (
// * Contracts
// * Accounts
type StateDB struct {
db common.Database
db ethdb.Database
trie *trie.SecureTrie
root common.Hash
@ -48,7 +48,7 @@ type StateDB struct {
}
// Create a new state from a given trie
func New(root common.Hash, db common.Database) *StateDB {
func New(root common.Hash, db ethdb.Database) *StateDB {
trie := trie.NewSecure(root[:], db)
return &StateDB{root: root, db: db, trie: trie, stateObjects: make(map[string]*StateObject), refund: new(big.Int), logs: make(map[common.Hash]Logs)}
}
@ -203,18 +203,20 @@ func (self *StateDB) UpdateStateObject(stateObject *StateObject) {
// Delete the given state object and delete it from the state trie
func (self *StateDB) DeleteStateObject(stateObject *StateObject) {
stateObject.deleted = true
addr := stateObject.Address()
self.trie.Delete(addr[:])
//delete(self.stateObjects, addr.Str())
}
// Retrieve a state object given by the address. Nil if not found
func (self *StateDB) GetStateObject(addr common.Address) *StateObject {
//addr = common.Address(addr)
stateObject := self.stateObjects[addr.Str()]
func (self *StateDB) GetStateObject(addr common.Address) (stateObject *StateObject) {
stateObject = self.stateObjects[addr.Str()]
if stateObject != nil {
if stateObject.deleted {
stateObject = nil
}
return stateObject
}
@ -236,7 +238,7 @@ func (self *StateDB) SetStateObject(object *StateObject) {
// Retrieve a state object or create a new state object if nil
func (self *StateDB) GetOrNewStateObject(addr common.Address) *StateObject {
stateObject := self.GetStateObject(addr)
if stateObject == nil {
if stateObject == nil || stateObject.deleted {
stateObject = self.CreateAccount(addr)
}
@ -274,10 +276,6 @@ func (self *StateDB) CreateAccount(addr common.Address) *StateObject {
// Setting, copying of the state methods
//
func (s *StateDB) Cmp(other *StateDB) bool {
return bytes.Equal(s.trie.Root(), other.trie.Root())
}
func (self *StateDB) Copy() *StateDB {
state := New(common.Hash{}, self.db)
state.trie = self.trie
@ -309,22 +307,6 @@ func (s *StateDB) Root() common.Hash {
return common.BytesToHash(s.trie.Root())
}
func (s *StateDB) Trie() *trie.SecureTrie {
return s.trie
}
// Resets the trie and all siblings
func (s *StateDB) Reset() {
s.trie.Reset()
// Reset all nested states
for _, stateObject := range s.stateObjects {
stateObject.Reset()
}
s.Empty()
}
// Syncs the trie and all siblings
func (s *StateDB) Sync() {
// Sync all nested states

Some files were not shown because too many files have changed in this diff.