Compare commits

463 Commits

Author SHA1 Message Date
Péter Szilágyi
477eb0933b params, swarm: release Geth v1.8.16, Swarm v0.3.4 2018-09-24 15:59:21 +03:00
Payne
1d9d3815e5 crypto/secp256k1: remove useless code (#17728)
`(void)data;` may cause a link error on Windows.
2018-09-21 21:42:02 +02:00
Péter Szilágyi
06d40d37b8 Merge pull request #17732 from karalabe/faucet-caching
cmd/faucet: cache internal state, avoid sync-trashing les
2018-09-21 14:20:46 +03:00
Péter Szilágyi
ee92bc537f Merge pull request #17383 from holiman/eip1283
Eip1283
2018-09-21 14:16:18 +03:00
Wuxiang
81080bf8cb core: fix a typo (#17733) 2018-09-21 13:45:42 +03:00
Péter Szilágyi
c528e3e3cf cmd/faucet: cache internal state, avoid sync-trashing les 2018-09-21 13:31:00 +03:00
Péter Szilágyi
1a16cc71c6 Merge pull request #17730 from karalabe/revert-go1.10-ppa
build: revert launchpad PPAs to Go 1.10
2018-09-21 12:34:22 +03:00
Péter Szilágyi
b0d60721f1 build: revert launchpad PPAs to Go 1.10 2018-09-21 11:31:23 +03:00
Felföldi Zsolt
ab13cd9924 les: fix invalid delivery handling in retriever (#17727) 2018-09-21 10:59:21 +03:00
Péter Szilágyi
32c05e82a3 Merge pull request #17726 from karalabe/go-1.11-ppa-fix
build/deb: upgrade launchpad PPA sources to Go 1.11 too
2018-09-21 00:33:26 +03:00
Péter Szilágyi
5e32152c02 build/deb: upgrade launchpad PPA sources to Go 1.11 too 2018-09-21 00:33:00 +03:00
gary rong
457e930f27 eth, miner: prefer locally generated uncles vs remote ones (#17715)
* core, eth: fix dependency cycle

* eth, miner: prefer locally generated uncles
2018-09-21 00:11:55 +03:00
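The preference is simple to sketch in Go. A minimal illustration, assuming hypothetical names (`worker`, `localUncles`, `remoteUncles` stand in for the miner's internals): local candidates are always tried before remote ones when filling a block's two uncle slots.

```go
package main

import "fmt"

// A sketch of preferring local uncles: candidates are kept in two
// separate sets, and the local set is always drained first.
type worker struct {
	localUncles  map[string]bool // uncles mined by this node
	remoteUncles map[string]bool // uncles seen on the network
}

// commitUncles picks up to max uncle hashes, local ones first.
func (w *worker) commitUncles(max int) []string {
	var picked []string
	for _, group := range []map[string]bool{w.localUncles, w.remoteUncles} {
		for hash := range group {
			if len(picked) == max {
				return picked
			}
			picked = append(picked, hash)
		}
	}
	return picked
}

func main() {
	w := &worker{
		localUncles:  map[string]bool{"0xlocal": true},
		remoteUncles: map[string]bool{"0xremote1": true, "0xremote2": true},
	}
	fmt.Println(w.commitUncles(2)) // the local uncle always makes the cut
}
```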
gary rong
ba0a8b7887 core, eth: fix dependency cycle (#17720) 2018-09-20 20:02:15 +03:00
Péter Szilágyi
f55c26ae6d Merge pull request #17719 from karalabe/update-chts
les, light, params: update light client CHTs
2018-09-20 15:10:04 +03:00
gary rong
d6254f827b all: protect self-mined block during reorg (#17656) 2018-09-20 15:09:30 +03:00
Péter Szilágyi
af89093116 les, light, params: update light client CHTs 2018-09-20 14:14:48 +03:00
Péter Szilágyi
f89dce0126 Merge pull request #17718 from karalabe/chain-age-logs
common, core, light: add block age into info logs
2018-09-20 13:01:33 +03:00
Péter Szilágyi
0f2ba07c41 common, core, light: add block age into info logs 2018-09-20 12:56:35 +03:00
Felföldi Zsolt
c37238cae9 les: fix retriever logic (#17705) 2018-09-20 10:46:39 +03:00
Guillaume Ballet
da29332c5f core/vm: add switches to select evm+ewasm interpreters (#17687)
Interpreter initialization is left to the PRs implementing them.
Options for external interpreters are passed after a colon in the
`--vm.ewasm` and `--vm.evm` switches.
2018-09-20 10:44:35 +03:00
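A minimal sketch of that colon convention (the `splitVMFlag` helper and the library path are ours, for illustration; this is not the geth flag-parsing code):

```go
package main

import (
	"fmt"
	"strings"
)

// splitVMFlag illustrates the convention described above: everything
// after the first colon in --vm.ewasm / --vm.evm is handed to the
// external interpreter as its options string.
func splitVMFlag(value string) (path, options string) {
	if i := strings.IndexByte(value, ':'); i >= 0 {
		return value[:i], value[i+1:]
	}
	return value, ""
}

func main() {
	path, opts := splitVMFlag("/usr/lib/libhera.so:metering=true,fallback=true")
	fmt.Println(path, "->", opts)
}
```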
Kevin
3fec73500b cmd/evm: EVM prestate initialization (#17685)
* Bugfix #17216: evm loads prestate file properly now

* code gofmted
2018-09-20 08:24:53 +02:00
HackyMiner
6975c72981 all: fix various comment typos (#17591)
* swarm: fixed comment typo
* eth: fixed comment typo
* cmd/puppeth: fixed comment typo
2018-09-19 18:10:40 +02:00
Peter Broadhurst
c35659c6a0 rpc: enable basic auth for websocket client (#17699) 2018-09-19 18:09:03 +02:00
Martin Holst Swende
6f004c46d5 accounts/keystore: double-check keystore file after creation (#17348) 2018-09-19 18:08:38 +02:00
Pedro Pombeiro
16e95f33b7 whisper: Fix interpretation of to parameter in shh_requestMessages (#16996)
The argument is inclusive rather than exclusive, according to the docs.
2018-09-19 17:44:30 +02:00
Balint Gabor
f5c7d1c8eb swarm/storage: Implement global timeout for fetcher (#17702) 2018-09-19 16:59:10 +02:00
Péter Szilágyi
736b45a876 Merge pull request #17701 from karalabe/go-1.11
travis, Dockerfile, appveyor, build: bump to Go 1.11
2018-09-19 15:08:42 +03:00
Wenbiao Zheng
bd9d79adba cmd/geth: typo export -> import (#17703) 2018-09-19 13:29:40 +03:00
Martin Holst Swende
16bc8741bf abi, signer: fix nil dereference in #17633 (#17653)
* abi,signer: fix nil dereference in #17633

* signer/core: tiny typo fix in test error message
2018-09-19 12:07:53 +03:00
gary rong
0b477712a1 consensus/clique: hide no transaction error (#17614) 2018-09-19 12:06:55 +03:00
Péter Szilágyi
faa69bea1c core, eth: fix goimports for Go 1.11 2018-09-19 11:47:09 +03:00
Samuel Marks
67c332e9b5 travis, Dockerfile, appveyor, build: bump to Go 1.11 2018-09-19 11:29:29 +03:00
Martin Holst Swende
360a72d54e tests: disable constantinople statetests 2018-09-19 10:05:44 +02:00
Péter Szilágyi
5d921fa3a0 core, params: polish net gas metering PR a bit 2018-09-18 16:29:51 +03:00
Janoš Guljaš
1f45ba9bb1 swarm/network: downgrade fetcher unable to request log message severity (#17692) 2018-09-18 14:54:52 +02:00
Martin Holst Swende
caa2c23a38 core,state: finish implementing Eip 1283 2018-09-18 13:08:32 +03:00
Martin Holst Swende
58374e28d9 core, state: initial implementation of Eip-1283 2018-09-18 13:08:28 +03:00
chenyufeng
b8aa5980cf cmd/puppeth: fix comment typo (#17690)
* ethdb: unified code comment style.

* puppeth: it is unnecessary to alloc pre-funded to 256 addresses

* Revert "puppeth: it is unnecessary to alloc pre-funded to 256 addresses"

This reverts commit 5e04fbccf0.

* puppeth: fix comment typo

* Revert "ethdb: unified code comment style."

This reverts commit a581efb3f0.

* cmd/puppeth: fix comment typo
2018-09-18 11:30:39 +03:00
Balint Gabor
bd58098f2d swarm: Chunk refactor improvements (#17683)
* swarm/network: Protocol bump (for chunk refactor)

* swarm/network: Increase discovery and stream protocol version too

* swarm/network: Increase priority queue cap
2018-09-18 10:17:13 +02:00
gary rong
5d1d1a808d consensus, ethdb, metrics: implement forced-meter (#17667) 2018-09-17 15:32:34 +03:00
Péter Szilágyi
41ac8dd803 Merge pull request #17675 from holiman/eip1234
Eip1234
2018-09-17 15:18:17 +03:00
chenyufeng
c1345b0742 cmd/puppeth: fix comment typo (#17684)
* ethdb: unified code comment style.

* puppeth: it is unnecessary to alloc pre-funded to 256 addresses

* Revert "puppeth: it is unnecessary to alloc pre-funded to 256 addresses"

This reverts commit 5e04fbccf0.

* puppeth: fix comment typo

* Revert "ethdb: unified code comment style."

This reverts commit a581efb3f0.
2018-09-17 15:09:09 +03:00
Martin Holst Swende
7efb12d29b ethash: documentation + cleanup 2018-09-17 11:53:36 +02:00
Péter Szilágyi
cc21928e12 Merge pull request #17622 from karalabe/chain-maker-seal
consensus/clique, core: chain maker clique + error tests
2018-09-17 11:35:42 +03:00
Martin Holst Swende
3df7df0386 ethash: less copy-paste for EIP 1234 2018-09-15 23:54:16 +02:00
Péter Szilágyi
7c71e936a7 Merge pull request #17674 from eosclassicteam/discord
README: Change gitter badge to discord
2018-09-15 11:11:30 +03:00
Felföldi Zsolt
d4a28a13ca les: fix distReq.sentChn double close bug (#17639) 2018-09-14 22:14:29 +02:00
Emil
86a03f97d3 all: simplify s[:] to s where s is a slice (#17673) 2018-09-14 22:07:13 +02:00
EOS Classic
44a1764f9c README: Change gitter badge to discord 2018-09-14 22:16:10 +09:00
Martin Holst Swende
7bb95a9a64 Merge pull request #17652 from YaoZengzeng/file-permission
cmd/clef: fix incorrect file permissions for secrets.dat
2018-09-14 08:38:13 +02:00
Liang ZOU
72c820c49e core/vm: fix typo 'EVM EVM' ==> 'EVM' (#17654) 2018-09-13 12:48:15 +03:00
Balint Gabor
3ff2f75636 swarm: Chunk refactor (#17659)
Co-authored-by: Janos Guljas <janos@resenje.org>
Co-authored-by: Balint Gabor <balint.g@gmail.com>
Co-authored-by: Anton Evangelatov <anton.evangelatov@gmail.com>
Co-authored-by: Viktor Trón <viktor.tron@gmail.com>
2018-09-13 11:42:19 +02:00
Anton Evangelatov
ff3a5d24d2 swarm/storage: remove redundant increments for dataIdx and entryCnt (#17484)
* swarm/storage: remove redundant increments for dataIdx and entryCnt

* swarm/storage: add Delete to LDBStore

* swarm/storage: wait for garbage collection
2018-09-12 14:39:45 +02:00
EOS Classic
0732617b65 consensus: implement Constantinople EIP 1234 2018-09-12 20:02:34 +09:00
Viktor Trón
bfce00385f Kademlia refactor (#17641)
* swarm/network: simplify kademlia/hive; rid interfaces

* swarm, swarm/network/stream, swarm/network/simulations, swarm/pss: adapt to new Kad API

* swarm/network: minor changes re review; add missing lock to NeighbourhoodDepthC
2018-09-12 11:24:56 +02:00
Viktor Trón
b06ff563a1 Merge pull request #17651 from ethersphere/wet-run-bug
cmd/swarm: password threw on upload manifest
2018-09-12 10:23:27 +02:00
YaoZengzeng
b040b75075 cmd/clef: fix incorrect file permissions for secrets.dat
Signed-off-by: YaoZengzeng <yaozengzeng@zju.edu.cn>
2018-09-12 16:15:11 +08:00
Elad
933ebaa47e cmd/swarm: password threw on upload manifest 2018-09-12 08:25:04 +02:00
chenyufeng
2d98099c25 rlp: fix comment typo (#17640) 2018-09-11 18:05:28 +03:00
Péter Szilágyi
4bb25042eb consensus/clique, core: chain maker clique + error tests 2018-09-11 16:40:00 +03:00
Viktor Trón
6dd87483d4 Encryption async api (#17603)
* swarm/storage/encryption: async segmentwise encryption/decryption

* swarm/storage: adapt hasherstore to encryption API change

* swarm/api: adapt RefEncryption for AC to new Encryption API

* swarm/storage/encryption: address review comments
2018-09-11 11:39:02 +02:00
Péter Szilágyi
10bac36647 Merge pull request #17620 from karalabe/clique-epoch-fix
consensus/clique: only trust snapshot for genesis or les checkpoint
2018-09-10 16:21:21 +03:00
TColl
0e32989a08 cmd/utils: typos in {Miner, MinerLegacy}GasPriceFlag (#17588) 2018-09-10 15:22:34 +03:00
Péter Szilágyi
bcfb7f58b9 consensus/clique: only trust snapshot for genesis or les checkpoint 2018-09-10 15:00:54 +03:00
Paweł Bylica
ae992a5d73 core/vm: Hide read only flag from Interpreter interface (#17461)
Makes the Interpreter interface a bit more stateless and abstract.

This change is dictated by the EVMC design: EVMC tries to keep the responsibility for EVM features entirely inside the VMs, where feasible. This makes the VM "stateless", because the VM does not need to pass any information between executions; all information is included in the parameters of the execute function.
2018-09-07 18:13:25 +02:00
Elad
8b9b149d54 swarm/api/http: bzz-immutable wrong handler bug (#17602) 2018-09-07 15:23:29 +02:00
Elad
70d31fb278 cmd/swarm: added password to ACT (#17598) 2018-09-07 09:56:05 +02:00
Elad
580145e96d swarm/storage: added metrics for db entry count (#17589) 2018-09-06 12:11:38 +02:00
Elad
4c15ffffdd swarm/api/http: added a regression test for resolver bug from #17483 (#17502) 2018-09-06 12:08:39 +02:00
Elad
5918b88a8f cmd/swarm: added publisher key assertion to act tests (#17471) 2018-09-05 11:33:07 +02:00
b00ris
8711e2b636 whisper: add light mode check to handshake (#16725) 2018-09-05 10:57:45 +02:00
Hyung-Kyu Hqueue Choi
cf33d8b83c core: fix typo in comment (#17586) 2018-09-05 10:29:51 +02:00
Diep Pham
42bd67bd6f accounts/abi: fix unpacking of negative int256 (#17583) 2018-09-04 17:53:28 +02:00
Elad
beee7a52e0 cmd/swarm: added scaling test for ACT manifests (#17496) 2018-09-04 14:29:00 +02:00
Richard Littauer
661aa4dc20 .github: slight edits to No Response template (#17475)
The template was not grammatical to me as it was. I have edited the language a tiny bit to make the close comment more clear and less abrasive.
2018-09-04 14:13:50 +02:00
ligi
3e81840061 common: fix typo (#17582)
Fixes #17581
2018-09-04 14:12:16 +02:00
Evangelos Pappas
84084df26c cmd/ethkey: fix the README to match updated commands (#17332) 2018-09-04 13:45:27 +02:00
dipingxian2
003e031994 cmd/faucet: remove trailing newline in password (#17558)
Fixes #17557
2018-09-04 13:16:49 +02:00
Martin Holst Swende
32f28a9360 core/vm, tests: update tests, enable constantinople statetests, fix SAR opcode (#17538)
This commit does a few things at once:

- Updates the tests to contain the latest data from ethereum/tests repo.
- Enables Constantinople state tests. This is needed to be able to
  fuzz-test the evm with constantinople rules.
- Fixes the error in opSAR that we've known about for some time. I was
  kind of saving it to see if we hit upon it with the random test
  generator, but it's difficult to both enable the tests and have the
  bug there -- we don't want to forget about it, so maybe it's better
  to just fix it.
2018-09-04 10:49:18 +02:00
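For context, a small sketch of the SAR semantics the fix targets, per EIP-145: the operand is treated as a signed 256-bit two's-complement value, and shift amounts of 256 or more collapse the result to 0 or -1 depending on sign. An illustration, not the geth opcode implementation:

```go
package main

import (
	"fmt"
	"math/big"
)

// sar models EIP-145 arithmetic shift right on a 256-bit word.
func sar(shift uint, value *big.Int) *big.Int {
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)
	signed := new(big.Int).Set(value)
	if signed.Bit(255) == 1 { // negative in two's complement
		signed.Sub(signed, two256)
	}
	if shift >= 256 {
		// Everything shifted out; only the sign survives.
		if signed.Sign() >= 0 {
			signed.SetInt64(0)
		} else {
			signed.SetInt64(-1)
		}
	} else {
		signed.Rsh(signed, shift) // big.Int shifts round toward -inf, matching SAR
	}
	// Map back onto the unsigned 256-bit representation.
	mask := new(big.Int).Sub(two256, big.NewInt(1))
	return signed.And(signed, mask)
}

func main() {
	minusEight := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(8))
	fmt.Printf("%x\n", sar(1, minusEight)) // ff..fc, i.e. -4 as a 256-bit word
}
```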
Wenbiao Zheng
6a33954731 core, eth, trie: use common/prque (#17508) 2018-09-03 17:33:21 +02:00
Eugene Valeyev
6fc8494620 mobile: add whisper client (#15922) 2018-09-03 17:30:47 +02:00
Anton Evangelatov
cc2b39bbd1 consensus/ethash: increase timeout in test (#17526)
This is an attempt to fix the flaky consensus/ethash tests under macOS.
2018-09-03 16:59:23 +02:00
Gísli Kristjánsson
c9a0b36a5f rpc: reset client write deadline after write (#17549)
This fixes an issue with websocket ping frame handling.
2018-09-03 16:56:30 +02:00
Mymskmkt
e1c64a7d89 params: fix typo (#17552) 2018-09-03 16:52:32 +02:00
ult-bobonovski
992b77992f consensus: fix comment typo (#17562) 2018-09-03 16:49:00 +02:00
HAOYUatHZ
5c0954afff p2p/discv5: make idx bounds checking more sound (#17571) 2018-09-03 16:47:20 +02:00
Péter Szilágyi
62e94895da params, swarm: begin geth v1.8.16 and swarm v0.3.4 cycle 2018-08-29 18:27:43 +03:00
Péter Szilágyi
89451f7c38 params, swarm: release geth v1.8.15 and swarm 0.3.3 2018-08-29 18:24:32 +03:00
gary rong
f0242ee76d miner: keep the timestamp for resubmitted mining block (#17547) 2018-08-29 17:31:59 +03:00
Péter Szilágyi
a4bc2c31e1 Merge pull request #17540 from karalabe/miner-uncle-fix
miner: track uncles more aggressively
2018-08-29 13:59:15 +03:00
Adam Babik
75ae5af62a whisper: fix loop in expire() (#17532) 2018-08-29 13:56:13 +03:00
Anton Evangelatov
9574968116 cmd/swarm: disable ACT tests on windows (#17536) 2018-08-29 13:52:21 +03:00
Péter Szilágyi
e29c2e4364 Merge pull request #17546 from karalabe/miner-max-limit
cmd, core, eth, miner, params: configurable gas floor and ceil
2018-08-29 13:49:59 +03:00
Péter Szilágyi
f751c6ed47 miner: track uncles more aggressively 2018-08-29 13:29:59 +03:00
Péter Szilágyi
e8f229b82e cmd, core, eth, miner, params: configurable gas floor and ceil 2018-08-29 12:40:12 +03:00
gary rong
c1c003e4ff consensus, miner: stale block mining support (#17506)
* consensus, miner: stale block mining support

* consensus, miner: refactor seal signature

* cmd, consensus, eth: add miner noverify flag

* cmd, consensus, miner: polish
2018-08-28 16:59:05 +03:00
Felföldi Zsolt
63352bf424 core: safe indexer operation when syncing starts before the checkpoint (#17511) 2018-08-28 10:31:34 +03:00
gary rong
b69476b372 all: make indexer configurable (#17188) 2018-08-28 10:08:16 +03:00
Mymskmkt
c64d72bea2 consensus/ethash: remove unnecessary type declaration (#17529) 2018-08-28 10:05:25 +03:00
Sheldon
0bec85f2e2 core: fix typos in comment (#17531) 2018-08-28 10:04:33 +03:00
Anton Evangelatov
f236ac710e vendor: github.com/rjeczalik/notify update to master (#17527) 2018-08-28 10:03:33 +03:00
Geon Kim
1acefafe22 swarm/api: fix typo (#17500) 2018-08-27 11:51:58 +03:00
Caesar Chad
f0488e80f7 signer/storage: fix typo (#17504) 2018-08-27 11:51:26 +03:00
Wenbiao Zheng
d1aa605f1e all: remove the duplicate 'the' in annotations (#17509) 2018-08-27 11:49:29 +03:00
Mymskmkt
70398d300d trie: fix typo (#17498) 2018-08-24 21:08:48 +03:00
Péter Szilágyi
c134e00e48 Merge pull request #17494 from karalabe/mined-block-uncle-check
miner: differentiate between uncle and lost block
2018-08-23 16:03:10 +03:00
gary rong
40a71f28cf miner: fix state commit, track old work packages too (#17490)
* miner: commit the state related to the sealing result

* consensus, core, miner, mobile: introduce sealHash interface

* miner: evict pending task with threshold

* miner: go fmt
2018-08-23 16:02:57 +03:00
gary rong
c3f7e3be3b core/statedb: deep copy logs (#17489) 2018-08-23 15:59:58 +03:00
Péter Szilágyi
1136269a79 miner: differentiate between uncle and lost block 2018-08-23 15:44:27 +03:00
Péter Szilágyi
67d6d0bb7d Merge pull request #17492 from karalabe/eth-miner-threads-defaults
cmd, eth: clean up miner startup API, drop noop config field
2018-08-23 14:17:12 +03:00
Péter Szilágyi
1e63a015a5 miner: add two stress tests based on clique and ethash 2018-08-23 14:09:45 +03:00
Péter Szilágyi
92381ee009 cmd, eth: clean up miner startup API, drop noop config field 2018-08-23 14:09:45 +03:00
Mymskmkt
1df1187d83 p2p: fix comment typo (#17491) 2018-08-23 11:47:43 +03:00
Elad
f34f361ca6 swarm/api/http: fixed resolver bug (#17483) 2018-08-22 17:53:33 +03:00
Péter Szilágyi
2993fb519d params, swarm: begin geth v1.8.15 and swarm v0.3.3 cycle 2018-08-22 11:42:03 +03:00
Péter Szilágyi
316fc7ecfc params, swarm: release Geth v1.8.14 and Swarm v0.3.2 2018-08-22 11:39:00 +03:00
gary rong
af85d8e2b3 cmd, eth: apply default miner recommit setting (#17479) 2018-08-22 09:47:50 +03:00
Péter Szilágyi
6a8b47c880 Merge pull request #17472 from karalabe/txpool-locals
cmd, core, miner: add --txpool.locals and priority mining
2018-08-22 09:46:13 +03:00
Péter Szilágyi
e0d0e64ce2 cmd, core, miner: add --txpool.locals and priority mining 2018-08-22 09:43:57 +03:00
gary rong
b2c644ffb5 cmd, eth, miner: make recommit configurable (#17444)
* cmd, eth, miner: make recommit configurable

* cmd, eth, les, miner: polish a bit

* miner: filter duplicate sealing work

* cmd: remove unnecessary conversion

* miner: avoid micro-optimization in favor of cleaner code
2018-08-21 22:56:54 +03:00
Geon Kim
522cfc68ff swarm: fix typos (#17473) 2018-08-21 21:13:33 +03:00
gary rong
a063fe9b2d miner: fix uncle iteration logic (#17469) 2018-08-21 17:33:04 +03:00
Péter Szilágyi
1dcad8b2de Merge pull request #17466 from karalabe/rinkeby-light-snapshots
consensus/clique, light: light client snapshots on Rinkeby
2018-08-21 16:13:26 +03:00
Jeremy Schlatter
86acdf1a5b vendor: update rjeczalik/notify so that it compiles on go1.11 (#17467) 2018-08-21 16:13:03 +03:00
Péter Szilágyi
9f036647e4 consensus/clique, light: light client snapshots on Rinkeby 2018-08-21 15:21:59 +03:00
Felföldi Zsolt
355fc47d39 les: fix CHT field in nodeInfo (#17465) 2018-08-21 14:58:10 +03:00
Pierre Neter
76301ca051 eth: upgradedb subcommand was dropped (#17464) 2018-08-21 13:36:38 +03:00
Anton Evangelatov
c582667c9b swarm/network: bump bzz protocol version (#17449) 2018-08-21 11:34:40 +03:00
Anton Evangelatov
dcd97c41fa swarm, swarm/network, swarm/pss: log error and fix logs (#17410)
* swarm, swarm/network, swarm/pss: log error and fix logs

* swarm/pss: log compressed publickey
2018-08-21 11:34:10 +03:00
Péter Szilágyi
85c6a1c526 Merge pull request #17451 from karalabe/bn256-relicense
crypto/bn256: add missing license file, release wrapper in BSD-3
2018-08-21 11:33:33 +03:00
Péter Szilágyi
ecca49e078 Merge pull request #17460 from holiman/tracerfix
Ensure from < to when tracing chain
2018-08-21 11:32:50 +03:00
Martin Holst Swende
106d196ec4 eth: ensure from<to when tracing chain (credits Chen Nan via bugbounty) 2018-08-21 09:48:53 +02:00
Péter Szilágyi
a6d45a5d00 crypto/bn256: add missing license file, release wrapper in BSD-3 2018-08-20 18:05:06 +03:00
Nilesh Trivedi
7d38d53ae4 cmd/puppeth: accept ssh identity in the server string (#17407)
* cmd/puppeth: Accept identityfile in the server string with fallback to id_rsa

* cmd/puppeth: code polishes + fix health check double ports
2018-08-20 16:54:38 +03:00
Felföldi Zsolt
1de9ada401 light: new CHTs (#17448) 2018-08-20 16:49:28 +03:00
Aditya
c929030e28 core/types: fix docs about protected Vs (#17436) 2018-08-20 15:14:50 +03:00
Péter Szilágyi
87f294aa0b Merge pull request #17437 from hackmod/console-typo
console: fixed comment typo
2018-08-20 15:12:48 +03:00
Péter Szilágyi
68f0a414ea Merge pull request #17430 from karalabe/miner-notify-less-aggressive-test
consensus/ethash: reduce notify test aggressiveness
2018-08-20 15:11:49 +03:00
Anton Evangelatov
55d050ccd8 travis: remove brew update and osxfuse install (#17429) 2018-08-20 15:11:08 +03:00
Anton Evangelatov
a8aa89accb swarm/storage: cleanup task - remove bigger chunks (#17424) 2018-08-20 15:10:30 +03:00
Elad
c4078fc805 cmd/swarm: added swarm bootnodes (#17414) 2018-08-20 15:09:50 +03:00
Wuxiang
d3488c1aff p2p: fix typo (#17446) 2018-08-20 15:07:21 +03:00
hackyminer
0fd02fe9cf console: fixed comment typo 2018-08-18 07:47:10 +09:00
Péter Szilágyi
251c868008 consensus/ethash: reduce notify test aggressiveness 2018-08-17 18:12:39 +03:00
Péter Szilágyi
99e1a5e0fb Merge pull request #17426 from karalabe/miner-fees-log-fix
miner: update mining log with correct fee calculation
2018-08-17 16:46:57 +03:00
Péter Szilágyi
22cd3f70a6 miner: update mining log with correct fee calculation 2018-08-17 15:47:14 +03:00
Felix Lange
2695fa2213 les: fix crasher in NodeInfo when running as server (#17419)
* les: fix crasher in NodeInfo when running as server

The ProtocolManager computes CHT and Bloom trie roots by asking the
indexers for their current head. It tried to get the indexers from
LesOdr, but no LesOdr instance is created in server mode.

Attempt to fix this by moving the indexers, protocol creation and
NodeInfo to a new lesCommons struct which is embedded into both server
and client.

All this setup code should really be cleaned up, but this is just a
hotfix so we have to do that some other time.

* les: fix commons protocol maker
2018-08-17 13:21:53 +03:00
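The shape of that hotfix is plain struct embedding. A minimal sketch with stub types standing in for the real indexers and NodeInfo plumbing:

```go
package main

import "fmt"

// ChainIndexer stands in for core.ChainIndexer; the fields and the
// NodeInfo shape below are illustrative, not the actual definitions.
type ChainIndexer struct{ storedSections uint64 }

// lesCommons holds everything NodeInfo needs, so it is available in
// both modes instead of living behind a client-only LesOdr.
type lesCommons struct {
	chtIndexer       *ChainIndexer
	bloomTrieIndexer *ChainIndexer
}

func (c *lesCommons) nodeInfo() string {
	// Ask the indexers directly; no LesOdr dereference that would be
	// nil when running as a server.
	return fmt.Sprintf("cht sections: %d", c.chtIndexer.storedSections)
}

// Both the server and the client embed the commons.
type LesServer struct{ lesCommons }
type LightEthereum struct{ lesCommons }

func main() {
	srv := LesServer{lesCommons{
		chtIndexer:       &ChainIndexer{storedSections: 42},
		bloomTrieIndexer: &ChainIndexer{},
	}}
	fmt.Println(srv.nodeInfo())
}
```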
Anton Evangelatov
f44046a1c6 build: do not require ethereum-swarm deb when installing ethereum (#17425) 2018-08-17 11:33:39 +03:00
Péter Szilágyi
2a06791461 Merge pull request #17368 from karalabe/bn256-go1.11
crypto/bn256: fix issues caused by Go 1.11
2018-08-17 11:01:35 +03:00
Sasuke1964
60390878a5 accounts: fixed typo (#17421) 2018-08-17 10:38:02 +03:00
Péter Szilágyi
9bf6bb8f63 Merge pull request #17405 from karalabe/miner-remote-dag
consensus/ethash: use DAGs for remote mining, generate async
2018-08-16 15:25:25 +03:00
Péter Szilágyi
1d439b5e10 Merge pull request #17416 from karalabe/miner-details
miner: add gas and fee details to mining logs
2018-08-16 15:04:50 +03:00
Péter Szilágyi
62f5137a72 miner: add gas and fee details to mining logs 2018-08-16 14:49:00 +03:00
gary rong
54216811a0 miner: regenerate mining work every 3 seconds (#17413)
* miner: regenerate mining work every 3 seconds

* miner: polish
2018-08-16 14:14:33 +03:00
Péter Szilágyi
5952d962dc Merge pull request #17412 from karalabe/puppeth-fix-dial-panic
cmd/puppeth: fix nil panic on disconnected stats gathering
2018-08-16 13:19:01 +03:00
Péter Szilágyi
3e21adc648 crypto/bn256: fix issues caused by Go 1.11 2018-08-16 11:02:16 +03:00
Péter Szilágyi
b24fb76a3a cmd/puppeth: fix nil panic on disconnected stats gathering 2018-08-16 09:41:16 +03:00
Felföldi Zsolt
2cdf6ee7e0 light: CHT and bloom trie indexers working in light mode (#16534)
This PR enables the indexers to work in light client mode by
downloading a part of these tries (the Merkle proofs of the last
values of the last known section) in order to be able to add new
values and recalculate subsequent hashes. It also adds CHT data to
NodeInfo.
2018-08-15 22:25:46 +02:00
Elad
e8752f4e9f cmd/swarm, swarm: added access control functionality (#17404)
Co-authored-by: Janos Guljas <janos@resenje.org>
Co-authored-by: Anton Evangelatov <anton.evangelatov@gmail.com>
Co-authored-by: Balint Gabor <balint.g@gmail.com>
2018-08-15 17:41:52 +02:00
Péter Szilágyi
d8541a9f99 consensus/ethash: use DAGs for remote mining, generate async 2018-08-15 14:38:39 +03:00
gary rong
040aa2bb10 miner: streaming uncle blocks (#17320)
* miner: stream uncle block

* miner: polish
2018-08-15 14:09:17 +03:00
Péter Szilágyi
e598ae5c01 Merge pull request #17402 from karalabe/deprecate-flags
cmd: polish miner flags, deprecate olds, add upgrade path
2018-08-15 12:09:34 +03:00
Péter Szilágyi
2a17fe2561 cmd: polish miner flags, deprecate olds, add upgrade path 2018-08-15 11:41:23 +03:00
Jeff Prestes
212bba47ff backends: configurable gas limit to allow testing large contracts (#17358)
* backends: increase gaslimit in order to allow tests of large contracts
2018-08-15 10:15:42 +03:00
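A minimal usage sketch, assuming the post-PR signature `backends.NewSimulatedBackend(alloc, gasLimit)`:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Fund a test account in the simulated genesis block.
	addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
	alloc := core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}}

	// The gas limit argument is what this PR makes configurable; raise
	// it when a test contract is too large for the old default.
	backend := backends.NewSimulatedBackend(alloc, 10000000)
	backend.Commit() // mine a block so the genesis funds are current
	fmt.Println("simulated backend ready")
}
```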
Felföldi Zsolt
b52bb31b76 p2p/discv5: add delay to refresh cycle when no seed nodes are found (#16994) 2018-08-14 22:59:18 +02:00
Felföldi Zsolt
b2ddb1fcbf les: implement client connection logic (#16899)
This PR implements les.freeClientPool. It also adds a simulated clock
in common/mclock, which enables time-sensitive tests to run quickly
and still produce accurate results, and package common/prque which is
a generalised variant of prque that enables removing elements other
than the top one from the queue.

les.freeClientPool implements a client database that limits the
connection time of each client and manages accepting/rejecting
incoming connections and even kicking out some connected clients. The
pool calculates recent usage time for each known client (a value that
increases linearly when the client is connected and decreases
exponentially when not connected). Clients with lower recent usage are
preferred, unknown nodes have the highest priority. Already connected
nodes receive a small bias in their favor in order to avoid accepting
and instantly kicking out clients.

Note: the pool can use any string for client identification. Using
signature keys for that purpose would not make sense when being known
has a negative value for the client. Currently the LES protocol
manager uses IP addresses (without the port) to identify clients.
2018-08-14 22:44:46 +02:00
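A minimal sketch of that recent-usage accounting (the field names, growth rate, and half-life constant are assumptions for illustration, not les's actual code):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// clientInfo tracks one client's recent usage as described above:
// it grows linearly while the client is connected and decays
// exponentially while it is disconnected.
type clientInfo struct {
	usage      float64 // accumulated usage at lastUpdate, in seconds
	connected  bool
	lastUpdate time.Time
}

const decayHalfLife = time.Hour // assumed decay constant

func (c *clientInfo) recentUsage(now time.Time) float64 {
	dt := now.Sub(c.lastUpdate).Seconds()
	if c.connected {
		return c.usage + dt // linear growth while connected
	}
	// exponential decay while disconnected
	return c.usage * math.Exp(-math.Ln2*dt/decayHalfLife.Seconds())
}

func main() {
	c := clientInfo{usage: 3600, lastUpdate: time.Now().Add(-decayHalfLife)}
	// After one idle half-life, recent usage has halved: ~1800.
	fmt.Printf("%.0f\n", c.recentUsage(time.Now()))
}
```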
gary rong
a1783d1697 miner: move agent logic to worker (#17351)
* miner: move agent logic to worker

* miner: polish

* core: persist block before reorg
2018-08-14 18:34:33 +03:00
gary rong
e0e0e53401 crypto: change formula for create2 (#17393) 2018-08-14 18:30:42 +03:00
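For reference, the CREATE2 address formula this commit moves to, as later standardized in EIP-1014, worked through in a short sketch (`create2Address` is our own helper, not the geth API):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// create2Address spells out the EIP-1014 formula:
// keccak256(0xff ++ sender ++ salt ++ keccak256(initCode))[12:]
func create2Address(sender common.Address, salt [32]byte, initCode []byte) common.Address {
	data := make([]byte, 0, 1+20+32+32)
	data = append(data, 0xff)
	data = append(data, sender.Bytes()...)
	data = append(data, salt[:]...)
	data = append(data, crypto.Keccak256(initCode)...)
	return common.BytesToAddress(crypto.Keccak256(data)[12:])
}

func main() {
	var salt [32]byte
	sender := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
	fmt.Println(create2Address(sender, salt, []byte{0x00}).Hex())
}
```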
Anton Evangelatov
97887d98da swarm/network, swarm/storage: validate chunk size (#17397)
* swarm/network, swarm/storage: validate default chunk size

* swarm/bmt, swarm/network, swarm/storage: update BMT hash initialisation

* swarm/bmt: move segmentCount to tests

* swarm/chunk: change chunk.DefaultSize to be untyped const

* swarm/storage: add size validator

* swarm/storage: add chunk size validation to localstore

* swarm/storage: move validation from localstore to validator

* swarm/storage: global chunk rules in MRU
2018-08-14 16:03:56 +02:00
Yao Zengzeng
8a040de60b README.md: fix some typos (#17381)
Signed-off-by: YaoZengzeng <yaozengzeng@zju.edu.cn>
2018-08-14 14:25:36 +03:00
Eugene Valeyev
e07e507d1a whisper: fixed broken partial topic filtering
Changes in #15811 broke partial topic filtering. Re-enable it.
2018-08-13 16:27:25 +02:00
Péter Szilágyi
d8328a96b4 Merge pull request #17347 from karalabe/miner-notify
cmd, consensus/ethash, eth: miner push notifications
2018-08-13 17:03:16 +03:00
Mymskmkt
fb368723ac core: fix comment typo (#17376) 2018-08-13 11:40:52 +03:00
Janoš Guljaš
6d1e292eef Manifest cli fix and upload defaultpath only once (#17375)
* cmd/swarm: fix manifest subcommands and add tests

* cmd/swarm: manifest update: update default entry for non-encrypted uploads

* swarm/api: upload defaultpath file only once

* swarm/api/client: improve UploadDirectory default path handling

* cmd/swarm: support absolute and relative default path values

* cmd/swarm: fix a typo in test

* cmd/swarm: check encrypted uploads in manifest update tests
2018-08-10 16:12:55 +02:00
Elad
3ec5dda4d2 swarm/api/http: added logging to denote request ended (#17371) 2018-08-10 13:49:37 +02:00
Péter Szilágyi
f0998415ba cmd, consensus/ethash, eth: miner push notifications 2018-08-10 09:06:59 +03:00
Janoš Guljaš
45eaef2431 cmd/swarm: solve rare cases of using the same random port in tests (#17352) 2018-08-09 16:15:59 +02:00
Janoš Guljaš
3bcb501c8f swarm/api: close tar writer in GetDirectoryTar to flush and clean (#17339) 2018-08-09 16:15:07 +02:00
Janoš Guljaš
d3e4c2dcb0 cmd/swarm: disable TestCLISwarmFs fuse test on darwin (#17340) 2018-08-09 16:14:30 +02:00
Anton Evangelatov
7b5c375825 cmd/swarm: remove shadow err (#17360) 2018-08-09 12:37:00 +03:00
Péter Szilágyi
beade042d1 Merge pull request #17357 from karalabe/tracer-trie-deref-bug
eth, trie: fix tracer GC which accidentally pruned the metaroot
2018-08-09 12:35:12 +03:00
Péter Szilágyi
11bbc66082 eth, trie: fix tracer GC which accidentally pruned the metaroot 2018-08-09 12:33:30 +03:00
libotony
834057592f p2p/discv5: fix negative index after uint convert to int (#17274) 2018-08-09 10:03:42 +03:00
Jay
abbb219933 rpc: fix a subscription name (#17345) 2018-08-09 08:56:35 +03:00
Mymskmkt
8051a0768a trie: fix comment typo (#17350) 2018-08-08 16:08:40 +03:00
Giulio M
a1eb9c7d13 swarm/api/http: fixed list leaf links (#17342) 2018-08-08 09:33:06 +02:00
Janoš Guljaš
00e6da9704 swarm/bmt: ignore data longer then 4096 bytes in Hasher.Write (#17338) 2018-08-07 15:34:33 +02:00
Attila Gazso
9df16f3468 swarm: Added lightnode flag (#17291)
* swarm: Added lightnode flag

Added --lightnode command line parameter
Added LightNode to Handshake message

* swarm/config: Fixed variable naming

* cmd/swarm: Changed BoolTFlag to BoolFlag for SwarmLightNodeEnabled

* swarm/network: Changed logging

* swarm/network: Changed protocol version testing

* swarm/network: Renamed DefaultNetworkID variable to TestProtocolNetworkID

* swarm/network: Bumped protocol version

* swarm/network: Changed LightNode handshake test to table driven

* swarm/network: Changed back TestProtocolVersion to 5 for now

* swarm/network: Moved the test configuration inside the test function scope
2018-08-07 15:34:11 +02:00
b00ris
8461fea44b whisper: remove unused error (#17315) 2018-08-07 15:16:56 +02:00
Elad
4bb2dc3d09 swarm/api/http: test fixes (#17334) 2018-08-07 13:40:38 +02:00
Oleg Kovalov
cf05ef9106 p2p, swarm, trie: avoid copying slices in loops (#17265) 2018-08-07 13:56:40 +03:00
Anton Evangelatov
de9b0660ac swarm/README: add more sections to easily onboard developers (#17333) 2018-08-07 12:56:01 +02:00
Andrew Chiw
042191338d swarm/api/http: GET/PUT/PATCH/DELETE/POST multipart form unit tests. (#17277)
httpDo has a verbose option that dumps the HTTP request
2018-08-07 12:00:12 +02:00
Elad
93fe16b0a5 swarm/api/http: refactored http package (#17309) 2018-08-07 11:56:55 +02:00
Javier Peletier
64a4e89504 swarm/storage/mru: HOTFIX - fix panic in Handler.update (#17313) 2018-08-07 11:53:56 +02:00
Felföldi Zsolt
eef65b20fc p2p: use safe atomic operations when changing connFlags (#17325) 2018-08-06 15:46:30 +03:00
Felföldi Zsolt
c4df67461f Merge pull request #16333 from shazow/addremovetrustedpeer
rpc: Add admin_addTrustedPeer and admin_removeTrustedPeer.
2018-08-06 13:30:04 +02:00
gary rong
941018b570 miner: seperate state, receipts for different mining work (#17323) 2018-08-06 12:55:44 +03:00
Janoš Guljaš
a72ba5a55b cmd/swarm, swarm: various test fixes (#17299)
* swarm/network/simulation: increase the sleep duration for TestRun

* cmd/swarm, swarm: fix failing tests on mac

* cmd/swarm: update TestCLISwarmFs skip comment

* swarm/network/simulation: adjust disconnections on simulation close

* swarm/network/simulation: call cleanups after net shutdown
2018-08-06 12:33:22 +03:00
stormpang
6711f098d5 core/vm: fix comment typo (#17319)
antything --> anything
:P
2018-08-06 11:42:49 +03:00
Péter Szilágyi
c376a5263f Merge pull request #17318 from ligi/fix_punctuation
Fix punctuation - closes #17317
2018-08-06 11:11:58 +03:00
ligi
2901b8b2d2 README: Fix punctuation - closes #17317 2018-08-05 15:40:22 +02:00
Péter Szilágyi
35fcd2f423 Merge pull request #17311 from karalabe/puppeth-graceful-stop
cmd/puppeth: graceful shutdown on redeploys
2018-08-03 13:26:39 +03:00
Péter Szilágyi
faf0e06ed8 cmd/puppeth: graceful shutdown on redeploys 2018-08-03 12:08:19 +03:00
gary rong
51db5975cc consensus/ethash: move remote agent logic to ethash internal (#15853)
* consensus/ethash: start remote goroutine to handle remote mining

* consensus/ethash: expose remote miner api

* consensus/ethash: expose submitHashrate api

* miner, ethash: push empty block to sealer without waiting for execution

* consensus, internal: add getHashrate API for ethash

* consensus: add three method for consensus interface

* miner: expose consensus engine running status to miner

* eth, miner: specify etherbase when miner created

* miner: commit new work when consensus engine is started

* consensus, miner: fix some logic

* all: delete useless interfaces

* consensus: polish a bit
2018-08-03 11:33:37 +03:00
Roc Yu
70176cda0e accounts/keystore: rename skipKeyFile to nonKeyFile to better reveal the function purpose (#17290) 2018-08-03 10:25:17 +03:00
Péter Szilágyi
d56fa8a659 Merge pull request #17310 from karalabe/mobile-nil-panic
mobile: fix missing return for CallMsg.SetTo(nil)
2018-08-03 09:37:57 +03:00
Péter Szilágyi
e4cb158d01 mobile: fix missing return for CallMsg.SetTo(nil) 2018-08-03 08:42:31 +03:00
Hyung-Kyu Hqueue Choi
0ab54de1a5 core/vm: update benchmarks for core/vm (#17308)
- Update benchmarks to use a pool of int pools.
  Otherwise, benchmarks are aborted with a segmentation fault.

Signed-off-by: Hyung-Kyu Choi <hqueue@users.noreply.github.com>
2018-08-03 08:15:33 +03:00
Péter Szilágyi
adc2944b4c Merge pull request #17301 from karalabe/tests-enable-constantinople
tests: enable the Constantinople fork definition
2018-08-02 14:42:06 +03:00
Péter Szilágyi
16eaf2b158 Merge pull request #17302 from karalabe/revert-evm-nil-panic
Revert "cmd/evm: change error msg output to stderr (#17118)"
2018-08-01 19:43:01 +03:00
Péter Szilágyi
83e2761c3a Revert "cmd/evm: change error msg output to stderr (#17118)"
This reverts commit fb9f7261ec.
2018-08-01 19:09:08 +03:00
Péter Szilágyi
353a82385b tests: enable the Constantinople fork definition 2018-08-01 18:47:09 +03:00
Anton Evangelatov
46d4721519 build: explicitly name all packages to be cross-compiled (#17288) 2018-07-31 16:34:54 +03:00
Péter Szilágyi
454382e81a params, swarm/version: begin Geth v1.8.14, Swarm v0.3.2 cycle 2018-07-31 13:39:22 +03:00
Péter Szilágyi
225171a4bf params, swarm/version: release Geth v1.8.13, Swarm 0.3.1 2018-07-31 13:35:53 +03:00
Ha ĐANG
702b8a7aec core/vm: fix typo in cryptographic hash function name (#17285) 2018-07-31 13:27:51 +03:00
Ryan Schneider
5d7e18539e rpc: make HTTP RPC timeouts configurable, raise defaults (#17240)
* rpc: Make HTTP server timeout values configurable

* rpc: Remove flags for setting HTTP Timeouts, configuring via .toml is sufficient.

* rpc: Replace separate constants with a single default struct.

* rpc: Update HTTP Server Read and Write Timeouts to 30s.

* rpc: Remove redundant NewDefaultHTTPTimeouts function.

* rpc: document HTTPTimeouts.

* rpc: sanitize timeout values for library use
2018-07-31 12:16:14 +03:00
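A minimal sketch of the resulting configuration surface, modelled on the PR bullets above (the field names and the idle default are assumptions; the 30s read/write values are the ones stated):

```go
package main

import (
	"fmt"
	"time"
)

// HTTPTimeouts mirrors the single default struct the PR describes,
// replacing the earlier separate constants.
type HTTPTimeouts struct {
	ReadTimeout  time.Duration
	WriteTimeout time.Duration
	IdleTimeout  time.Duration
}

// DefaultHTTPTimeouts reflects the raised 30s read/write defaults.
var DefaultHTTPTimeouts = HTTPTimeouts{
	ReadTimeout:  30 * time.Second,
	WriteTimeout: 30 * time.Second,
	IdleTimeout:  120 * time.Second,
}

// sanitize falls back to the defaults for zero values, per the last
// bullet ("sanitize timeout values for library use").
func sanitize(t HTTPTimeouts) HTTPTimeouts {
	if t.ReadTimeout == 0 {
		t.ReadTimeout = DefaultHTTPTimeouts.ReadTimeout
	}
	if t.WriteTimeout == 0 {
		t.WriteTimeout = DefaultHTTPTimeouts.WriteTimeout
	}
	if t.IdleTimeout == 0 {
		t.IdleTimeout = DefaultHTTPTimeouts.IdleTimeout
	}
	return t
}

func main() {
	fmt.Println(sanitize(HTTPTimeouts{WriteTimeout: time.Minute}))
}
```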
gary rong
c4a1d4fecf eth/filters: fix the block range assignment for log filter (#17284) 2018-07-31 12:10:38 +03:00
Chen Quan
fb9f7261ec cmd/evm: change error msg output to stderr (#17118)
* cmd/evm: change error msg output to stderr

* cmd/evm: fix some linter error
2018-07-31 10:48:27 +03:00
Péter Szilágyi
d927cbb638 Merge pull request #17282 from karalabe/trie-flushlist-fixes
trie: handle removing the freshest node too
2018-07-31 10:41:53 +03:00
ledgerwatch
2fbc454355 miner: fix state locking while writing to chain (issue #16933) (#17173) 2018-07-31 10:22:33 +03:00
holisticode
d6efa69187 Merge netsim mig to master (#17241)
* swarm: merged stream-tests migration to develop

* swarm/network: expose simulation RandomUpNode to use in stream tests

* swarm/network: wait for subs in PeerEvents and fix stream.runSyncTest

* swarm: enforce waitkademlia for snapshot tests

* swarm: fixed syncer tests and snapshot_sync_test

* swarm: linting of simulation package

* swarm: address review comments

* swarm/network/stream: fix delivery_test bugs and refactor

* swarm/network/stream: addressed PR comments @janos

* swarm/network/stream: enforce waitKademlia, improve TestIntervals

* swarm/network/stream: TestIntervals not waiting for chunk to be stored
2018-07-30 22:55:25 +02:00
Péter Szilágyi
3ea8ac6a9a Merge pull request #17281 from karalabe/puppeth-cachewarn-fix
cmd/puppeth: force tiny memory for geth attach in id lookup
2018-07-30 16:32:10 +03:00
Péter Szilágyi
8a9c31a307 trie: handle removing the freshest node too 2018-07-30 16:31:17 +03:00
Péter Szilágyi
6380c06c65 Merge pull request #17279 from karalabe/puppeth-banlist-fix
cmd/puppeth: split banned ethstats addresses over columns
2018-07-30 16:15:35 +03:00
Péter Szilágyi
54d1111965 cmd/puppeth: force tiny memory for geth attach in id lookup 2018-07-30 16:09:19 +03:00
Péter Szilágyi
f00d0daf33 cmd/puppeth: split banned ethstats addresses over columns 2018-07-30 15:39:35 +03:00
Ha ĐANG
2cffd4ff3c core: fix some small typos on comment code (#17278) 2018-07-30 14:10:48 +03:00
Oleg Kovalov
7b1aa64220 dashboard: append to proper slice (#17266) 2018-07-30 12:48:16 +03:00
Janoš Guljaš
8f4c4fea20 p2p: fix rare deadlock in Stop (#17260) 2018-07-30 12:44:17 +03:00
Oleg Kovalov
d42ce0f2c1 all: simplify switches (#17267)
* all: simplify switches

* silly mistake
2018-07-30 12:30:09 +03:00
Anton Evangelatov
273c7a9dc4 swarm/api: remove ioutil.ReadAll for HandleGetFiles (#17276) 2018-07-30 12:19:26 +03:00
Anton Evangelatov
a5d5609e38 build: rename swarm deb package to ethereum-swarm; change swarm deb version from 1.8.x to 0.3.x (#16988)
* build: add support for different package and binary names

* build: bump up copyright date

* build: change default PackageName to empty string

* build, internal, swarm: enhance build/release process

* build: hack ethereum-swarm as a "depends" in deb package

* build/ci: remove redundant variables

* build, cmd, mobile, params, swarm: remove VERSION file; rename Version to VersionMeta;

* internal: remove VERSION() method which reads VERSION file

* build: fix VersionFilePath to Version

* Makefile: remove clean_go_build_cache.sh until it works

* Makefile: revert removal of clean_go_build_cache.sh
2018-07-30 11:56:40 +03:00
Péter Szilágyi
93c0f1715d Merge pull request #17245 from karalabe/azure-deps-fixups
internal, vendor: update Azure blobstore API
2018-07-26 19:59:46 +03:00
Péter Szilágyi
d9575e92fc crypto/secp256k1: remove external LGPL dependencies (#17239) 2018-07-26 13:33:13 +02:00
Raghav Sood
11a402f747 core: report progress on log chain exports (#17066)
* core/blockchain: export progress

* core: polish up chain export progress report a bit
2018-07-26 14:26:24 +03:00
a e r t h
021d6fbbbb cmd: prevent accidental invalid commands (#17248)
* cmd: stop parsing bootnodes if one is invalid

* cmd/geth: require valid command as argument (or no arg)
2018-07-26 13:57:20 +03:00
Péter Szilágyi
feed8069a6 Merge pull request #17251 from karalabe/ppa-deprecate-artful
build: deprecated ubuntu artful, enable ubuntu cosmic
2018-07-26 13:34:38 +03:00
Péter Szilágyi
514022bde6 Merge pull request #17252 from karalabe/travis-debsrc-fix
build: noop clean during travis debsrc assembly step
2018-07-26 13:29:57 +03:00
Péter Szilágyi
ff22ec31b6 build: noop clean during travis debsrc assembly step 2018-07-26 13:26:53 +03:00
Péter Szilágyi
0598707129 build: deprecated ubuntu artful, enable ubuntu cosmic 2018-07-26 13:14:36 +03:00
Péter Szilágyi
a511f6b515 Merge pull request #17250 from karalabe/fix-clean
build: fix bash->sh function declaration
2018-07-26 13:05:45 +03:00
Péter Szilágyi
8b1e14b7f8 build: fix bash->sh function declaration 2018-07-26 13:01:00 +03:00
Sarlor
6c412e313c cmd/utils: fix comment typo (#17249)
cmd: Comment error
2018-07-26 10:59:05 +03:00
Péter Szilágyi
6b232ce325 internal, vendor: update Azure blobstore API 2018-07-25 18:04:43 +03:00
Guillaume Ballet
7abedf9bbb core/vm: support for multiple interpreters (#17093)
- Define an Interpreter interface
- One contract can call contracts from other interpreter types.
- Pass the interpreter to the operands instead of the evm.
  This is meant to prevent type assertions in operands.
2018-07-25 08:56:39 -04:00
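A minimal sketch of the dispatch idea those bullets describe: each interpreter reports whether it can run a given bytecode, and the EVM tries them in order. Types and method shapes are simplified illustrations, not the real core/vm definitions:

```go
package main

import (
	"errors"
	"fmt"
)

// Interpreter is the sketched interface: each VM flavour reports
// whether it can run a piece of code, and runs it if so.
type Interpreter interface {
	CanRun(code []byte) bool
	Run(code, input []byte) ([]byte, error)
}

type evmInterpreter struct{}

func (evmInterpreter) CanRun(code []byte) bool { return true } // EVM is the fallback
func (evmInterpreter) Run(code, input []byte) ([]byte, error) {
	return nil, nil // execution elided in this sketch
}

type ewasmInterpreter struct{}

// wasm modules start with the magic bytes "\0asm".
func (ewasmInterpreter) CanRun(code []byte) bool {
	return len(code) >= 4 && string(code[:4]) == "\x00asm"
}
func (ewasmInterpreter) Run(code, input []byte) ([]byte, error) {
	return nil, errors.New("ewasm: not implemented in this sketch")
}

// run dispatches to the first interpreter that accepts the code.
func run(interpreters []Interpreter, code, input []byte) ([]byte, error) {
	for _, in := range interpreters {
		if in.CanRun(code) {
			return in.Run(code, input)
		}
	}
	return nil, errors.New("no interpreter can run this code")
}

func main() {
	vms := []Interpreter{ewasmInterpreter{}, evmInterpreter{}}
	out, err := run(vms, []byte{0x60, 0x00}, nil)
	fmt.Println(out, err)
}
```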
Antoine Rondelet
27a278e6e3 core: fixed typo in addresssByHeartbeat (#17243) 2018-07-25 14:25:14 +03:00
Péter Szilágyi
a0bcb16875 Merge pull request #17244 from chainpunk/master
core: fix typo in comment code
2018-07-25 14:07:54 +03:00
hadv
bc0a43191e core: fix typo in comment code 2018-07-25 18:04:38 +07:00
Viktor Trón
1064b9e691 Merge pull request #17233 from ethersphere/swarm-readme
swarm: README.md
2018-07-25 05:38:14 +02:00
Osuke
10780e8a00 core: fix txpool guarantee comment (#17214)
* fixed-typo

* core: fix txpool guarantee comment
2018-07-24 18:44:41 +03:00
gary rong
2433349c80 core/vm, params: implement EXTCODEHASH opcode (#17202)
* core/vm, params: implement EXTCODEHASH opcode

* core, params: tiny fixes and polish

* core: add function description
2018-07-24 18:06:40 +03:00
Anton Evangelatov
58243b4d3e README: point Swarm brief to the Swarm README, instead of directly to docs 2018-07-24 16:55:07 +02:00
gary rong
cab1cff11c core, crypto, params: implement CREATE2 evm instrction (#17196)
* core, crypto, params: implement CREATE2 evm instrction

* core/vm: add opcode to string mapping

* core: remove past fork checking

* core, crypto: use option2 to generate new address
2018-07-24 17:22:03 +03:00
Vincent Serpoul
2909f6d7a2 common: add database/sql support for Hash and Address (#15541) 2018-07-24 15:15:07 +02:00
Ian Macalinao
d96ba77113 eth/filters: improve error message for invalid filter topics (#17234) 2018-07-24 15:12:49 +02:00
Péter Szilágyi
62467e4405 Merge pull request #17206 from hadv/master
consensus/clique: replace bubble sort by golang stable sort
2018-07-24 13:46:37 +03:00
Wenbiao Zheng
d0082bb7ec core: fix comment typo (#17236) 2018-07-24 13:17:12 +03:00
Péter Szilágyi
21c059b67e Merge pull request #16734 from reductionista/eip234
eth/filters, interfaces: EIP-234 Add blockHash option to eth_getLogs
2018-07-24 12:52:16 +03:00
Sheldon
9e24491c65 core/bloombits, light: fix typos (#17235) 2018-07-24 11:24:27 +03:00
hadv
49f63deb24 consensus/clique: replace bubble sort by golang stable sort 2018-07-24 14:56:53 +07:00
Viktor Trón
b536460f8e Merge pull request #17231 from ethersphere/develop
swarm: client-side MRU signatures; BMT fixes; network simulation tests
2018-07-24 08:44:43 +02:00
Péter Szilágyi
afd8b84706 crypto/secp256k1: unify the package license to 3-Clause BSD (#17225)
Our original wrapper code had two parts: one taken from a third-party
repository (which took it from upstream Go), licensed under BSD-3; the
second written by Jeff, Felix and Gustav, licensed under LGPL. This
made the package problematic to use from the outside.

With the agreement of the original copyright holders, this commit
changes the license of the LGPL portions of the code to BSD-3:

---
I agree changing from LGPL to a BSD style license.

Jeff
---
Hey guys,

My preference would be to relicense to GNUBL, but I'm also OK with BSD.

Cheers,
Gustav
---
Felix Lange (fjl):
I would approve anything that makes our licensing less complicated
---
2018-07-24 02:47:47 +02:00
Wenbiao Zheng
f6206efe5b consensus: move test use only var/func to test(#17004) 2018-07-24 02:14:15 +02:00
Hyung-Kyu Hqueue Choi
ae674a3660 Makefile: clean go build cache (#17079) 2018-07-24 02:11:51 +02:00
Wenbiao Zheng
894022a3d5 cmd/geth: clean up call to SelfDerive (#16970) 2018-07-24 02:09:24 +02:00
Wenbiao Zheng
68da9aa716 rpc: clean up check for missing methods/subscriptions on handler (#17145) 2018-07-24 02:00:55 +02:00
Anton Evangelatov
14bdcdeab4 swarm/testutil: remove EnableMetrics 2018-07-23 18:45:39 +02:00
Wenbiao Zheng
fe6a9473dc p2p: token is useless in xxxEncHandshake (#17230) 2018-07-23 17:36:08 +02:00
Javier Peletier
427316a707 swarm/storage/mru: Client-side MRU signatures (#784)
* swarm/storage/mru: Add embedded publickey and remove ENS dep

This commit breaks swarm, swarm/api...
but tests in swarm/storage/mru pass

* swarm: Refactor swarm, swarm/api to mru changes, make tests pass

* swarm/storage/mru: Remove self from recv, remove test ens vldtr

* swarm/storage/mru: Remove redundant test, expose ResourceHash mthd

* swarm/storage/mru: Make HeaderGetter mandatory + godoc fixes

* swarm/storage: Remove validator prefix for metadata chunk

* swarm/storage/mru: Use Address instead of PublicKey

* swarm/storage/mru: Change index from name to metadata chunk addr

* swarm/storage/mru: Refactor swarm/api/... to MRU index changes

* swarm/storage/mru: Refactor cleanup

* swarm/storage/mru: Rebase cleanup

* swarm: Use constructor for GenericSigner MRU in swarm.go

* swarm/storage: Change to BMTHash for MRU hashing

* swarm/storage: Reduce loglevel on chunk validator logs

* swarm/storage/mru: Delint

* swarm: MRU Rebase cleanup

* swarm/storage/mru: client-side mru signatures

Rebase to PR #668 and fix all conflicts

* swarm/storage/mru:  refactor and documentation

* swarm/resource/mru: error-checking  tests for parseUpdate/newUpdateChunk

* swarm/storage/mru: Added resourcemetadata tests

* swarm/storage/mru: Added tests  for UpdateRequest

* swarm/storage/mru: more test coverage for UpdateRequest and comments

* swarm/storage/mru: Avoid fake chunks in parseUpdate()

* swarm/storage/mru: Documented resource.go extensively

moved some functions where they make most sense

* swarm/storage/mru: increase test coverage for UpdateRequest and

variable name changes throughout to increase consistency

* swarm/storage/mru: moved default timestamp to NewCreateRequest-

* swarm/storage/mru: lookup refactor

* swarm/storage/mru: added comments and renamed raw flag to rawmru

* swarm/storage/mru: fix receiver typo

* swarm/storage/mru: refactored update chunk new/create

* swarm/storage/mru:  refactored signature digest to avoid malleability

* swarm/storage/mru: optimize update data serialization

* swarm/storage/mru: refactor and cleanup

* swarm/storage/mru: add timestamp struct and serialization

* swarm/storage/mru: fix lint error and mark some old code for deletion

* swarm/storage/mru: remove unnecessary variable

* swarm/storage/mru: Added more comments throughout

* swarm/storage/mru: Refactored metadata chunk layout + extensive error...

* swarm/storage/mru: refactor cli parser
Changed resource info output to JSON

* swarm/storage/mru: refactor serialization for extensibility

refactored error messages to NewErrorf

* swarm/storage/mru: Moved Signature to resource_sign.
Check Sign errors in server tests

* swarm/storage/mru: Remove isSafeName() checks

* swarm/storage/mru: scrubbed off all references to "block" for time

* swarm/storage/mru: removed superfluous isSynced() call.

* swarm/storage/mru: remove isMultihash() and ToSafeName functions

* swarm/storage/mru: various fixes and comments

* swarm/storage/mru: decoupled cli for independent create/update
* Made resource name optional
* Removed unused LookupPrevious

* swarm/storage/mru: Decoupled resource create / update & refactor

* swarm/storage/mru: Fixed some comments as per issues raised in PR #743

* swarm/storage/mru: Cosmetic changes as per #743 comments

* swarm/storage/mru: refct request encoder/decoder > marshal/unmarshal

* swarm/storage/mru: Cosmetic changes as per review in #748

* swarm/storage/mru: removed timestamp proof placeholder

* swarm/storage/mru: cosmetic/doc/fixes changes as per comments in #704

* swarm/storage/mru: removed unnecessary check in Handler.update

* swarm/storage/mru: Implemented Marshaler/Unmarshaler iface in Request

* swarm/storage/mru: Fixed linter error

* swarm/storage/mru: removed redundant address in signature digest

* swarm/storage/mru: fixed bug: LookupLatestVersionInPeriod not working

* swarm/storage/mru: Unfold Request creation API for create or update+create
set common time source for mru package

* swarm/api/http: fix HandleGetResource error variable shadowed
when requesting a resource that does not exist

* swarm/storage/mru: Add simple check to detect duplicate updates

* swarm/storage/mru: moved Multihash() to the right place.

* cmd/swarm: remove unneeded clientaccountmanager.go

* swarm/storage/mru: Changed some comments as per reviews in #784

* swarm/storage/mru: Made SignedResourceUpdate.GetDigest() public

* swarm/storage/mru: cosmetic changes as per comments in #784

* cmd/swarm: Inverted --multihash flag default

* swarm/storage/mru: removed Verify from SignedResourceUpdate.fromChunk

* swarm/storage/mru: Moved validation code out of serializer
Cosmetic / comment changes

* swarm/storage/mru: Added unit tests for UpdateLookup

* swarm/storage/mru: Increased coverage of metadata serialization

* swarm/storage/mru: Increased test coverage of updateHeader serializers

* swarm/storage/mru: Add resourceUpdate serializer test
2018-07-23 15:33:33 +02:00
Elad
0647c4de7b swarm/api/http: http package refactoring 1/5 and 2/5 2018-07-23 15:33:32 +02:00
Javier Peletier
7ddc2c9e95 cmd/swarm: add implicit subcommand help (fix #786) (#788)
* cmd/swarm: add implicit subcommand help (fix #786)

* cmd/swarm: moved implicit help to a recursive func
2018-07-23 15:33:32 +02:00
Janoš Guljaš
dcaaa3c804 swarm: network simulation for swarm tests (#769)
* cmd/swarm: minor cli flag text adjustments

* cmd/swarm, swarm/storage, swarm: fix  mingw on windows test issues

* cmd/swarm: support for smoke tests on the production swarm cluster

* cmd/swarm/swarm-smoke: simplify cluster logic as per suggestion

* changed colour of landing page

* landing page reacts to enter keypress

* swarm/api/http: sticky footer for swarm landing page using flex

* swarm/api/http: sticky footer for error pages and fix for multiple choices

* swarm: propagate ctx to internal apis (#754)

* swarm/simnet: add basic node/service functions

* swarm/netsim: add buckets for global state and kademlia health check

* swarm/netsim: Use sync.Map as bucket and provide cleanup function for...

* swarm, swarm/netsim: adjust SwarmNetworkTest

* swarm/netsim: fix tests

* swarm: added visualization option to sim net redesign

* swarm/netsim: support multiple services per node

* swarm/netsim: remove redundant return statement

* swarm/netsim: add comments

* swarm: shutdown HTTP in Simulation.Close

* swarm: sim HTTP server timeout

* swarm/netsim: add more simulation methods and peer events examples

* swarm/netsim: add WaitKademlia example

* swarm/netsim: fix comments

* swarm/netsim: terminate peer events goroutines on simulation done

* swarm, swarm/netsim: naming updates

* swarm/netsim: return not healthy kademlias on WaitTillHealthy

* swarm: fix WaitTillHealthy call in testSwarmNetwork

* swarm/netsim: allow bucket to have any type for a key

* swarm: Added snapshots to new netsim

* swarm/netsim: add more tests for bucket

* swarm/netsim: move http related things into separate files

* swarm/netsim: add AddNodeWithService option

* swarm/netsim: add more tests and Start* methods

* swarm/netsim: add peer events and kademlia tests

* swarm/netsim: fix some tests flakiness

* swarm/netsim: improve random nodes selection, fix TestStartStop* tests

* swarm/netsim: remove time measurement from TestClose to avoid flakiness

* swarm/netsim: builder pattern for netsim HTTP server (#773)

* swarm/netsim: add connect related tests

* swarm/netsim: add comment for TestPeerEvents

* swarm: rename netsim package to network/simulation
2018-07-23 15:33:25 +02:00
lash
f5b128a5b3 swarm/fuse: Hotfix missing parentheses in statement 2018-07-23 15:31:02 +02:00
Viktor Trón
fd982d3f3b swarm/bmt: async section writer interface to BMT (#778)
- AsyncHasher implements AsyncWriter interface
- add extra level for zerohashes in pool to lookup empty data hash
- remove unused segment, hash and depth fields from Tree
- Hash pkg function -> syncHash moved to test
- add asyncHash helper func to tests using shuffle
- add TestAsyncCorrectness to tests
- add BenchmarkBMTAsync to tests
- refactor benchmarks using subbenchmarks
- improved comments
- preinitialise base hashers on the nodes
2018-07-23 15:09:25 +02:00
emile
526abe2736 eth/tracers: fix noop tracer (#17220) 2018-07-22 22:11:52 +03:00
cong
8997efe31f rpc: fix missing parentheses in doc (#17224) 2018-07-22 22:09:45 +03:00
Anton Evangelatov
4ea2d707f9 gitter: change ethereum/swarm to ethersphere/orange-lounge 2018-07-21 00:47:46 +02:00
Anton Evangelatov
ee8877509a swarm/readme: add link to code review guidelines 2018-07-19 13:27:31 +02:00
Anton Evangelatov
763e64cad8 swarm: readme.md 2018-07-19 12:13:03 +02:00
Roc Yu
040dd5bd5d accounts/abi: refactor Method#Sig() to use index in range iterator directly (#17198) 2018-07-19 11:42:47 +03:00
gary rong
dcdd57df62 core, ethdb: two tiny fixes (#17183)
* ethdb: fix memory database

* core: fix bloombits checking

* core: minor polish
2018-07-18 13:41:36 +03:00
Roc Yu
323428865f accounts: add unit tests for URL (#17182) 2018-07-18 11:32:49 +03:00
jkcomment
65c91ad5e7 p2p: correct comments typo (#17184) 2018-07-18 10:41:18 +03:00
Ralph Caraveo III
5d30be412b all: switch out defunct set library to different one (#16873)
* keystore, ethash, eth, miner, rpc, whisperv6: tech debt with now defunct set.

* whisperv5: swap out gopkg.in/fatih/set.v0 with supported set
2018-07-16 10:54:19 +03:00
Kurkó Mihály
eb7f901289 dashboard: fix CSS, escape special HTML chars, clean up code (#17167)
* dashboard: fix CSS, escape special HTML chars, clean up code

* dashboard: change 0 to 1

* dashboard: add escape-html npm package
2018-07-16 10:43:58 +03:00
Péter Szilágyi
db5e403afe build: fix typo (#17175)
build: Fix a typo in ci.go
2018-07-16 10:36:21 +03:00
Roc Yu
96116758d2 cmd/geth: fix golint issue (#17176) 2018-07-16 10:33:58 +03:00
Felix Yan
65cebb7730 build: Fix a typo in ci.go 2018-07-15 14:13:11 +08:00
Anton Evangelatov
2e0391ea84 cmd/swarm: change version of swarm binary (#17174) 2018-07-13 22:53:02 +02:00
Anton Evangelatov
d483da766f swarm/network: bump up protocol versions due to wrapped message intro (#17170) 2018-07-13 17:40:55 +02:00
Anton Evangelatov
7c9314f231 swarm: integrate OpenTracing; propagate ctx to internal APIs (#17169)
* swarm: propagate ctx, enable opentracing

* swarm/tracing: log error when tracing is misconfigured
2018-07-13 17:40:28 +02:00
Péter Szilágyi
e1f1d3085c accounts, eth, les: blockhash based filtering on all code paths 2018-07-12 18:16:54 +03:00
Domino Valdano
96339daf40 eth/filters, ethereum: EIP-234 add blockHash param for eth_getLogs 2018-07-12 18:16:32 +03:00
Anton Evangelatov
f7d3678c28 swarm/api/http: http package refactoring 1/5 and 2/5 (#17164) 2018-07-12 15:42:45 +02:00
Kwuaint
facf1bc9d6 consensus/ethash: fix the algorithm of fakeBlockNumber in comments (#17166)
correct the algorithm in the comments for fakeBlockNumber, from "min" to "max".
2018-07-12 13:32:23 +03:00
gary rong
e8824f6e74 vendor, ethdb: resume write operation asap (#17144)
* vendor: update leveldb

* ethdb: remove useless warning log
2018-07-12 11:07:51 +03:00
Kurkó Mihály
a9835c1816 cmd, dashboard, log: log collection and exploration (#17097)
* cmd, dashboard, internal, log, node: logging feature

* cmd, dashboard, internal, log: requested changes

* dashboard, vendor: gofmt, govendor, use vendored file watcher

* dashboard, log: gofmt -s -w, goimports

* dashboard, log: gosimple
2018-07-11 10:59:04 +03:00
Wenbiao Zheng
2eedbe799f cmd: typo fixed, isntance -> instance (#17149) 2018-07-09 17:34:59 +03:00
Anton Evangelatov
b3711af051 swarm: ctx propagation; bmt fixes; pss generic notification framework (#17150)
* cmd/swarm: minor cli flag text adjustments

* swarm/api/http: sticky footer for swarm landing page using flex

* swarm/api/http: sticky footer for error pages and fix for multiple choices

* cmd/swarm, swarm/storage, swarm: fix  mingw on windows test issues

* cmd/swarm: update description of swarm cmd

* swarm: added network ID test

* cmd/swarm: support for smoke tests on the production swarm cluster

* cmd/swarm/swarm-smoke: simplify cluster logic as per suggestion

* swarm: propagate ctx to internal apis (#754)

* swarm/metrics: collect disk measurements

* swarm/bmt: fix io.Writer interface

  * Write now tolerates arbitrary variable buffers
  * added variable buffer tests
  * Write loop and finalise optimisation
  * refactor / rename
  * add tests for empty input

* swarm/pss: (UPDATE) Generic notifications package (#744)

swarm/pss: Generic package for creating pss notification svcs

* swarm: Adding context to more functions

* swarm/api: change colour of landing page in templates

* swarm/api: change landing page to react to enter keypress
2018-07-09 14:11:49 +02:00
Smilenator
30bdf817a0 core/types: polish TxDifference code and docs a bit (#17130)
* core: fix func TxDifference

fix a typo in func comment;
change named return to unnamed as there's explicit return in the body

* fix another typo in TxDifference
2018-07-09 11:48:54 +03:00
Wenbiao Zheng
fbeb4f20f9 cmd/geth: fix usage formatting (#17136) 2018-07-09 11:41:28 +03:00
LeoLiao
0b20b1a050 consensus/clique: fixed documentation copy-paste issue (#17137) 2018-07-09 11:39:43 +03:00
LeoLiao
4dbefc1f25 cmd/geth: fixed comment typo (#17140) 2018-07-09 11:38:52 +03:00
LeoLiao
dbae1dc7b3 rpc: fixed comment grammar issue (#17146) 2018-07-09 11:31:59 +03:00
Felix Lange
3b07451564 params, VERSION: v1.8.13 unstable 2018-07-05 01:09:02 +02:00
Felix Lange
37685930d9 params: v1.8.12 stable 2018-07-05 01:08:05 +02:00
Felföldi Zsolt
51df1c1f20 les: add announcement safety check to light fetcher (#17034) 2018-07-04 13:40:20 +03:00
Felföldi Zsolt
f524ec4326 light: new CHTs (#17124) 2018-07-04 12:41:17 +03:00
Zak Cole
eb794af833 consensus/ethash: fixed documentation typo (#17121)
"proot-of-work" to "proof-of-work"
2018-07-04 11:20:58 +03:00
Péter Szilágyi
67a7857124 Merge pull request #17111 from karalabe/trie-memleak
trie: fix a temporary memory leak in the memcache
2018-07-03 17:20:58 +03:00
Felix Lange
c73b654fd1 p2p/discover: move bond logic from table to transport (#17048)
* p2p/discover: move bond logic from table to transport

This commit moves node endpoint verification (bonding) from the table to
the UDP transport implementation. Previously, adding a node to the table
entailed pinging the node if needed. With this change, the ping-back
logic is embedded in the packet handler at a lower level.

It is easy to verify that the basic protocol is unchanged: we still
require a valid pong reply from the node before findnode is accepted.

The node database tracked the time of last ping sent to the node and
time of last valid pong received from the node. Node endpoints are
considered verified when a valid pong is received and the time of last
pong was called 'bond time'. The time of last ping sent was unused. In
this commit, the last ping database entry is repurposed to mean last
ping _received_. This entry is now used to track whether the node needs
to be pinged back.

The other big change is how nodes are added to the table. We used to add
nodes in Table.bond, which ran when a remote node pinged us or when we
encountered the node in a neighbors reply. The transport now adds to the
table directly after the endpoint is verified through ping. To ensure
that the Table can't be filled just by pinging the node repeatedly, we
retain the isInitDone check. During init, only nodes from neighbors
replies are added.

* p2p/discover: reduce findnode failure counter on success

* p2p/discover: remove unused parameter of loadSeedNodes

* p2p/discover: improve ping-back check and comments

* p2p/discover: add neighbors reply nodes always, not just during init
2018-07-03 16:24:12 +03:00
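
A minimal sketch of the repurposed database bookkeeping described above (type and field names are illustrative, not the actual transport code): a fresh pong marks the endpoint verified, and a stale "last ping received" triggers a ping back.

    package discoversketch // illustrative model, not the geth transport

    import "time"

    const bondExpiration = 24 * time.Hour // assumed verification window

    // nodeDB models the two repurposed entries: the last pong received marks
    // endpoint verification, while the last ping *received* decides whether
    // we still need to ping the remote back.
    type nodeDB struct {
        lastPongReceived map[string]time.Time
        lastPingReceived map[string]time.Time
    }

    // endpointVerified reports whether findnode may be served for this node.
    func (db *nodeDB) endpointVerified(id string) bool {
        return time.Since(db.lastPongReceived[id]) < bondExpiration
    }

    // needsPingBack reports whether we should ping the remote to (re)verify it.
    func (db *nodeDB) needsPingBack(id string) bool {
        return time.Since(db.lastPingReceived[id]) > bondExpiration
    }
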
Chen Quan
9da128db70 cmd/p2psim: add exit error output and exit code (#17116) 2018-07-03 15:14:57 +03:00
Guillaume Ballet
4e5d1f1c39 core/vm: reuse bigint pools across transactions (#17070)
* core/vm: A pool for int pools

* core/vm: fix rebase issue

* core/vm: push leftover stack items after execution, not before
2018-07-03 13:06:42 +03:00
LeoLiao
d57e85ecc9 node: documentation typo fix (#17113) 2018-07-03 11:42:18 +03:00
Anton Evangelatov
1990c9e621 cmd/geth: export metrics to InfluxDB (#16979)
* cmd/geth: add flags for metrics export

* cmd/geth: update usage fields for metrics flags

* metrics/influxdb: update reporter logger to adhere to geth logging convention
2018-07-02 15:51:02 +03:00
Péter Szilágyi
319098cc1c trie: fix a temporary memory leak in the memcache 2018-07-02 15:47:33 +03:00
Fabian Raetz
223d943481 vendor: update docker/docker/pkg/reexec so that it compiles on OpenBSD (#17084) 2018-07-02 14:25:02 +03:00
Péter Szilágyi
8974e2e5e0 Merge pull request #17092 from pilu/master
remove formatting from ResettingTimer metrics if requested in raw format
2018-07-02 14:22:19 +03:00
gary rong
a4a2343cdc ethdb, core: implement delete for db batch (#17101) 2018-07-02 11:16:30 +03:00
kevin.xu
fdfd6d3c39 ethstats: comment minor correction (#17102)
spell correction from `repors` to `reports`
2018-06-29 15:15:38 +03:00
Andrea Franz
b5537c5601 node: remove formatting from ResettingTimer metrics if requested in raw 2018-06-27 11:43:49 +02:00
Péter Szilágyi
e916f9786d Merge pull request #17087 from OpenCommunityCoin/build/portable-shell
build: make build/goimports.sh more portable
2018-06-27 11:28:58 +03:00
hackyminer
909e968ebb build: make build/goimports.sh more portable 2018-06-26 22:04:27 +09:00
Guillaume Ballet
598f786aab core/vm: clear linter warnings (#17057)
* core/vm: clear linter warnings

* core/vm: review input

* core/vm.go: revert lint in noop as per request
2018-06-26 15:56:25 +03:00
Adrià Cidre
461291882e whisper: Reduce message loop log from Warn to Info (#17055) 2018-06-26 04:31:05 -04:00
lash
1f0f6f0272 swarm/pss: Hide big network tests under longrunning flag (#17074) 2018-06-25 16:11:47 +02:00
Balint Gabor
0a22ae5572 swarm/fuse: Disable fuse tests, they are flaky (#17072) 2018-06-25 14:04:01 +02:00
Péter Szilágyi
b0cfd9c786 Merge pull request #17054 from chfast/log-time-format
log: Change time format
2018-06-25 12:23:33 +03:00
Paweł Bylica
6d8a1bfb08 log: Change time format
- Keep the trailing zeros.
- Limit precision to milliseconds.
2018-06-25 11:22:23 +02:00
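
For illustration, the effect in Go's reference-time layout (the exact layout string is an assumption): a ".000" suffix keeps trailing zeros and caps precision at milliseconds, unlike ".999", which trims zeros and produces variable-width timestamps.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const termTimeFormat = "01-02|15:04:05.000" // assumed layout
        // ".000" pads with trailing zeros at millisecond precision;
        // ".999" would drop them and vary the timestamp width.
        fmt.Println(time.Now().Format(termTimeFormat))
    }
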
gary rong
4895665670 les: handle conn/disc/reg logic in the eventloop (#16981)
* les: handle conn/disc/reg logic in the eventloop

* les: try to dial before start eventloop

* les: handle disconnect logic more safely

* les: grammar fix
2018-06-25 11:52:24 +03:00
Viktor Trón
eaff89291c Merge pull request #17041 from ethersphere/swarm-network-rewrite-merge
Swarm POC3 - happy solstice
2018-06-21 23:00:43 +02:00
ethersphere
e187711c65 swarm: network rewrite merge 2018-06-21 21:10:31 +02:00
Andrey Petrov
6209545083 p2p: Wrap conn.flags ops with atomic.Load/Store 2018-06-21 12:22:47 -04:00
Andrey Petrov
193a402cc0 p2p: Test for peer.rw.flags race conditions 2018-06-21 12:22:47 -04:00
Andrey Petrov
dcca66bce8 p2p: Cache inbound flag on Peer.isInbound to avoid a race 2018-06-21 12:22:47 -04:00
Andrey Petrov
399aa710d5 p2p: Attempt to race check peer.Inbound() in TestServerDial 2018-06-21 12:22:47 -04:00
Andrey Petrov
699794d88d p2p: More tests for AddTrustedPeer/RemoveTrustedPeer 2018-06-21 12:22:47 -04:00
Andrey Petrov
773857a524 p2p: Test for MaxPeers=0 and TrustedPeer override 2018-06-21 12:21:48 -04:00
Andrey Petrov
2a75fe3308 rpc: Add admin_addTrustedPeer and admin_removeTrustedPeer.
These RPC calls are analogous to Parity's parity_addReservedPeer and
parity_removeReservedPeer.

They are useful for adjusting the trusted peer set during runtime,
without requiring restarting the server.
2018-06-21 12:21:48 -04:00
Péter Szilágyi
d926bf2c7e trie: cache collapsed tries node, not rlp blobs (#16876)
The current trie memory database/cache that we do pruning on stores
trie nodes as binary rlp encoded blobs, and also stores the node
relationships/references for GC purposes. However, most of the trie
nodes (everything apart from a value node) is in essence just a
collection of references.

This PR switches out the RLP encoded trie blobs with the
collapsed-but-not-serialized trie nodes. This permits most of the
references to be recovered from within the node data structure,
avoiding the need to track them a second time (expensive memory wise).
2018-06-21 11:28:05 +02:00
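
A rough model of the new cache entry (field names are illustrative): the collapsed node is stored directly, so child references fall out of the structure instead of being tracked a second time.

    package triesketch

    import "github.com/ethereum/go-ethereum/common"

    // cachedNode stores the collapsed-but-not-serialized node itself rather
    // than its RLP blob; most child references can then be read straight from
    // the node, leaving only GC reference counts to track on the side.
    type cachedNode struct {
        node     interface{}            // collapsed trie node, not []byte RLP
        parents  uint16                 // live parents referencing this node
        children map[common.Hash]uint16 // external children, for GC purposes
    }
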
nobody
8db8d074e2 cmd/geth: remove the tail "," from genesis config (#17028)
remove the trailing "," from the genesis config, which would cause a genesis config parse error.
2018-06-21 10:44:39 +03:00
Husam Ibrahim
1a70338734 mobile: correct comment typo in ethereum.go (#17040) 2018-06-21 10:35:35 +03:00
Wenbiao Zheng
61a5976368 accounts: remove deadcode isSigned (#16990) 2018-06-20 11:46:29 +02:00
Péter Szilágyi
88c42ab4e7 Merge pull request #16954 from holiman/more_tracers
eth/tracers: fix err in 4byte, add some opcode analysis tools
2018-06-20 11:43:40 +03:00
Martin Holst Swende
4210dd1500 tracers: fix err in 4byte, add some opcode analysis tools 2018-06-20 11:42:57 +03:00
ligi
c971ab617d travis: use NDK 17b for Android archives (#17029) 2018-06-20 11:28:10 +03:00
Wenbiao Zheng
f1986f86f2 signer: remove useless errorWrapper (#17003) 2018-06-19 14:48:10 +03:00
Husam Ibrahim
28aca90716 accounts/usbwallet: correct comment typo (#16998) 2018-06-19 14:43:20 +03:00
Wenbiao Zheng
9b1536b26a core: remove dead code, limit test code scope (#17006)
* core: move test util var/func to test file

* core: remove useless func
2018-06-19 14:41:13 +03:00
Husam Ibrahim
3e57c33147 accounts/usbwallet: correct comment typo (#17008) 2018-06-19 14:36:35 +03:00
Husam Ibrahim
baa7eb901e mobile: correct comment typo in geth.go (#17021) 2018-06-19 14:35:59 +03:00
Wenbiao Zheng
574378edb5 cmd: remove faucet/puppeth dead code (#16991)
* cmd/faucet: authGitHub is not used anymore

* cmd/puppeth: remove unused code
2018-06-15 11:14:55 +03:00
Wenbiao Zheng
c95e4a80d1 accounts/keystore: assign schema as const instead of var (#16985) 2018-06-14 17:24:35 +03:00
Ryan Schneider
897ea01d5f internal/debug: use pprof goroutine writer for debug_stacks (#16892)
* debug: Use pprof goroutine writer in debug.Stacks() to ensure all goroutines are captured.

* Up to a 64MB limit; the previous code only captured the first 1MB of goroutines.

* internal/debug: simplify stacks handler

* fix typo

* fix pointer receiver
2018-06-14 16:58:44 +03:00
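
The underlying mechanism is the runtime's goroutine profile; a minimal sketch of the approach (not the exact handler code):

    package debugsketch

    import (
        "bytes"
        "runtime/pprof"
    )

    // stacks dumps every goroutine via the pprof writer (debug=2 prints the
    // same text format as a panic trace), avoiding a fixed-size runtime.Stack
    // buffer that silently truncates.
    func stacks() []byte {
        buf := new(bytes.Buffer)
        _ = pprof.Lookup("goroutine").WriteTo(buf, 2)
        return buf.Bytes()
    }
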
Caesar Chad
ec192f18b4 core/asm: correct comments typo (#16974)
* core/asm/compiler: correct comments typo

core/asm/compiler: correct comments typo

* Correct comments typo
2018-06-14 16:24:35 +03:00
Felix Lange
aa34173f13 common/number: delete unused package (#16983)
This package was meant to hold an improved 256 bit integer library, but
the effort was abandoned in 2015. AFAIK nothing ever used this package.
Time to say goodbye.
2018-06-14 15:10:34 +03:00
kiel barry
c343f75c26 bmt: fix package documentation comment (#16909) 2018-06-14 13:50:31 +02:00
Wenbiao Zheng
52b1d09457 core: reduce nesting in transaction pool code (#16980) 2018-06-14 13:46:43 +03:00
williambannas
9402f96597 eth: conform better to the golint standards (#16783)
* eth: made changes to conform better to the golint standards

* eth: fix comment nit
2018-06-14 13:14:52 +03:00
kiel barry
d0fd8d6fc2 common: all golint warnings removed (#16852)
* common: all golint warnings removed

* common: fixups
2018-06-14 12:52:50 +03:00
Péter Szilágyi
cfde0b5f52 Merge pull request #16977 from karalabe/go-1.10.3
travis, appveyor: update to Go 1.10.3
2018-06-14 12:51:50 +03:00
Péter Szilágyi
de06185fc3 travis, appveyor: update to Go 1.10.3 2018-06-14 12:46:49 +03:00
Jeremy Schlatter
3fb5f3ae11 cmd/utils: fix NetworkId default when -dev is set (#16833)
Prior to this change, when geth was started with `geth -dev -rpc`,
it would report a network id of `1` in response to the `net_version` RPC
request. But the actual network id it used to verify transactions
was `1337`.

This change causes geth to respond with `1337` to the `net_version`
RPC request instead when started with `geth -dev -rpc`.
2018-06-14 12:31:31 +03:00
knarfeh
e75d0a6e4c eth/filters: make filterLogs func more readable (#16920) 2018-06-14 12:27:02 +03:00
Martin Holst Swende
947e0afeb3 core/vm: optimize MSTORE and SLOAD (#16939)
* vm/test: add tests+benchmarks for mstore

* core/vm: less alloc and copying for mstore

* core/vm: less allocs in sload

* vm: check for errors more correctly
2018-06-14 12:23:37 +03:00
Elad
1836366ac1 all: library changes for swarm-network-rewrite (#16898)
This commit adds all changes needed for the merge of swarm-network-rewrite.
The changes:

- build: increase linter timeout
- contracts/ens: export ensNode
- log: add Output method and enable fractional seconds in format
- metrics: relax test timeout
- p2p: reduced some log levels, updates to simulation packages
- rpc: increased maxClientSubscriptionBuffer to 20000
2018-06-14 11:21:17 +02:00
Armin Braun
591cef17d4 #15685 made peer_test.go more portable by using a random free port instead of the hardcoded port 30303 (#15687)
Improves test portability by resolving 127.0.0.1:0
to get a random free port instead of the hardcoded one. Now
the test works even if a node is already running on the same
interface.

Fixes #15685
2018-06-14 10:54:00 +02:00
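
The trick in question is the standard one (sketch below, not the test's exact code): bind to port 0 and let the OS pick an unused port.

    package p2psketch

    import "net"

    // freeAddr binds to port 0 so the kernel assigns a free port, instead of
    // hardcoding 30303 and colliding with an already-running node.
    func freeAddr() (string, error) {
        l, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            return "", err
        }
        defer l.Close()
        return l.Addr().String(), nil // e.g. "127.0.0.1:54321"
    }
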
Caesar Chad
e33a5de454 console: correct some comments typo (#16971)
console/console: correct some comments typo
2018-06-14 11:35:20 +03:00
Caesar Chad
f04c0e341e core/asm: correct comments typo (#16975)
core/asm/lexer: correct comments typo
2018-06-14 11:32:19 +03:00
Wenbiao Zheng
ea89f40f0d eth/fetcher: fix annotation (#16969) 2018-06-13 17:02:28 +03:00
Ryan Schneider
1fc54d92ec internal/web3ext: fix method name for enabling mutex profiling (#16964) 2018-06-13 14:10:20 +02:00
John C. Vernaleo
8c4a7fa8d3 core: change comment to match code more closely (#16963) 2018-06-13 10:14:15 +03:00
Péter Szilágyi
423d4254f5 VERSION, params: begin v1.8.12 release cycle 2018-06-12 17:04:17 +03:00
Péter Szilágyi
dea1ce052a params: release go-ethereum v1.8.11 2018-06-12 17:02:14 +03:00
Felföldi Zsolt
25982375a8 les: fix retriever logic (#16776)
This PR fixes a retriever logic bug. When a peer had a soft timeout
and a response then arrived, the logic always assumed it came from that
same peer, even though it could have come from a later-requested peer
that had not timed out yet. In this case the logic entered an illegal
state and deadlocked, causing a goroutine leak.

Fixes #16243 and replaces #16359.
Thanks to @riceke for finding the bug in the logic.
2018-06-12 15:58:47 +02:00
Felföldi Zsolt
049f5b3572 core, eth, les: more efficient hash-based header chain retrieval (#16946) 2018-06-12 16:52:54 +03:00
Felix Lange
0255951587 crypto: replace ToECDSAPub with error-checking func UnmarshalPubkey (#16932)
ToECDSAPub was unsafe because it returned a non-nil key with nil X, Y in
case of invalid input. This change replaces ToECDSAPub with
UnmarshalPubkey across the codebase.
2018-06-12 15:26:08 +02:00
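
The error-checking shape this introduces looks roughly like the following sketch (essentially unmarshal-and-validate; the helper name mirrors the new API):

    package cryptosketch

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "errors"

        "github.com/ethereum/go-ethereum/crypto"
    )

    // unmarshalPubkey rejects malformed input with an error, instead of
    // returning a non-nil key whose X and Y are nil as ToECDSAPub did.
    func unmarshalPubkey(pub []byte) (*ecdsa.PublicKey, error) {
        x, y := elliptic.Unmarshal(crypto.S256(), pub)
        if x == nil {
            return nil, errors.New("invalid secp256k1 public key")
        }
        return &ecdsa.PublicKey{Curve: crypto.S256(), X: x, Y: y}, nil
    }
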
Péter Szilágyi
85cd64df0e Merge pull request #16958 from karalabe/pending-account-fast
internal/ethapi: reduce pendingTransactions to O(txs+accs) from O(txs*accs)
2018-06-12 14:07:21 +03:00
Péter Szilágyi
9608ccf106 Merge pull request #16959 from karalabe/fix-linters
metrics: fix gofmt linter warnings
2018-06-12 14:04:59 +03:00
Péter Szilágyi
3f06da7b5f metrics: fix gofmt linter warnings 2018-06-12 14:02:36 +03:00
Felföldi Zsolt
546d42179e les: pass server pool to protocol manager (#16947) 2018-06-12 14:00:52 +03:00
Péter Szilágyi
90829a04bf internal/ethapi: reduce pendingTransactions to O(txs+accs) from O(txs*accs) 2018-06-12 13:49:43 +03:00
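
The shape of that reduction, as a sketch (names are illustrative): collect the owned accounts into a set once, then filter the pending transactions with O(1) lookups instead of scanning all accounts per transaction.

    package ethapisketch

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // filterOwned is O(txs+accs): one pass builds the set (done by the
    // caller), one pass over the transactions uses constant-time lookups.
    func filterOwned(txs []*types.Transaction, owned map[common.Address]struct{}, signer types.Signer) []*types.Transaction {
        var pending []*types.Transaction
        for _, tx := range txs {
            from, err := types.Sender(signer, tx)
            if err != nil {
                continue
            }
            if _, ok := owned[from]; ok {
                pending = append(pending, tx)
            }
        }
        return pending
    }
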
gary rong
f991995918 ethdb: gracefully handle quit channel (#16794)
* ethdb: gracefully handle quit channel

* ethdb: minor polish
2018-06-11 16:11:48 +03:00
Wenbiao Zheng
aab7ab04b0 core/rawdb: wrap db key creations (#16914)
* core/rawdb: use a wrapped helper to assemble keys

* core/rawdb: wrapped helper to assemble keys

* core/rawdb: rewrite the wrapper, pass common.Hash
2018-06-11 16:06:26 +03:00
Péter Szilágyi
43b940ec5a Merge pull request #16945 from karalabe/triedb-spurious-warning
trie: don't report the root flushlist as an alloc
2018-06-11 15:02:37 +03:00
Clayton Jacobs
b487bdf0ba metrics: removed repetitive calculations (#16944) 2018-06-11 14:45:25 +03:00
Péter Szilágyi
a3267ed929 trie: don't report the root flushlist as an alloc 2018-06-11 14:32:13 +03:00
Péter Szilágyi
9f7592c802 Merge pull request #16942 from karalabe/rpc-nil-reply
rpc: support returning nil pointer big.Ints (null)
2018-06-11 13:58:17 +03:00
Péter Szilágyi
99483e85b9 rpc: support returning nil pointer big.Ints (null) 2018-06-11 13:56:22 +03:00
xincaosu
1d666cf27e rpc: fix a comment typo (#16929) 2018-06-11 12:01:13 +03:00
Martin Holst Swende
eac16f9824 core: improve getBadBlocks to return full block rlp (#16902)
* core: improve getBadBlocks to return full block rlp

* core, eth, ethapi: changes to getBadBlocks formatting

* ethapi: address review concerns
2018-06-11 11:03:40 +03:00
Steven Roose
69c52bde3f ethclient: fix RPC parse error of Parity response (#16924)
The error produced when using a Parity RPC was the following:

ERROR: transaction did not get mined: failed to get tx for txid 0xbdeb094b3278019383c8da148ff1cb5b5dbd61bf8731bc2310ac1b8ed0235226: json: cannot unmarshal non-string into Go struct field txExtraInfo.blockHash of type common.Hash
2018-06-11 10:41:09 +03:00
Felföldi Zsolt
2977538ac0 light: new CHTs for mainnet and ropsten (#16926) 2018-06-11 10:38:05 +03:00
Anton Evangelatov
7f0726f706 metrics: return an empty snapshot for NilResettingTimer (#16930) 2018-06-11 10:31:55 +03:00
Steven Roose
13af276418 cmd/ethkey: add command to change key passphrase (#16516)
This change introduces 

    ethkey changepassphrase <keyfile>

to change the passphrase of a key file.
2018-06-08 15:07:07 +02:00
Sarlor
ea06da0892 trie: avoid unnecessary slicing on shortnode decoding (#16917)
Minor optimization: avoid an unnecessary slice operation when decoding short nodes.
2018-06-07 11:48:36 +03:00
ledgerwatch
feb6620c34 core: relax type requirement for bc in ApplyTransaction (#16901) 2018-06-07 10:34:24 +02:00
Bruno Škvorc
90b22773e9 cmd/puppeth: fixed a typo in a wizard input query (#16910) 2018-06-06 12:17:41 +03:00
Guillaume Ballet
9e4f96a1a6 whisper: re-insert #16757 that has been lost during a merge (#16889) 2018-06-05 16:25:16 +02:00
Péter Szilágyi
01a7e267dc Merge pull request #16882 from karalabe/streaming-ecrecover
core: concurrent background transaction sender ecrecover
2018-06-05 17:13:43 +03:00
Felix Lange
e8ea5aa0d5 trie: reduce hasher allocations (#16896)
* trie: reduce hasher allocations

name    old time/op    new time/op    delta
Hash-8    4.05µs ±12%    3.56µs ± 9%  -12.13%  (p=0.000 n=20+19)

name    old alloc/op   new alloc/op   delta
Hash-8    1.30kB ± 0%    0.66kB ± 0%  -49.15%  (p=0.000 n=20+20)

name    old allocs/op  new allocs/op  delta
Hash-8      11.0 ± 0%       8.0 ± 0%  -27.27%  (p=0.000 n=20+20)

* trie: bump initial buffer cap in hasher
2018-06-05 15:06:29 +03:00
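
The kind of reuse being tuned here, sketched (the buffer capacity and Keccak constructor are assumptions, not geth's exact values):

    package hashersketch

    import (
        "hash"
        "sync"

        "golang.org/x/crypto/sha3"
    )

    // Pooled hashers with a pre-grown scratch buffer: Hash calls reuse both
    // the Keccak state and the encoding buffer instead of allocating anew.
    type hasher struct {
        tmp []byte
        sha hash.Hash
    }

    var hasherPool = sync.Pool{
        New: func() interface{} {
            return &hasher{
                tmp: make([]byte, 0, 550), // assumed initial cap
                sha: sha3.NewLegacyKeccak256(),
            }
        },
    }

    func newHasher() *hasher { return hasherPool.Get().(*hasher) }

    func returnHasher(h *hasher) {
        h.tmp = h.tmp[:0]
        h.sha.Reset()
        hasherPool.Put(h)
    }
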
Elad
5bee5d69d7 vendor: added vendor packages necessary for the swarm-network-rewrite merge (#16792)
* vendor: added vendor packages necessary for the swarm-network-rewrite merge into ethereum master

* vendor: removed multihash deps
2018-06-05 12:40:21 +02:00
kiel barry
cbfb40b0aa params: fix golint warnings (#16853)
params: fix golint warnings
2018-06-05 12:31:34 +02:00
Antonio Salazar Cardozo
4cf2b4110e cmd/abigen: support for reading solc output from stdin (#16683)
Allow the --abi flag to be given `-` to indicate that it should read the
ABI information from standard input. It expects to read the solc output
with the --combined-json flag providing bin, abi, userdoc, devdoc, and
metadata, and works very similarly to the internal invocation of solc,
except it allows external invocation of solc.

This facilitates integration with more complex solc invocations, such
as invocations that require path remapping or --allow-paths tweaks.

Simple usage example:

    solc --combined-json bin,abi,userdoc,devdoc,metadata *.sol | abigen --abi -
2018-06-05 12:22:02 +02:00
Mark
0029a869f0 miner: not call commitNewWork if it's a side block (#16751) 2018-06-05 12:10:09 +02:00
Péter Szilágyi
2ab24a2a8f core: concurrent background transaction sender ecrecover 2018-06-05 11:03:55 +03:00
Martin Holst Swende
400332b99d eth/tracers: fix minor off-by-one error (#16879)
* tracing: fix minor off-by-one error

* tracers: go generate
2018-06-05 10:27:07 +03:00
Felföldi Zsolt
a5237a27ea les: add Skip overflow check to GetBlockHeadersMsg handler (#16891) 2018-06-05 10:23:00 +03:00
Péter Szilágyi
7a22e89080 Merge pull request #16894 from hadv/master
core: fix typo in code comment
2018-06-05 10:11:42 +03:00
hadv
e3a993d774 core: fix typo in code comment 2018-06-05 09:56:45 +07:00
Péter Szilágyi
ed40767355 Merge pull request #16800 from rjl493456442/memory_allowance_warining
cmd: cap cache size if exceeds reasonable range
2018-06-04 17:40:51 +03:00
rjl493456442
a20cc75b4a cmd/geth: cap cache allowance 2018-06-04 13:46:39 +03:00
Péter Szilágyi
b659718fd0 Merge pull request #16880 from holiman/http_timeouts
rpc: set timeouts for http server, see #16859
2018-06-04 13:38:23 +03:00
Anton Evangelatov
be2aec092d metrics: expvar support for ResettingTimer (#16878)
* metrics: expvar support for ResettingTimer

* metrics: use integers for percentiles; remove Overall

* metrics: fix edge-case panic for index-out-of-range
2018-06-04 13:05:16 +03:00
Martin Holst Swende
17f80cc2e2 rpc: set timeouts for http server, see #16859 2018-06-04 11:41:55 +02:00
Péter Szilágyi
143c4341d8 core, eth, trie: streaming GC for the trie cache (#16810)
* core, eth, trie: streaming GC for the trie cache

* trie: track memcache statistics
2018-06-04 10:47:43 +03:00
Felix Lange
3f33a7c8ce consensus/ethash: reduce keccak hash allocations (#16857)
Use Read instead of Sum to avoid internal allocations and
copying the state.

name                      old time/op  new time/op  delta
CacheGeneration-8          764ms ± 1%   579ms ± 1%  -24.22%  (p=0.000 n=20+17)
SmallDatasetGeneration-8  75.2ms ±12%  60.6ms ±10%  -19.37%  (p=0.000 n=20+20)
HashimotoLight-8          1.58ms ±11%  1.55ms ± 8%     ~     (p=0.322 n=20+19)
HashimotoFullSmall-8      4.90µs ± 1%  4.88µs ± 1%   -0.31%  (p=0.013 n=19+18)
2018-06-04 10:32:32 +03:00
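
The Read-versus-Sum trick, sketched (assuming a Keccak state that exposes the sponge as an io.Reader, as x/crypto/sha3's does):

    package ethashsketch

    import (
        "hash"

        "golang.org/x/crypto/sha3"
    )

    // keccakState exposes Read on the sponge, so the digest is squeezed
    // directly into dest; Sum would copy the state and append-allocate.
    type keccakState interface {
        hash.Hash
        Read([]byte) (int, error)
    }

    func keccak256Into(dest, data []byte) {
        h := sha3.NewLegacyKeccak256().(keccakState)
        h.Write(data)
        h.Read(dest)
    }
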
Ryan Schneider
c8dcb9584e rpc: use HTTP request context as top-level context (#16861) 2018-06-02 12:26:47 +02:00
kiel barry
af28d12847 console: squash golint warnings (#16836) 2018-05-31 13:59:08 +02:00
kiel barry
0ad32d3be7 ethstats: fix last golint warning (#16837) 2018-05-30 11:36:02 +03:00
Péter Szilágyi
68b0d30d4a VERSION, params: begin 1.8.11 release cycle 2018-05-30 11:04:45 +03:00
Péter Szilágyi
eae63c511c params: release Geth 1.8.10 hotfix 2018-05-30 11:00:07 +03:00
Péter Szilágyi
ca34e8230e Merge pull request #16843 from karalabe/txpool-fix-deadlock
core: fix transaction event asynchronicity
2018-05-30 10:45:02 +03:00
Péter Szilágyi
342ec83d67 core: fix transaction event asynchronicity 2018-05-30 10:14:00 +03:00
Wenbiao Zheng
38c7eb0f26 trie: rename TrieSync to Sync and improve hexToKeybytes (#16804)
This removes a golint warning: type name will be used as trie.TrieSync by
other packages, and that stutters; consider calling this Sync.

In hexToKeybytes len(hex) is even and (even+1)/2 == even/2, remove the +1.
2018-05-29 17:48:43 +02:00
Péter Szilágyi
d51faee240 Merge pull request #16831 from abeln/patch-1
core/vm: fix typo in comment
2018-05-29 15:44:30 +03:00
kimmylin
426f62f1a8 core: improve test for TransactionPriceNonceSort (#16413) 2018-05-29 14:21:04 +02:00
Dmitry Shulyak
7677ec1f34 p2p/discv5: add egress/ingress traffic metrics to discv5 udp transport (#16369) 2018-05-29 13:46:09 +02:00
Abel Nieto
d258eee211 core/vm: fix typo in comment 2018-05-29 13:22:00 +02:00
kiel barry
84f8c0cc1f common: improve documentation comments (#16701)
This commit adds many comments and removes unused code.
It also removes the EmptyHash function, which had some uses
but was silly.
2018-05-29 12:42:21 +02:00
Andrea Franz
998f6564b2 whisper/shhclient: update call to shh_post to expect string instead of bool (#16757)
Fixes #16756
2018-05-29 04:36:31 -04:00
Smilenator
40a2c52397 eth/fetcher: reuse variables for hash and number (#16819) 2018-05-29 10:57:08 +03:00
Mohanson
a9c6ef6905 ethereum: fix a typo in FilterQuery{} (#16827)
Fix a spelling mistake in comment
2018-05-29 10:44:06 +03:00
Péter Szilágyi
ccc0debb63 VERSION, params: begin 1.8.10 release cycle 2018-05-28 13:01:20 +03:00
Péter Szilágyi
ff9b14617e params: release go-ethereum v1.8.9 2018-05-28 12:58:22 +03:00
Wenbiao Zheng
d6ed2f67a8 eth, node, trie: fix minor typos (#16802) 2018-05-24 15:55:20 +03:00
Péter Szilágyi
54294b45b1 Merge pull request #16803 from karalabe/trie-avoid-funccall
trie: cleaner logic, one less func call
2018-05-24 15:54:00 +03:00
Péter Szilágyi
d31802312a trie: cleaner logic, one less func call 2018-05-24 13:46:45 +03:00
Ryan Schneider
55b579e02c core: use a wrapped map to remove contention in TxPool.Get. (#16670)
* core: use a wrapped `map` and `sync.RWMutex` for `TxPool.all` to remove contention in `TxPool.Get`.

* core: Remove redundant `txLookup.Find` and improve comments on txLookup methods.
2018-05-23 15:55:42 +03:00
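
A minimal sketch of the wrapped map (trimmed to the two methods that matter): readers take only an RLock, so concurrent Get calls no longer serialize on the pool's main mutex.

    package coresketch

    import (
        "sync"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // txLookup wraps the pool's transaction map with its own RWMutex.
    type txLookup struct {
        all  map[common.Hash]*types.Transaction
        lock sync.RWMutex
    }

    func (t *txLookup) Get(hash common.Hash) *types.Transaction {
        t.lock.RLock()
        defer t.lock.RUnlock()
        return t.all[hash]
    }

    func (t *txLookup) Add(tx *types.Transaction) {
        t.lock.Lock()
        defer t.lock.Unlock()
        t.all[tx.Hash()] = tx
    }
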
Abel Nieto
be22ee8dda core/vm: fix typo in instructions.go (#16788) 2018-05-23 15:02:10 +03:00
Péter Szilágyi
56de337e57 Merge pull request #16722 from karalabe/trie-iterator-proofs
trie: support proof generation from the iterator
2018-05-23 13:51:09 +03:00
Péter Szilágyi
c934c06cc1 trie: support proof generation from the iterator 2018-05-23 13:02:20 +03:00
gary rong
fbf57d53e2 core/types: convert status type from uint to uint64 (#16784) 2018-05-23 11:10:24 +03:00
gary rong
6ce21a4744 vendor, ethdb: print warning log if leveldb is performing compaction (#16766)
* vendor: update leveldb package

* ethdb: print warning log if db is performing compaction

* ethdb: update annotation and log
2018-05-22 11:12:52 +03:00
kiel barry
9af364e42b node: all golint warnings fixed (#16773)
* node: all golint warnings fixed

* node: rm per peter

* node: rm per peter
2018-05-22 10:29:41 +03:00
kiel barry
09d44247f7 log: fixes for golint warnings (#16775) 2018-05-22 10:28:43 +03:00
kiel barry
0fe47e98c4 trie: fixes to comply with golint (#16771) 2018-05-21 23:41:31 +03:00
Péter Szilágyi
415969f534 Merge pull request #16769 from karalabe/async-broadcasts
eth: propagate blocks and transactions async
2018-05-21 13:42:25 +03:00
Péter Szilágyi
d9cee2c172 eth: propagate blocks and transactions async 2018-05-21 11:32:42 +03:00
Péter Szilágyi
ab6bdbd9b0 Merge pull request #16758 from hadv/fix/typos
Fix some typos in code comments and output logs
2018-05-19 19:40:55 +03:00
Péter Szilágyi
953b5ac015 Merge pull request #16720 from rjl493456442/PreTxsEvent
all: collate new transaction events together
2018-05-19 19:39:28 +03:00
hadv
f2fdb75dd9 core, consensus: fix some typos in code comments and output logs 2018-05-19 15:44:36 +07:00
Péter Szilágyi
f9c456e02d Merge pull request #16753 from karalabe/go-1.10.2
travis, appveyor: bump Go release to 1.10.2
2018-05-18 13:18:54 +03:00
Péter Szilágyi
579bd0f9fb travis, appveyor: bump Go release to 1.10.2 2018-05-18 12:24:04 +03:00
Péter Szilágyi
49719e21bc core, eth: minor txpool event cleanups 2018-05-18 12:08:24 +03:00
rjl493456442
a2e43d28d0 all: collate new transaction events together 2018-05-18 11:46:44 +03:00
Felix Lange
6286c255f1 p2p/enr: updates for discovery v4 compatibility (#16679)
This applies spec changes from ethereum/EIPs#1049 and adds support for
pluggable identity schemes.

Some care has been taken to make the "v4" scheme standalone. It uses
public APIs only and could be moved out of package enr at any time.

A couple of minor changes were needed to make identity schemes work:

- The sequence number is now updated in Set instead of when signing.
- Record is now copy-safe, i.e. calling Set on a shallow copy doesn't
  modify the record it was copied from.
2018-05-17 15:11:27 +02:00
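
The two behavioural points, sketched against the enr API (the enr.IP and enr.UDP entry types are assumptions for this era of the package):

    package enrsketch

    import (
        "net"

        "github.com/ethereum/go-ethereum/p2p/enr"
    )

    func example() {
        var r enr.Record
        r.Set(enr.IP(net.ParseIP("10.3.58.6"))) // seq bumped here, not at signing
        r2 := r                                 // shallow copy
        r2.Set(enr.UDP(30303))                  // copy-safe: r is unchanged
    }
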
Péter Szilágyi
f6bc65fc68 Merge pull request #16739 from karalabe/android-trusty
travis: try to upgrade android builder to trusty
2018-05-14 16:56:02 +03:00
Péter Szilágyi
ff8a033f18 travis: try to upgrade android builder to trusty 2018-05-14 16:42:26 +03:00
Guillaume Ballet
247b5f0369 accounts/abi: allow abi: tags when unpacking structs
Go code users can now tag event struct members with `abi:` to specify which fields the event will be deserialized into.

See PR #16648 for details.
2018-05-14 14:47:31 +02:00
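
For example (the event shape is illustrative), a tagged struct member now receives the ABI field named in the tag rather than relying on name matching:

    package abisketch

    import "math/big"

    // The ABI output named "value" is unpacked into Amount because of the tag.
    type TransferEvent struct {
        Amount *big.Int `abi:"value"`
    }
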
Péter Szilágyi
49ec4f0cd1 VERSION, params: start 1.8.9 release cycle 2018-05-14 13:16:55 +03:00
979 changed files with 142706 additions and 35297 deletions

.github/CODEOWNERS vendored
View File

@@ -1,12 +1,32 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
whisper/ @gballet @gluk256
accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
swarm/bmt @zelig
swarm/dev @lmars
swarm/fuse @jmozah @holisticode
swarm/grafana_dashboards @nonsense
swarm/metrics @nonsense @holisticode
swarm/multihash @nolash
swarm/network/bitvector @zelig @janos @gbalint
swarm/network/priorityqueue @zelig @janos @gbalint
swarm/network/simulations @zelig
swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
swarm/network/stream/intervals @janos
swarm/network/stream/testing @zelig
swarm/pot @zelig
swarm/pss @nolash @zelig @nonsense
swarm/services @zelig
swarm/state @justelad
swarm/storage/encryption @gbalint @zelig @nagydani
swarm/storage/mock @janos
swarm/storage/mru @nolash
swarm/testutil @lmars
whisper/ @gballet @gluk256

View File

@@ -7,5 +7,5 @@ closeComment: >
This issue has been automatically closed because there has been no response
to our request for more information from the original author. With only the
information that is currently in the issue, we don't have enough information
to take action. Please reach out if you have or find the answers we need so
that we can investigate further.
to take action. Please reach out if you have more relevant information or
answers to our questions so that we can investigate further.

View File

@@ -14,11 +14,22 @@ matrix:
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.10.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.11.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
@@ -27,18 +38,16 @@ matrix:
- go run build/ci.go test -coverage $TEST_PACKAGES
- os: osx
go: 1.10.x
go: 1.11.x
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- brew update
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
go: 1.10.x
go: 1.11.x
env:
- lint
git:
@@ -49,7 +58,7 @@ matrix:
# This builder does the Ubuntu PPA upload
- os: linux
dist: trusty
go: 1.10.x
go: 1.11.x
env:
- ubuntu-ppa
git:
@@ -68,7 +77,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.10.x
go: 1.11.x
env:
- azure-linux
git:
@@ -102,7 +111,7 @@ matrix:
dist: trusty
services:
- docker
go: 1.10.x
go: 1.11.x
env:
- azure-linux-mips
git:
@@ -126,7 +135,7 @@ matrix:
# This builder does the Android Maven and Azure uploads
- os: linux
dist: precise # Needed for the android tools
dist: trusty
addons:
apt:
packages:
@@ -146,16 +155,16 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://storage.googleapis.com/golang/go1.10.1.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip -o android-ndk-r16b.zip
- unzip -q android-ndk-r16b.zip && rm android-ndk-r16b.zip
- mv android-ndk-r16b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r16b
- curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip
- unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip
- mv android-ndk-r17b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r17b
- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -163,7 +172,7 @@ matrix:
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.10.x
go: 1.11.x
env:
- azure-osx
- azure-ios
@@ -192,7 +201,7 @@ matrix:
# This builder does the Azure archive purges to avoid accumulating junk
- os: linux
dist: trusty
go: 1.10.x
go: 1.11.x
env:
- azure-purge
git:

View File

@@ -171,3 +171,4 @@ xiekeyang <xiekeyang@users.noreply.github.com>
yoza <yoza.is12s@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com>
Ralph Caraveo <deckarep@gmail.com>

View File

@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.10-alpine as builder
FROM golang:1.11-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers

View File

@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.10-alpine as builder
FROM golang:1.11-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers

View File

@@ -41,6 +41,7 @@ lint: ## Run linters.
build/env.sh go run build/ci.go lint
clean:
./build/clean_go_build_cache.sh
rm -fr build/_workspace/pkg/ $(GOBIN)/*
# The devtools target installs tools required for 'go generate'.

View File

@@ -7,7 +7,7 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
)](https://godoc.org/github.com/ethereum/go-ethereum)
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)
Automated builds are available for stable releases and the unstable master branch.
Binary archives are published at https://geth.ethereum.org/downloads/.
@@ -34,13 +34,13 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
| `swarm` | Swarm daemon and tools. This is the entrypoint for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/ethereum/go-ethereum/tree/master/swarm) for more information. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running geth
@@ -69,7 +69,7 @@ This command will:
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
This tool is optional and if you leave it out you can always attach to an already running Geth instance
with `geth attach`.
### Full node on the Ethereum test network

View File

@@ -1 +0,0 @@
1.8.8

View File

@@ -111,9 +111,14 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}
// If the output interface is a struct, make sure names don't collide
// If the interface is a struct, get the abi->struct_field mapping
var abi2struct map[string]string
if kind == reflect.Struct {
if err := requireUniqueStructFieldNames(arguments); err != nil {
var err error
abi2struct, err = mapAbiToStructFields(arguments, value)
if err != nil {
return err
}
}
@@ -123,9 +128,10 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
switch kind {
case reflect.Struct:
err := unpackStruct(value, reflectValue, arg)
if err != nil {
return err
if structField, ok := abi2struct[arg.Name]; ok {
if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
return err
}
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
@@ -151,17 +157,22 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interf
if len(marshalledValues) != 1 {
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
}
elem := reflect.ValueOf(v).Elem()
kind := elem.Kind()
reflectValue := reflect.ValueOf(marshalledValues[0])
var abi2struct map[string]string
if kind == reflect.Struct {
//make sure names don't collide
if err := requireUniqueStructFieldNames(arguments); err != nil {
var err error
if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
return err
}
return unpackStruct(elem, reflectValue, arguments[0])
arg := arguments.NonIndexed()[0]
if structField, ok := abi2struct[arg.Name]; ok {
return set(elem.FieldByName(structField), reflectValue, arg)
}
return nil
}
return set(elem, reflectValue, arguments.NonIndexed()[0])
@@ -277,18 +288,3 @@ func capitalise(input string) string {
}
return strings.ToUpper(input[:1]) + input[1:]
}
//unpackStruct extracts each argument into its corresponding struct field
func unpackStruct(value, reflectValue reflect.Value, arg Argument) error {
name := capitalise(arg.Name)
typ := value.Type()
for j := 0; j < typ.NumField(); j++ {
// TODO read tags: `abi:"fieldName"`
if typ.Field(j).Name == name {
if err := set(value.Field(j), reflectValue, arg); err != nil {
return err
}
}
}
return nil
}

View File

@@ -65,11 +65,11 @@ type SimulatedBackend struct {
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)
backend := &SimulatedBackend{
database: database,
@@ -324,18 +324,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
// Initialize unset filter boundaries to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaries to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct and execute the filter
filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
@@ -430,6 +436,10 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}
func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return fb.bc.GetHeaderByHash(hash), nil
}
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
@@ -454,7 +464,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
return logs, nil
}
func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil

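Downstream of this diff, constructing a simulated backend now takes the block gas limit explicitly; a usage sketch:

    package backendsketch

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func newTestBackend() *backends.SimulatedBackend {
        key, _ := crypto.GenerateKey()
        addr := crypto.PubkeyToAddress(key.PublicKey)
        alloc := core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000)}}
        return backends.NewSimulatedBackend(alloc, 10000000) // alloc + gas limit
    }
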
View File

@@ -207,7 +207,7 @@ func bindTypeGo(kind abi.Type) string {
// The inner function of bindTypeGo, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
// The length of the matched part is returned, with the translated type.
func bindUnnestedTypeGo(stringKind string) (int, string) {
switch {
@@ -255,7 +255,7 @@ func bindTypeJava(kind abi.Type) string {
// The inner function of bindTypeJava, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
// The length of the matched part is returned, with the translated type.
func bindUnnestedTypeJava(stringKind string) (int, string) {
switch {

View File

@@ -229,7 +229,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy an interaction tester contract and call a transaction on it
_, _, interactor, err := DeployInteractor(auth, sim, "Deploy string")
@@ -270,7 +270,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a tuple tester contract and execute a structured call on it
_, _, getter, err := DeployGetter(auth, sim)
@@ -302,7 +302,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a tuple tester contract and execute a structured call on it
_, _, tupler, err := DeployTupler(auth, sim)
@@ -344,7 +344,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a slice tester contract and execute a n array call on it
_, _, slicer, err := DeploySlicer(auth, sim)
@@ -378,7 +378,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a default method invoker contract and execute its default method
_, _, defaulter, err := DeployDefaulter(auth, sim)
@@ -411,7 +411,7 @@ var bindTests = []struct {
`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`,
`
// Create a simulator and wrap a non-deployed contract
sim := backends.NewSimulatedBackend(nil)
sim := backends.NewSimulatedBackend(nil, uint64(10000000000))
nonexistent, err := NewNonExistent(common.Address{}, sim)
if err != nil {
@@ -447,7 +447,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a funky gas pattern contract
_, _, limiter, err := DeployFunkyGasPattern(auth, sim)
@@ -482,7 +482,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a sender tester contract and execute a structured call on it
_, _, callfrom, err := DeployCallFrom(auth, sim)
@@ -542,7 +542,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a underscorer tester contract and execute a structured call on it
_, _, underscorer, err := DeployUnderscorer(auth, sim)
@@ -612,7 +612,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy an eventer contract
_, _, eventer, err := DeployEventer(auth, sim)
@@ -761,7 +761,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
//deploy the test contract
_, _, testContract, err := DeployDeeplyNestedArray(auth, sim)
@@ -820,7 +820,7 @@ func TestBindings(t *testing.T) {
t.Skip("go sdk not found for testing")
}
// Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845)
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}")
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil,uint64(10000000000)))\n}")
linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil)
if err != nil {
t.Fatalf("failed check for goimports symlink bug: %v", err)

View File

@@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct {
func TestWaitDeployed(t *testing.T) {
for name, test := range waitDeployedTests {
backend := backends.NewSimulatedBackend(core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
})
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
}, 10000000,
)
// Create the transaction.
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))

View File

@@ -58,12 +58,28 @@ var jsonEventPledge = []byte(`{
"type": "event"
}`)
var jsonEventMixedCase = []byte(`{
"anonymous": false,
"inputs": [{
"indexed": false, "name": "value", "type": "uint256"
}, {
"indexed": false, "name": "_value", "type": "uint256"
}, {
"indexed": false, "name": "Value", "type": "uint256"
}],
"name": "MixedCase",
"type": "event"
}`)
// 1000000
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
// 1000000,2218516807680,1000001
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
func TestEventId(t *testing.T) {
var table = []struct {
definition string
@@ -121,6 +137,27 @@ func TestEventTupleUnpack(t *testing.T) {
Value *big.Int
}
type EventTransferWithTag struct {
// this is valid because `value` is not exportable,
// so value is only unmarshalled into `Value1`.
value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithSameFieldAndTag struct {
Value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithDuplicatedTag struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"value"`
}
type BadEventTransferWithEmptyTag struct {
Value *big.Int `abi:""`
}
type EventPledge struct {
Who common.Address
Wad *big.Int
@@ -133,9 +170,16 @@ func TestEventTupleUnpack(t *testing.T) {
Currency [3]byte
}
type EventMixedCase struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"_value"`
Value3 *big.Int `abi:"Value"`
}
bigint := new(big.Int)
bigintExpected := big.NewInt(1000000)
bigintExpected2 := big.NewInt(2218516807680)
bigintExpected3 := big.NewInt(1000001)
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
var testCases = []struct {
data string
@@ -158,6 +202,34 @@ func TestEventTupleUnpack(t *testing.T) {
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into slice",
}, {
transferData1,
&EventTransferWithTag{},
&EventTransferWithTag{Value1: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure with abi: tag",
}, {
transferData1,
&BadEventTransferWithDuplicatedTag{},
&BadEventTransferWithDuplicatedTag{},
jsonEventTransfer,
"struct: abi tag in 'Value2' already mapped",
"Can not unpack ERC20 Transfer event with duplicated abi tag",
}, {
transferData1,
&BadEventTransferWithSameFieldAndTag{},
&BadEventTransferWithSameFieldAndTag{},
jsonEventTransfer,
"abi: multiple variables maps to the same abi field 'value'",
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
}, {
transferData1,
&BadEventTransferWithEmptyTag{},
&BadEventTransferWithEmptyTag{},
jsonEventTransfer,
"struct: abi tag in 'Value' is empty",
"Can not unpack ERC20 Transfer event with an empty tag",
}, {
pledgeData1,
&EventPledge{},
@@ -216,6 +288,13 @@ func TestEventTupleUnpack(t *testing.T) {
jsonEventPledge,
"abi: cannot unmarshal tuple into map[string]interface {}",
"Can not unpack Pledge event into map",
}, {
mixedCaseData1,
&EventMixedCase{},
&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
jsonEventMixedCase,
"",
"Can unpack abi variables with mixed case",
}}
for _, tc := range testCases {
@@ -227,7 +306,7 @@ func TestEventTupleUnpack(t *testing.T) {
assert.Nil(err, "Should be able to unpack event data.")
assert.Equal(tc.expected, tc.dest, tc.name)
} else {
assert.EqualError(err, tc.error)
assert.EqualError(err, tc.error, tc.name)
}
})
}

View File

@@ -47,10 +47,8 @@ type Method struct {
// Please note that "int" is a substitute for its canonical representation "int256"
func (method Method) Sig() string {
types := make([]string, len(method.Inputs))
i := 0
for _, input := range method.Inputs {
for i, input := range method.Inputs {
types[i] = input.Type.String()
i++
}
return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
}

View File

@@ -31,29 +31,14 @@ var (
uint16T = reflect.TypeOf(uint16(0))
uint32T = reflect.TypeOf(uint32(0))
uint64T = reflect.TypeOf(uint64(0))
intT = reflect.TypeOf(int(0))
int8T = reflect.TypeOf(int8(0))
int16T = reflect.TypeOf(int16(0))
int32T = reflect.TypeOf(int32(0))
int64T = reflect.TypeOf(int64(0))
addressT = reflect.TypeOf(common.Address{})
intTS = reflect.TypeOf([]int(nil))
int8TS = reflect.TypeOf([]int8(nil))
int16TS = reflect.TypeOf([]int16(nil))
int32TS = reflect.TypeOf([]int32(nil))
int64TS = reflect.TypeOf([]int64(nil))
)
// U256 converts a big Int into a 256bit EVM number.
func U256(n *big.Int) []byte {
return math.PaddedBigBytes(math.U256(n), 32)
}
// checks whether the given reflect value is signed. This also works for slices with a number type
func isSigned(v reflect.Value) bool {
switch v.Type() {
case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T:
return true
}
return false
}

View File

@@ -19,7 +19,6 @@ package abi
import (
"bytes"
"math/big"
"reflect"
"testing"
)
@@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) {
t.Errorf("expected %x got %x", ubytes, unsigned)
}
}
func TestSigned(t *testing.T) {
if isSigned(reflect.ValueOf(uint(10))) {
t.Error("signed")
}
if !isSigned(reflect.ValueOf(int(10))) {
t.Error("not signed")
}
}

View File

@@ -19,6 +19,7 @@ package abi
import (
"fmt"
"reflect"
"strings"
)
// indirect recursively dereferences the value until it either gets the value
@@ -111,18 +112,101 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
return nil
}
// requireUniqueStructFieldNames makes sure field names don't collide
func requireUniqueStructFieldNames(args Arguments) error {
exists := make(map[string]bool)
for _, arg := range args {
field := capitalise(arg.Name)
if field == "" {
return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
// mapAbiToStructFields maps abi fields to struct fields.
// first round: for each exportable field that contains an `abi:""` tag
// and whose name exists in the arguments, pair them together.
// second round: for each argument field that has not already been linked,
// find the variable it is expected to map into, and if it exists and has
// not been used, pair them.
func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
typ := value.Type()
abi2struct := make(map[string]string)
struct2abi := make(map[string]string)
// first round ~~~
for i := 0; i < typ.NumField(); i++ {
structFieldName := typ.Field(i).Name
// skip private struct fields.
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
continue
}
if exists[field] {
return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
// skip fields that have no abi:"" tag.
var ok bool
var tagName string
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
continue
}
exists[field] = true
// check if tag is empty.
if tagName == "" {
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
}
// check which argument field matches with the abi tag.
found := false
for _, abiField := range args.NonIndexed() {
if abiField.Name == tagName {
if abi2struct[abiField.Name] != "" {
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
}
// pair them
abi2struct[abiField.Name] = structFieldName
struct2abi[structFieldName] = abiField.Name
found = true
}
}
// check if this tag has been mapped.
if !found {
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
}
}
return nil
// second round ~~~
for _, arg := range args {
abiFieldName := arg.Name
structFieldName := capitalise(abiFieldName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
}
// this abi has already been paired, skip it... unless there exists another, yet unassigned
// struct field with the same field name. If so, raise an error:
// abi: [ { "name": "value" } ]
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
if abi2struct[abiFieldName] != "" {
if abi2struct[abiFieldName] != structFieldName &&
struct2abi[structFieldName] == "" &&
value.FieldByName(structFieldName).IsValid() {
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
}
continue
}
// return an error if this struct field has already been paired.
if struct2abi[structFieldName] != "" {
return nil, fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", structFieldName)
}
if value.FieldByName(structFieldName).IsValid() {
// pair them
abi2struct[abiFieldName] = structFieldName
struct2abi[structFieldName] = abiFieldName
} else {
// not paired, but annotate as used, to detect cases like
// abi : [ { "name": "value" }, { "name": "_value" } ]
// struct { Value *big.Int }
struct2abi[structFieldName] = abiFieldName
}
}
return abi2struct, nil
}
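
To make the two rounds concrete, a hedged usage sketch: the struct, event and argument names below are hypothetical, and it assumes the Arguments.Unpack entry point that calls into this mapping.

    package abiexample

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/accounts/abi"
        "github.com/ethereum/go-ethereum/common"
    )

    // transferRecord routes the abi argument "_value" into Amount via the tag
    // (first round), while From is matched by its capitalised name (second round).
    type transferRecord struct {
        From   common.Address
        Amount *big.Int `abi:"_value"`
    }

    // decode assumes parsed contains a Transfer event whose non-indexed inputs
    // are named "from" and "_value".
    func decode(parsed abi.ABI, data []byte) (transferRecord, error) {
        var rec transferRecord
        err := parsed.Events["Transfer"].Inputs.Unpack(&rec, data)
        return rec, err
    }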

View File

@@ -103,7 +103,12 @@ func NewType(t string) (typ Type, err error) {
return typ, err
}
// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
matches := typeRegex.FindAllStringSubmatch(t, -1)
if len(matches) == 0 {
return Type{}, fmt.Errorf("invalid type '%v'", t)
}
parsedType := matches[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {

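The new guard turns what used to be an index-out-of-range panic on unparseable type strings into an ordinary error. A hypothetical calling sketch:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/accounts/abi"
    )

    func main() {
        // A purely numeric string never matches the type regex, so matches is
        // empty and NewType now reports an error instead of panicking on [0].
        if _, err := abi.NewType("1234"); err != nil {
            fmt.Println(err) // invalid type '1234'
        }
    }
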
View File

@@ -25,8 +25,17 @@ import (
"github.com/ethereum/go-ethereum/common"
)
var (
maxUint256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
big.NewInt(-1))
maxInt256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
big.NewInt(-1))
)
// reads the integer based on its kind
func readInteger(kind reflect.Kind, b []byte) interface{} {
func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
switch kind {
case reflect.Uint8:
return b[len(b)-1]
@@ -45,7 +54,20 @@ func readInteger(kind reflect.Kind, b []byte) interface{} {
case reflect.Int64:
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
default:
return new(big.Int).SetBytes(b)
// the only case left for integer is int256/uint256.
// big.Int.SetBytes can't tell on its own whether a number is negative or positive.
// In the EVM, if the returned number > max int256, it is negative.
ret := new(big.Int).SetBytes(b)
if typ == UintTy {
return ret
}
if ret.Cmp(maxInt256) > 0 {
ret.Add(maxUint256, big.NewInt(0).Neg(ret))
ret.Add(ret, big.NewInt(1))
ret.Neg(ret)
}
return ret
}
}
@@ -179,7 +201,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+end]), nil
case IntTy, UintTy:
return readInteger(t.Kind, returnOutput), nil
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
return readBool(returnOutput)
case AddressTy:

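The sign handling deserves a worked example: big.Int.SetBytes always produces a non-negative value, so a returned word above 2^255-1 has to wrap around to a negative int256. A standalone sketch, arithmetically equivalent to the add/neg sequence above:

    package main

    import (
        "encoding/hex"
        "fmt"
        "math/big"
    )

    // fromTwosComplement interprets a 32-byte big-endian word as a signed
    // int256: values above 2^255-1 are shifted down by 2^256.
    func fromTwosComplement(b []byte) *big.Int {
        ret := new(big.Int).SetBytes(b)
        maxInt256 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(1))
        if ret.Cmp(maxInt256) > 0 {
            ret.Sub(ret, new(big.Int).Lsh(big.NewInt(1), 256))
        }
        return ret
    }

    func main() {
        word, _ := hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
        fmt.Println(fromTwosComplement(word)) // -1
    }
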
View File

@@ -117,6 +117,11 @@ var unpackTests = []unpackTest{
enc: "0000000000000000000000000000000000000000000000000000000000000001",
want: big.NewInt(1),
},
{
def: `[{"type": "int256"}]`,
enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
want: big.NewInt(-1),
},
{
def: `[{"type": "address"}]`,
enc: "0000000000000000000000000100000000000000000000000000000000000000",

View File

@@ -106,7 +106,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code o verify the transaction),
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing info for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock

View File

@@ -27,10 +27,10 @@ import (
"sync"
"time"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gopkg.in/fatih/set.v0"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
@@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
keydir: keydir,
byAddr: make(map[common.Address][]accounts.Account),
notify: make(chan struct{}, 1),
fileC: fileCache{all: set.NewNonTS()},
fileC: fileCache{all: mapset.NewThreadUnsafeSet()},
}
ac.watcher = newWatcher(ac)
return ac, ac.notify
@@ -237,7 +237,7 @@ func (ac *accountCache) scanAccounts() error {
log.Debug("Failed to reload keystore contents", "err", err)
return err
}
if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
return nil
}
// Create a helper method to scan the contents of the key files
@@ -272,15 +272,15 @@ func (ac *accountCache) scanAccounts() error {
// Process all the file diffs
start := time.Now()
for _, p := range creates.List() {
for _, p := range creates.ToSlice() {
if a := readAccount(p.(string)); a != nil {
ac.add(*a)
}
}
for _, p := range deletes.List() {
for _, p := range deletes.ToSlice() {
ac.deleteByFile(p.(string))
}
for _, p := range updates.List() {
for _, p := range updates.ToSlice() {
path := p.(string)
ac.deleteByFile(path)
if a := readAccount(path); a != nil {

View File

@@ -24,20 +24,20 @@ import (
"sync"
"time"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/log"
set "gopkg.in/fatih/set.v0"
)
// fileCache is a cache of files seen during scan of keystore.
type fileCache struct {
all *set.SetNonTS // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified
all mapset.Set // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified
mu sync.RWMutex
}
// scan performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: creates, deletes, updates.
func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
t0 := time.Now()
// List all the files from the keystore folder
@@ -51,14 +51,14 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
defer fc.mu.Unlock()
// Iterate all the files and gather their metadata
all := set.NewNonTS()
mods := set.NewNonTS()
all := mapset.NewThreadUnsafeSet()
mods := mapset.NewThreadUnsafeSet()
var newLastMod time.Time
for _, fi := range files {
// Skip any non-key files from the folder
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
// Skip any non-key files from the folder
if nonKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
@@ -76,9 +76,9 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
t2 := time.Now()
// Update the tracked files and return the three sets
deletes := set.Difference(fc.all, all) // Deletes = previous - current
creates := set.Difference(all, fc.all) // Creates = current - previous
updates := set.Difference(mods, creates) // Updates = modified - creates
deletes := fc.all.Difference(all) // Deletes = previous - current
creates := all.Difference(fc.all) // Creates = current - previous
updates := mods.Difference(creates) // Updates = modified - creates
fc.all, fc.lastMod = all, newLastMod
t3 := time.Now()
@@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
return creates, deletes, updates, nil
}
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
func skipKeyFile(fi os.FileInfo) bool {
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
func nonKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true

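The migration replaces gopkg.in/fatih/set.v0 with github.com/deckarep/golang-set, whose Set API the scan now calls directly. A minimal sketch of the diff semantics with that library:

    package main

    import (
        "fmt"

        mapset "github.com/deckarep/golang-set"
    )

    func main() {
        previous := mapset.NewThreadUnsafeSet()
        previous.Add("a.json")
        previous.Add("b.json")

        current := mapset.NewThreadUnsafeSet()
        current.Add("b.json")
        current.Add("c.json")

        // Same shape as the scan above: deletes = previous - current,
        // creates = current - previous.
        deletes := previous.Difference(current)
        creates := current.Difference(previous)

        fmt.Println(deletes.Cardinality(), deletes.ToSlice()) // 1 [a.json]
        fmt.Println(creates.Cardinality(), creates.ToSlice()) // 1 [c.json]
    }
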
View File

@@ -179,26 +179,34 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
return key, a, err
}
func writeKeyFile(file string, content []byte) error {
func writeTemporaryKeyFile(file string, content []byte) (string, error) {
// Create the keystore directory with appropriate permissions
// in case it is not present yet.
const dirPerm = 0700
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
return err
return "", err
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return err
return "", err
}
if _, err := f.Write(content); err != nil {
f.Close()
os.Remove(f.Name())
return err
return "", err
}
f.Close()
return os.Rename(f.Name(), file)
return f.Name(), nil
}
func writeKeyFile(file string, content []byte) error {
name, err := writeTemporaryKeyFile(file, content)
if err != nil {
return err
}
return os.Rename(name, file)
}
// keyFileName implements the naming convention for keyfiles:

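Splitting writeKeyFile into writeTemporaryKeyFile plus an explicit rename is what lets StoreKey verify the temporary file before moving it into place. The underlying pattern, as a generic sketch (the atomicWrite name is hypothetical):

    package main

    import (
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // atomicWrite writes to a hidden temp file in the target directory, then
    // renames it into place, so readers never observe a partial file.
    func atomicWrite(file string, content []byte) error {
        f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
        if err != nil {
            return err
        }
        if _, err := f.Write(content); err != nil {
            f.Close()
            os.Remove(f.Name())
            return err
        }
        f.Close()
        // Rename is atomic when source and target are on the same filesystem.
        return os.Rename(f.Name(), file)
    }

    func main() {
        _ = atomicWrite(filepath.Join(os.TempDir(), "example.json"), []byte(`{"ok":true}`))
    }
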
View File

@@ -50,7 +50,7 @@ var (
var KeyStoreType = reflect.TypeOf(&KeyStore{})
// KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
var KeyStoreScheme = "keystore"
const KeyStoreScheme = "keystore"
// Maximum time between wallet refreshes (if filesystem notifications don't work).
const walletRefreshCycle = 3 * time.Second
@@ -78,7 +78,7 @@ type unlocked struct {
// NewKeyStore creates a keystore for the given directory.
func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP}}
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
ks.init(keydir)
return ks
}

View File

@@ -28,18 +28,19 @@ package keystore
import (
"bytes"
"crypto/aes"
crand "crypto/rand"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/randentropy"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
@@ -72,6 +73,10 @@ type keyStorePassphrase struct {
keysDirPath string
scryptN int
scryptP int
// skipKeyFileVerification disables the security feature which reads back
// and decrypts any newly created keyfile. This should be 'false' in all
// cases except tests -- setting this to 'true' is not recommended.
skipKeyFileVerification bool
}
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
@@ -93,7 +98,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
return a.Address, err
}
@@ -102,7 +107,25 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
if err != nil {
return err
}
return writeKeyFile(filename, keyjson)
// Write into temporary file
tmpName, err := writeTemporaryKeyFile(filename, keyjson)
if err != nil {
return err
}
if !ks.skipKeyFileVerification {
// Verify that we can decrypt the file with the given password.
_, err = ks.GetKey(key.Address, tmpName, auth)
if err != nil {
msg := "An error was encountered when saving and verifying the keystore file. \n" +
"This indicates that the keystore is corrupted. \n" +
"The corrupted file is stored at \n%v\n" +
"Please file a ticket at:\n\n" +
"https://github.com/ethereum/go-ethereum/issues." +
"The error was : %s"
return fmt.Errorf(msg, tmpName, err)
}
}
return os.Rename(tmpName, filename)
}
func (ks keyStorePassphrase) JoinPath(filename string) string {
@@ -116,7 +139,11 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
// blob that can be decrypted later on.
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
authArray := []byte(auth)
salt := randentropy.GetEntropyCSPRNG(32)
salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
if err != nil {
return nil, err
@@ -124,7 +151,10 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
encryptKey := derivedKey[:16]
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
iv := make([]byte, aes.BlockSize) // 16
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
if err != nil {
return nil, err

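Replacing the randentropy helper with direct crypto/rand reads makes the entropy source explicit. The pattern in isolation:

    package main

    import (
        "crypto/rand"
        "fmt"
        "io"
    )

    func main() {
        // Same shape as the salt/IV generation above: fill a fixed-size buffer
        // from the OS CSPRNG and treat failure as fatal.
        salt := make([]byte, 32)
        if _, err := io.ReadFull(rand.Reader, salt); err != nil {
            panic("reading from crypto/rand failed: " + err.Error())
        }
        fmt.Printf("%x\n", salt)
    }
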
View File

@@ -37,7 +37,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
t.Fatal(err)
}
if encrypted {
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP}
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
} else {
ks = &keyStorePlain{d}
}
@@ -191,7 +191,7 @@ func TestV1_1(t *testing.T) {
func TestV1_2(t *testing.T) {
t.Parallel()
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP}
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
k, err := ks.GetKey(addr, file, "g")

View File

@@ -76,12 +76,12 @@ func (u URL) MarshalJSON() ([]byte, error) {
// UnmarshalJSON parses url.
func (u *URL) UnmarshalJSON(input []byte) error {
var textUrl string
err := json.Unmarshal(input, &textUrl)
var textURL string
err := json.Unmarshal(input, &textURL)
if err != nil {
return err
}
url, err := parseURL(textUrl)
url, err := parseURL(textURL)
if err != nil {
return err
}

accounts/url_test.go (new file, 96 lines)
View File

@@ -0,0 +1,96 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"testing"
)
func TestURLParsing(t *testing.T) {
url, err := parseURL("https://ethereum.org")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
}
if url.Path != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
}
_, err = parseURL("ethereum.org")
if err == nil {
t.Error("expected err, got: nil")
}
}
func TestURLString(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
if url.String() != "https://ethereum.org" {
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
}
url = URL{Scheme: "", Path: "ethereum.org"}
if url.String() != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.String())
}
}
func TestURLMarshalJSON(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
json, err := url.MarshalJSON()
if err != nil {
t.Errorf("unexpcted error: %v", err)
}
if string(json) != "\"https://ethereum.org\"" {
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
}
}
func TestURLUnmarshalJSON(t *testing.T) {
url := &URL{}
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
if err != nil {
t.Errorf("unexpcted error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
}
if url.Path != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "https", url.Path)
}
}
func TestURLComparison(t *testing.T) {
tests := []struct {
urlA URL
urlB URL
expect int
}{
{URL{"https", "ethereum.org"}, URL{"https", "ethereum.org"}, 0},
{URL{"http", "ethereum.org"}, URL{"https", "ethereum.org"}, -1},
{URL{"https", "ethereum.org/a"}, URL{"https", "ethereum.org"}, 1},
{URL{"https", "abc.org"}, URL{"https", "ethereum.org"}, -1},
}
for i, tt := range tests {
result := tt.urlA.Cmp(tt.urlB)
if result != tt.expect {
t.Errorf("test %d: cmp mismatch: expected: %d, got: %d", i, tt.expect, result)
}
}
}

View File

@@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 {
}
// Name returns the friendly message type name of a specific protocol buffer
// type numbers.
// type number.
func Name(kind uint16) string {
name := MessageType_name[int32(kind)]
if len(name) < 12 {

View File

@@ -302,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
for i, component := range derivationPath {
binary.BigEndian.PutUint32(path[1+4*i:], component)
}
// Create the transaction RLP based on whether legacy or EIP155 signing was requeste
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
var (
txrlp []byte
err error

View File

@@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GETH_ARCH%.zip
- 7z x go1.10.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-%GETH_ARCH%.zip
- 7z x go1.11.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version

View File

@@ -1,560 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt provides a binary merkle tree implementation
package bmt
import (
"fmt"
"hash"
"io"
"strings"
"sync"
"sync/atomic"
)
/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g. keccak256 SHA3)
It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
Two implementations are provided:
* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
control structure to coordinate the concurrent routines
It implements the ChunkHash interface as well as the go standard hash.Hash interface
*/
const (
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
// the maximum number of concurrent BMT hashing operations performed by the same hasher
DefaultPoolSize = 8
)
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// implements the hash.Hash interface
// reuse pool of Tree-s for amortised memory allocation and resource control
// supports order-agnostic concurrent segment writes
// as well as sequential read and write
// can not be called concurrently on more than one chunk
// can be further appended after Sum
// Reset gives back the Tree to the pool and guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
type Hasher struct {
pool *TreePool // BMT resource pool
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
blocksize int // segment size (size of hash) also for hash.Hash
count int // segment count
size int // for hash.Hash same as hashsize
cur int // cursor position for rightmost currently open chunk
segment []byte // the rightmost open segment (not complete)
depth int // index of last level
result chan []byte // result channel
hash []byte // to record the result
max int32 // max segments for SegmentWriter interface
blockLength []byte // The block length that needs to be added in Sum
}
// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
pool: p,
depth: depth(p.SegmentCount),
size: p.SegmentSize,
blocksize: p.SegmentSize,
count: p.SegmentCount,
result: make(chan []byte),
}
}
// Node is a reusable segment hasher representing a node in a BMT
// it allows for continued writes after a Sum
// and is left in completely reusable state after Reset
type Node struct {
level, index int // position of node for information/logging only
initial bool // first and last node
root bool // whether the node is root to a smaller BMT
isLeft bool // whether it is left side of the parent double segment
unbalanced bool // indicates if a node has only the left segment
parent *Node // BMT connections
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte
}
// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
return &Node{
parent: parent,
level: level,
index: index,
initial: index == 0,
isLeft: index%2 == 0,
}
}
// TreePool provides a pool of Trees used as resources by Hasher
// a Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// Hasher Reset releases the Tree to the pool
type TreePool struct {
lock sync.Mutex
c chan *Tree
hasher BaseHasher
SegmentSize int
SegmentCount int
Capacity int
count int
}
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
// on GetTree it reuses free Trees or creates a new one if size is not reached
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
return &TreePool{
c: make(chan *Tree, capacity),
hasher: hasher,
SegmentSize: hasher().Size(),
SegmentCount: segmentCount,
Capacity: capacity,
}
}
// Drain drains the pool until it has no more than n resources
func (p *TreePool) Drain(n int) {
p.lock.Lock()
defer p.lock.Unlock()
for len(p.c) > n {
<-p.c
p.count--
}
}
// Reserve is blocking until it returns an available Tree
// it reuses free Trees or creates a new one if size is not reached
func (p *TreePool) Reserve() *Tree {
p.lock.Lock()
defer p.lock.Unlock()
var t *Tree
if p.count == p.Capacity {
return <-p.c
}
select {
case t = <-p.c:
default:
t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
p.count++
}
return t
}
// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in reusable state
// does not need locking
func (p *TreePool) Release(t *Tree) {
p.c <- t // can never fail but...
}
// Tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to pick one for each chunk hash
// the Tree is 'locked' while not in the pool
type Tree struct {
leaves []*Node
}
// Draw draws the BMT (badly)
func (t *Tree) Draw(hash []byte, d int) string {
var left, right []string
var anc []*Node
for i, n := range t.leaves {
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
if i%2 == 0 {
anc = append(anc, n.parent)
}
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
}
anc = t.leaves
var hashes [][]string
for l := 0; len(anc) > 0; l++ {
var nodes []*Node
hash := []string{""}
for i, n := range anc {
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
if i%2 == 0 && n.parent != nil {
nodes = append(nodes, n.parent)
}
}
hash = append(hash, "")
hashes = append(hashes, hash)
anc = nodes
}
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
total := 60
del := " "
var rows []string
for i := len(hashes) - 1; i >= 0; i-- {
var textlen int
hash := hashes[i]
for _, s := range hash {
textlen += len(s)
}
if total < textlen {
total = textlen + len(hash)
}
delsize := (total - textlen) / (len(hash) - 1)
if delsize > len(del) {
delsize = len(del)
}
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
rows = append(rows, row)
}
rows = append(rows, strings.Join(left, " "))
rows = append(rows, strings.Join(right, " "))
return strings.Join(rows, "\n") + "\n"
}
// NewTree initialises the Tree by building up the nodes of a BMT
// segment size is stipulated to be the size of the hash
// segmentCount needs to be positive integer and does not need to be
// a power of two and can even be an odd number
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
n := NewNode(0, 0, nil)
n.root = true
prevlevel := []*Node{n}
// iterate over levels and creates 2^level nodes
level := 1
count := 2
for d := 1; d <= depth(segmentCount); d++ {
nodes := make([]*Node, count)
for i := 0; i < len(nodes); i++ {
parent := prevlevel[i/2]
t := NewNode(level, i, parent)
nodes[i] = t
}
prevlevel = nodes
level++
count *= 2
}
// the datanode level is the nodes on the last level where
return &Tree{
leaves: prevlevel,
}
}
// methods needed by hash.Hash
// Size returns the size
func (h *Hasher) Size() int {
return h.size
}
// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
return h.blocksize
}
// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (h *Hasher) Sum(b []byte) (r []byte) {
t := h.bmt
i := h.cur
n := t.leaves[i]
j := i
// must run strictly before all nodes calculate
// datanodes are guaranteed to have a parent
if len(h.segment) > h.size && i > 0 && n.parent != nil {
n = n.parent
} else {
i *= 2
}
d := h.finalise(n, i)
h.writeSegment(j, h.segment, d)
c := <-h.result
h.releaseTree()
// sha3(length + BMT(pure_chunk))
if h.blockLength == nil {
return c
}
res := h.pool.hasher()
res.Reset()
res.Write(h.blockLength)
res.Write(c)
return res.Sum(nil)
}
// Hasher implements the SwarmHash interface
// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (h *Hasher) Hash() []byte {
return <-h.result
}
// Hasher implements the io.Writer interface
// Write fills the buffer to hash
// with every full segment complete launches a hasher go routine
// that shoots up the BMT
func (h *Hasher) Write(b []byte) (int, error) {
l := len(b)
if l <= 0 {
return 0, nil
}
s := h.segment
i := h.cur
count := (h.count + 1) / 2
need := h.count*h.size - h.cur*2*h.size
size := h.size
if need > size {
size *= 2
}
if l < need {
need = l
}
// calculate missing bit to complete current open segment
rest := size - len(s)
if need < rest {
rest = need
}
s = append(s, b[:rest]...)
need -= rest
// read full segments and the last possibly partial segment
for need > 0 && i < count-1 {
// push all finished chunks we read
h.writeSegment(i, s, h.depth)
need -= size
if need < 0 {
size += need
}
s = b[rest : rest+size]
rest += size
i++
}
h.segment = s
h.cur = i
// otherwise, we can assume len(s) == 0, so the whole buffer is read and the chunk is not yet full
return l, nil
}
// Hasher implements the io.ReaderFrom interface
// ReadFrom reads from io.Reader and appends to the data to hash using Write
// it reads so that chunk to hash is maximum length or reader reaches EOF
// caller must Reset the hasher prior to call
func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
buf := make([]byte, bufsize)
var read int
for {
var n int
n, err = r.Read(buf)
read += n
if err == io.EOF || read == len(buf) {
hash := h.Sum(buf[:n])
if read == len(buf) {
err = NewEOC(hash)
}
break
}
if err != nil {
break
}
n, err = h.Write(buf[:n])
if err != nil {
break
}
}
return int64(read), err
}
// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
h.getTree()
h.blockLength = nil
}
// Hasher implements the SwarmHash interface
// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (h *Hasher) ResetWithLength(l []byte) {
h.Reset()
h.blockLength = l
}
// Release gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
if h.bmt != nil {
n := h.bmt.leaves[h.cur]
for ; n != nil; n = n.parent {
n.unbalanced = false
if n.parent != nil {
n.root = false
}
}
h.pool.Release(h.bmt)
h.bmt = nil
}
h.cur = 0
h.segment = nil
}
func (h *Hasher) writeSegment(i int, s []byte, d int) {
hash := h.pool.hasher()
n := h.bmt.leaves[i]
if len(s) > h.size && n.parent != nil {
go func() {
hash.Reset()
hash.Write(s)
s = hash.Sum(nil)
if n.root {
h.result <- s
return
}
h.run(n.parent, hash, d, n.index, s)
}()
return
}
go h.run(n, hash, d, i*2, s)
}
func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
isLeft := i%2 == 0
for {
if isLeft {
n.left = s
} else {
n.right = s
}
if !n.unbalanced && n.toggle() {
return
}
if !n.unbalanced || !isLeft || i == 0 && d == 0 {
hash.Reset()
hash.Write(n.left)
hash.Write(n.right)
s = hash.Sum(nil)
} else {
s = append(n.left, n.right...)
}
h.hash = s
if n.root {
h.result <- s
return
}
isLeft = n.isLeft
n = n.parent
i++
}
}
// getTree obtains a BMT resource by reserving one from the pool
func (h *Hasher) getTree() *Tree {
if h.bmt != nil {
return h.bmt
}
t := h.pool.Reserve()
h.bmt = t
return t
}
// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic addint with %2 implements atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (n *Node) toggle() bool {
return atomic.AddInt32(&n.state, 1)%2 == 1
}
func hashstr(b []byte) string {
end := len(b)
if end > 4 {
end = 4
}
return fmt.Sprintf("%x", b[:end])
}
func depth(n int) (d int) {
for l := (n - 1) / 2; l > 0; l /= 2 {
d++
}
return d
}
// finalise is following the zigzags on the tree belonging
// to the final datasegment
func (h *Hasher) finalise(n *Node, i int) (d int) {
isLeft := i%2 == 0
for {
// when the final segment's path is going via left segments
// the incoming data is pushed to the parent upon pulling the left
// we do not need to toggle the state since this condition is
// detectable
n.unbalanced = isLeft
n.right = nil
if n.initial {
n.root = true
return d
}
isLeft = n.isLeft
n = n.parent
d++
}
}
// EOC (end of chunk) implements the error interface
type EOC struct {
Hash []byte // read the hash of the chunk off the error
}
// Error returns the error string
func (e *EOC) Error() string {
return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
}
// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
return &EOC{hash}
}

View File

@@ -1,85 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt
import (
"hash"
)
// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
span int
section int
cap int
h hash.Hash
}
// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
h := hasher()
hashsize := h.Size()
maxsize := hashsize * count
c := 2
for ; c < count; c *= 2 {
}
if c > 2 {
c /= 2
}
return &RefHasher{
section: 2 * hashsize,
span: c * hashsize,
cap: maxsize,
h: h,
}
}
// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
if len(d) > rh.cap {
d = d[:rh.cap]
}
return rh.hash(d, rh.span)
}
func (rh *RefHasher) hash(d []byte, s int) []byte {
l := len(d)
left := d
var right []byte
if l > rh.section {
for ; s >= l; s /= 2 {
}
left = rh.hash(d[:s], s)
right = d[s:]
if l-s > rh.section/2 {
right = rh.hash(right, s)
}
}
defer rh.h.Reset()
rh.h.Write(left)
rh.h.Write(right)
h := rh.h.Sum(nil)
return h
}

View File

@@ -1,481 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bmt
import (
"bytes"
crand "crypto/rand"
"fmt"
"hash"
"io"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto/sha3"
)
const (
maxproccnt = 8
)
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
hashFunc := sha3.NewKeccak256
sha3 := func(data ...[]byte) []byte {
h := hashFunc()
for _, v := range data {
h.Write(v)
}
return h.Sum(nil)
}
// the test struct is used to specify the expected BMT hash for data
// lengths between "from" and "to"
type test struct {
from int64
to int64
expected func([]byte) []byte
}
var tests []*test
// all lengths in [0,64] should be:
//
// sha3(data)
//
tests = append(tests, &test{
from: 0,
to: 64,
expected: func(data []byte) []byte {
return sha3(data)
},
})
// all lengths in [65,96] should be:
//
// sha3(
// sha3(data[:64])
// data[64:]
// )
//
tests = append(tests, &test{
from: 65,
to: 96,
expected: func(data []byte) []byte {
return sha3(sha3(data[:64]), data[64:])
},
})
// all lengths in [97,128] should be:
//
// sha3(
// sha3(data[:64])
// sha3(data[64:])
// )
//
tests = append(tests, &test{
from: 97,
to: 128,
expected: func(data []byte) []byte {
return sha3(sha3(data[:64]), sha3(data[64:]))
},
})
// all lengths in [129,160] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// data[128:]
// )
//
tests = append(tests, &test{
from: 129,
to: 160,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
},
})
// all lengths in [161,192] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// sha3(data[128:])
// )
//
tests = append(tests, &test{
from: 161,
to: 192,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
},
})
// all lengths in [193,224] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// sha3(
// sha3(data[128:192])
// data[192:]
// )
// )
//
tests = append(tests, &test{
from: 193,
to: 224,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
},
})
// all lengths in [225,256] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// sha3(
// sha3(data[128:192])
// sha3(data[192:])
// )
// )
//
tests = append(tests, &test{
from: 225,
to: 256,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
},
})
// run the tests
for _, x := range tests {
for length := x.from; length <= x.to; length++ {
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
data := make([]byte, length)
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
t.Fatal(err)
}
expected := x.expected(data)
actual := NewRefHasher(hashFunc, 128).Hash(data)
if !bytes.Equal(actual, expected) {
t.Fatalf("expected %x, got %x", expected, actual)
}
})
}
}
}
func testDataReader(l int) (r io.Reader) {
return io.LimitReader(crand.Reader, int64(l))
}
func TestHasherCorrectness(t *testing.T) {
err := testHasher(testBaseHasher)
if err != nil {
t.Fatal(err)
}
}
func testHasher(f func(BaseHasher, []byte, int, int) error) error {
tdata := testDataReader(4128)
data := make([]byte, 4128)
tdata.Read(data)
hasher := sha3.NewKeccak256
size := hasher().Size()
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
var err error
for _, count := range counts {
max := count * size
incr := 1
for n := 0; n <= max+incr; n += incr {
err = f(hasher, data, n, count)
if err != nil {
return err
}
}
}
return nil
}
func TestHasherReuseWithoutRelease(t *testing.T) {
testHasherReuse(1, t)
}
func TestHasherReuseWithRelease(t *testing.T) {
testHasherReuse(maxproccnt, t)
}
func testHasherReuse(i int, t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, 128, i)
defer pool.Drain(0)
bmt := New(pool)
for i := 0; i < 500; i++ {
n := rand.Intn(4096)
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
err := testHasherCorrectness(bmt, hasher, data, n, 128)
if err != nil {
t.Fatal(err)
}
}
}
func TestHasherConcurrency(t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, 128, maxproccnt)
defer pool.Drain(0)
wg := sync.WaitGroup{}
cycles := 100
wg.Add(maxproccnt * cycles)
errc := make(chan error)
for p := 0; p < maxproccnt; p++ {
for i := 0; i < cycles; i++ {
go func() {
bmt := New(pool)
n := rand.Intn(4096)
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
err := testHasherCorrectness(bmt, hasher, data, n, 128)
wg.Done()
if err != nil {
errc <- err
}
}()
}
}
go func() {
wg.Wait()
close(errc)
}()
var err error
select {
case <-time.NewTimer(5 * time.Second).C:
err = fmt.Errorf("timed out")
case err = <-errc:
}
if err != nil {
t.Fatal(err)
}
}
func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
pool := NewTreePool(hasher, count, 1)
defer pool.Drain(0)
bmt := New(pool)
return testHasherCorrectness(bmt, hasher, d, n, count)
}
func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
data := d[:n]
rbmt := NewRefHasher(hasher, count)
exp := rbmt.Hash(data)
timeout := time.NewTimer(time.Second)
c := make(chan error)
go func() {
bmt.Reset()
bmt.Write(data)
got := bmt.Sum(nil)
if !bytes.Equal(got, exp) {
c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
}
close(c)
}()
select {
case <-timeout.C:
err = fmt.Errorf("BMT hash calculation timed out")
case err = <-c:
}
return err
}
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }
func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }
func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
// doing it on n maxproccnt each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
tdata := testDataReader(64)
data := make([]byte, 64)
tdata.Read(data)
hasher := sha3.NewKeccak256
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
count := int32((n-1)/hasher().Size() + 1)
wg := sync.WaitGroup{}
wg.Add(maxproccnt)
var i int32
for j := 0; j < maxproccnt; j++ {
go func() {
defer wg.Done()
h := hasher()
for atomic.AddInt32(&i, 1) < count {
h.Reset()
h.Write(data)
h.Sum(nil)
}
}()
}
wg.Wait()
}
}
func benchmarkHasher(n int, t *testing.B) {
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
size := 1
hasher := sha3.NewKeccak256
segmentCount := 128
pool := NewTreePool(hasher, segmentCount, size)
bmt := New(pool)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
bmt.Reset()
bmt.Write(data)
bmt.Sum(nil)
}
}
func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
hasher := sha3.NewKeccak256
segmentCount := 128
pool := NewTreePool(hasher, segmentCount, poolsize)
cycles := 200
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
wg := sync.WaitGroup{}
wg.Add(cycles)
for j := 0; j < cycles; j++ {
bmt := New(pool)
go func() {
defer wg.Done()
bmt.Reset()
bmt.Write(data)
bmt.Sum(nil)
}()
}
wg.Wait()
}
}
func benchmarkSHA3(n int, t *testing.B) {
data := make([]byte, n)
tdata := testDataReader(n)
tdata.Read(data)
hasher := sha3.NewKeccak256
h := hasher()
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
h.Reset()
h.Write(data)
h.Sum(nil)
}
}
func benchmarkRefHasher(n int, t *testing.B) {
data := make([]byte, n)
tdata := testDataReader(n)
tdata.Read(data)
hasher := sha3.NewKeccak256
rbmt := NewRefHasher(hasher, 128)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
rbmt.Hash(data)
}
}

View File

@@ -26,7 +26,7 @@ Available commands are:
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
lint -- runs certain pre-selected linters
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
nsis -- creates a Windows NSIS installer
@@ -59,6 +59,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/internal/build"
"github.com/ethereum/go-ethereum/params"
sv "github.com/ethereum/go-ethereum/swarm/version"
)
var (
@@ -77,52 +79,84 @@ var (
executablePath("geth"),
executablePath("puppeth"),
executablePath("rlpdump"),
executablePath("swarm"),
executablePath("wnode"),
}
// Files that end up in the swarm*.zip archive.
swarmArchiveFiles = []string{
"COPYING",
executablePath("swarm"),
}
// A debian package is created for all executables listed here.
debExecutables = []debExecutable{
{
Name: "abigen",
BinaryName: "abigen",
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
},
{
Name: "bootnode",
BinaryName: "bootnode",
Description: "Ethereum bootnode.",
},
{
Name: "evm",
BinaryName: "evm",
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
},
{
Name: "geth",
BinaryName: "geth",
Description: "Ethereum CLI client.",
},
{
Name: "puppeth",
BinaryName: "puppeth",
Description: "Ethereum private network manager.",
},
{
Name: "rlpdump",
BinaryName: "rlpdump",
Description: "Developer utility tool that prints RLP structures.",
},
{
Name: "swarm",
Description: "Ethereum Swarm daemon and tools",
},
{
Name: "wnode",
BinaryName: "wnode",
Description: "Ethereum Whisper diagnostic tool",
},
}
// A debian package is created for all executables listed here.
debSwarmExecutables = []debExecutable{
{
BinaryName: "swarm",
PackageName: "ethereum-swarm",
Description: "Ethereum Swarm daemon and tools",
},
}
debEthereum = debPackage{
Name: "ethereum",
Version: params.Version,
Executables: debExecutables,
}
debSwarm = debPackage{
Name: "ethereum-swarm",
Version: sv.Version,
Executables: debSwarmExecutables,
}
// Debian meta packages to build and push to Ubuntu PPA
debPackages = []debPackage{
debSwarm,
debEthereum,
}
// Packages to be cross-compiled by the xgo command
allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...)
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: wily is unsupported because it was officially deprecated on launchpad.
// Note: yakkety is unsupported because it was officially deprecated on launchpad.
// Note: zesty is unsupported because it was officially deprecated on launchpad.
debDistros = []string{"trusty", "xenial", "artful", "bionic"}
// Note: artful is unsupported because it was officially deprecated on launchpad.
debDistros = []string{"trusty", "xenial", "bionic", "cosmic"}
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -330,6 +364,7 @@ func doLint(cmdline []string) {
configs := []string{
"--vendor",
"--tests",
"--deadline=2m",
"--disable-all",
"--enable=goimports",
"--enable=varcheck",
@@ -349,7 +384,6 @@ func doLint(cmdline []string) {
}
// Release Packaging
func doArchive(cmdline []string) {
var (
arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
@@ -369,10 +403,14 @@ func doArchive(cmdline []string) {
}
var (
env = build.Env()
base = archiveBasename(*arch, env)
geth = "geth-" + base + ext
alltools = "geth-alltools-" + base + ext
env = build.Env()
basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
geth = "geth-" + basegeth + ext
alltools = "geth-alltools-" + basegeth + ext
baseswarm = archiveBasename(*arch, sv.ArchiveVersion(env.Commit))
swarm = "swarm-" + baseswarm + ext
)
maybeSkipArchive(env)
if err := build.WriteArchive(geth, gethArchiveFiles); err != nil {
@@ -381,14 +419,17 @@ func doArchive(cmdline []string) {
if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil {
log.Fatal(err)
}
for _, archive := range []string{geth, alltools} {
if err := build.WriteArchive(swarm, swarmArchiveFiles); err != nil {
log.Fatal(err)
}
for _, archive := range []string{geth, alltools, swarm} {
if err := archiveUpload(archive, *upload, *signer); err != nil {
log.Fatal(err)
}
}
}
func archiveBasename(arch string, env build.Environment) string {
func archiveBasename(arch string, archiveVersion string) string {
platform := runtime.GOOS + "-" + arch
if arch == "arm" {
platform += os.Getenv("GOARM")
@@ -399,18 +440,7 @@ func archiveBasename(arch string, env build.Environment) string {
if arch == "ios" {
platform = "ios-all"
}
return platform + "-" + archiveVersion(env)
}
func archiveVersion(env build.Environment) string {
version := build.VERSION()
if isUnstableBuild(env) {
version += "-unstable"
}
if env.Commit != "" {
version += "-" + env.Commit[:8]
}
return version
return platform + "-" + archiveVersion
}
func archiveUpload(archive string, blobstore string, signer string) error {
@@ -460,7 +490,6 @@ func maybeSkipArchive(env build.Environment) {
}
// Debian Packaging
func doDebianSource(cmdline []string) {
var (
signer = flag.String("signer", "", `Signing key name, also used as package author`)
@@ -484,21 +513,23 @@ func doDebianSource(cmdline []string) {
build.MustRun(gpg)
}
// Create the packages.
for _, distro := range debDistros {
meta := newDebMetadata(distro, *signer, env, now)
pkgdir := stageDebianSource(*workdir, meta)
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
debuild.Dir = pkgdir
build.MustRun(debuild)
// Create Debian packages and upload them
for _, pkg := range debPackages {
for _, distro := range debDistros {
meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
pkgdir := stageDebianSource(*workdir, meta)
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
debuild.Dir = pkgdir
build.MustRun(debuild)
changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
changes = filepath.Join(*workdir, changes)
if *signer != "" {
build.MustRunCommand("debsign", changes)
}
if *upload != "" {
build.MustRunCommand("dput", *upload, changes)
changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
changes = filepath.Join(*workdir, changes)
if *signer != "" {
build.MustRunCommand("debsign", changes)
}
if *upload != "" {
build.MustRunCommand("dput", *upload, changes)
}
}
}
}
@@ -523,9 +554,17 @@ func isUnstableBuild(env build.Environment) bool {
return true
}
type debPackage struct {
Name string // the name of the Debian package to produce, e.g. "ethereum", or "ethereum-swarm"
Version string // the clean version of the debPackage, e.g. 1.8.12 or 0.3.0, without any metadata
Executables []debExecutable // executables to be included in the package
}
type debMetadata struct {
Env build.Environment
PackageName string
// go-ethereum version being built. Note that this
// is not the debian package version. The package version
// is constructed by VersionString.
@@ -537,21 +576,33 @@ type debMetadata struct {
}
type debExecutable struct {
Name, Description string
PackageName string
BinaryName string
Description string
}
func newDebMetadata(distro, author string, env build.Environment, t time.Time) debMetadata {
// Package returns the name of the package if present, or
// falls back to BinaryName.
func (d debExecutable) Package() string {
if d.PackageName != "" {
return d.PackageName
}
return d.BinaryName
}
func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
if author == "" {
// No signing key, use default author.
author = "Ethereum Builds <fjl@ethereum.org>"
}
return debMetadata{
PackageName: name,
Env: env,
Author: author,
Distro: distro,
Version: build.VERSION(),
Version: version,
Time: t.Format(time.RFC1123Z),
Executables: debExecutables,
Executables: exes,
}
}
@@ -559,9 +610,9 @@ func newDebMetadata(distro, author string, env build.Environment, t time.Time) d
// on all executable packages.
func (meta debMetadata) Name() string {
if isUnstableBuild(meta.Env) {
return "ethereum-unstable"
return meta.PackageName + "-unstable"
}
return "ethereum"
return meta.PackageName
}
// VersionString returns the debian version of the packages.
@@ -588,9 +639,9 @@ func (meta debMetadata) ExeList() string {
// ExeName returns the package name of an executable package.
func (meta debMetadata) ExeName(exe debExecutable) string {
if isUnstableBuild(meta.Env) {
return exe.Name + "-unstable"
return exe.Package() + "-unstable"
}
return exe.Name
return exe.Package()
}
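Package and ExeName compose into the final Debian package names; a hedged sketch of the resolution for a hypothetical swarm executable:

// Illustrative only: how an executable's package name is derived.
exe := debExecutable{BinaryName: "swarm", PackageName: "ethereum-swarm"}

exe.Package()     // "ethereum-swarm" (PackageName wins over BinaryName)
meta.ExeName(exe) // "ethereum-swarm" on release builds,
                  // "ethereum-swarm-unstable" when isUnstableBuild(env)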
// ExeConflicts returns the content of the Conflicts field
@@ -605,7 +656,7 @@ func (meta debMetadata) ExeConflicts(exe debExecutable) string {
// be preferred and the conflicting files should be handled via
// alternates. We might do this eventually but using a conflict is
// easier now.
return "ethereum, " + exe.Name
return "ethereum, " + exe.Package()
}
return ""
}
@@ -622,24 +673,23 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
// Put the debian build files in place.
debian := filepath.Join(pkgdir, "debian")
build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta)
build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
build.Render("build/deb/"+meta.PackageName+"/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
build.Render("build/deb/"+meta.PackageName+"/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
build.Render("build/deb/"+meta.PackageName+"/deb.control", filepath.Join(debian, "control"), 0644, meta)
build.Render("build/deb/"+meta.PackageName+"/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
for _, exe := range meta.Executables {
install := filepath.Join(debian, meta.ExeName(exe)+".install")
docs := filepath.Join(debian, meta.ExeName(exe)+".docs")
build.Render("build/deb.install", install, 0644, exe)
build.Render("build/deb.docs", docs, 0644, exe)
build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
}
return pkgdir
}
// Windows installer
func doWindowsInstaller(cmdline []string) {
// Parse the flags and skip installer generation on PRs
var (
@@ -689,11 +739,11 @@ func doWindowsInstaller(cmdline []string) {
// Build the installer. This assumes that all the needed files have been previously
// built (don't mix building and packaging to keep cross compilation complexity to a
// minimum).
version := strings.Split(build.VERSION(), ".")
version := strings.Split(params.Version, ".")
if env.Commit != "" {
version[2] += "-" + env.Commit[:8]
}
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, env) + ".exe")
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
build.MustRunCommand("makensis.exe",
"/DOUTPUTFILE="+installer,
"/DMAJORVERSION="+version[0],
@@ -745,7 +795,7 @@ func doAndroidArchive(cmdline []string) {
maybeSkipArchive(env)
// Sign and upload the archive to Azure
archive := "geth-" + archiveBasename("android", env) + ".aar"
archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
os.Rename("geth.aar", archive)
if err := archiveUpload(archive, *upload, *signer); err != nil {
@@ -830,7 +880,7 @@ func newMavenMetadata(env build.Environment) mavenMetadata {
}
}
// Render the version and package strings
version := build.VERSION()
version := params.Version
if isUnstableBuild(env) {
version += "-SNAPSHOT"
}
@@ -865,7 +915,7 @@ func doXCodeFramework(cmdline []string) {
build.MustRun(bind)
return
}
archive := "geth-" + archiveBasename("ios", env)
archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
if err := os.Mkdir(archive, os.ModePerm); err != nil {
log.Fatal(err)
}
@@ -921,7 +971,7 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
}
}
}
version := build.VERSION()
version := params.Version
if isUnstableBuild(env) {
version += "-unstable." + env.Buildnum
}
@@ -951,7 +1001,7 @@ func doXgo(cmdline []string) {
if *alltools {
args = append(args, []string{"--dest", GOBIN}...)
for _, res := range allToolsArchiveFiles {
for _, res := range allCrossCompiledArchiveFiles {
if strings.HasPrefix(res, GOBIN) {
// Binary tool found, cross build it explicitly
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
@@ -1017,23 +1067,14 @@ func doPurge(cmdline []string) {
}
for i := 0; i < len(blobs); i++ {
for j := i + 1; j < len(blobs); j++ {
iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified)
if err != nil {
log.Fatal(err)
}
jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified)
if err != nil {
log.Fatal(err)
}
if iTime.After(jTime) {
if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
blobs[i], blobs[j] = blobs[j], blobs[i]
}
}
}
// Filter out all archives more recent than the given threshold
for i, blob := range blobs {
timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified)
if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour {
if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
blobs = blobs[:i]
break
}
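The pairwise swap above is effectively a selection sort by LastModified; the same ordering could be expressed with the standard library, shown only as an equivalent sketch (assumes a "sort" import):

sort.Slice(blobs, func(i, j int) bool {
	return blobs[i].Properties.LastModified.Before(blobs[j].Properties.LastModified)
})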

build/clean_go_build_cache.sh Executable file

@@ -0,0 +1,19 @@
#!/bin/sh
# Cleaning the Go cache only makes sense if we actually have Go installed... or
# if Go is actually callable. This does not hold true during deb packaging, so
# we need an explicit check to avoid build failures.
if ! command -v go > /dev/null; then
exit
fi
version_gt() {
test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
}
golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')
# Clean go build cache when go version is greater than or equal to 1.10
if !(version_gt 1.10 $golang_version); then
go clean -cache
fi


@@ -1 +0,0 @@
build/bin/{{.Name}} usr/bin


@@ -0,0 +1,19 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.10
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum
{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
{{.Description}}
{{end}}
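The {{$.ExeName .}} pipeline inside the range invokes the ExeName method on the template's root object, passing the current executable as argument. A self-contained reduction, using stand-in types rather than the real ci.go ones, behaves like this:

package main

import (
	"os"
	"text/template"
)

// Stand-ins reduced to what the control template touches.
type exe struct{ BinaryName, Description string }

type meta struct {
	Name        string
	Executables []exe
}

// ExeName mirrors debMetadata.ExeName for the stable-build case.
func (m meta) ExeName(e exe) string { return e.BinaryName }

func main() {
	const control = `Source: {{.Name}}
{{range .Executables}}Package: {{$.ExeName .}}
Description: {{.Description}}
{{end}}`
	t := template.Must(template.New("control").Parse(control))
	if err := t.Execute(os.Stdout, meta{
		Name:        "ethereum-swarm",
		Executables: []exe{{BinaryName: "swarm", Description: "Ethereum Swarm daemon and tools"}},
	}); err != nil {
		panic(err)
	}
}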


@@ -1,4 +1,4 @@
Copyright 2016 The go-ethereum Authors
Copyright 2018 The go-ethereum Authors
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by


@@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin


@@ -0,0 +1,5 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
* git build of {{.Env.Commit}}
-- {{.Author}} {{.Time}}


@@ -11,8 +11,8 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum
Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth and other tools
Meta-package to install geth and other tools
Description: Meta-package to install geth, swarm, and other tools
Meta-package to install geth, swarm and other tools
{{range .Executables}}
Package: {{$.ExeName .}}


@@ -0,0 +1,14 @@
Copyright 2018 The go-ethereum Authors
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.


@@ -0,0 +1 @@
AUTHORS


@@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin


@@ -0,0 +1,13 @@
#!/usr/bin/make -f
# -*- makefile -*-
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
override_dh_auto_build:
build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:
%:
dh $@


@@ -1,18 +1,18 @@
#!/usr/bin/env bash
#!/bin/sh
find_files() {
find . -not \( \
find . ! \( \
\( \
-wholename '.github' \
-o -wholename './build/_workspace' \
-o -wholename './build/bin' \
-o -wholename './crypto/bn256' \
-o -wholename '*/vendor/*' \
-path '.github' \
-o -path './build/_workspace' \
-o -path './build/bin' \
-o -path './crypto/bn256' \
-o -path '*/vendor/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s -w";
GOIMPORTS="goimports -w";
find_files | xargs $GOFMT;
find_files | xargs $GOIMPORTS;
GOFMT="gofmt -s -w"
GOIMPORTS="goimports -w"
find_files | xargs $GOFMT
find_files | xargs $GOIMPORTS


@@ -29,7 +29,7 @@ import (
)
var (
abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind")
abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN")
binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)")
typFlag = flag.String("type", "", "Struct name for the binding (default = package name)")
@@ -75,16 +75,27 @@ func main() {
bins []string
types []string
)
if *solFlag != "" {
if *solFlag != "" || *abiFlag == "-" {
// Generate the list of types to exclude from binding
exclude := make(map[string]bool)
for _, kind := range strings.Split(*excFlag, ",") {
exclude[strings.ToLower(kind)] = true
}
contracts, err := compiler.CompileSolidity(*solcFlag, *solFlag)
if err != nil {
fmt.Printf("Failed to build Solidity contract: %v\n", err)
os.Exit(-1)
var contracts map[string]*compiler.Contract
var err error
if *solFlag != "" {
contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag)
if err != nil {
fmt.Printf("Failed to build Solidity contract: %v\n", err)
os.Exit(-1)
}
} else {
contracts, err = contractsFromStdin()
if err != nil {
fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err)
os.Exit(-1)
}
}
// Gather all non-excluded contract for binding
for name, contract := range contracts {
@@ -138,3 +149,12 @@ func main() {
os.Exit(-1)
}
}
func contractsFromStdin() (map[string]*compiler.Contract, error) {
bytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return nil, err
}
return compiler.ParseCombinedJSON(bytes, "", "", "", "")
}
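With this in place, solc's combined-json output can presumably be piped straight in, e.g. solc --combined-json abi,bin,userdoc,devdoc,metadata Greeter.sol | abigen --abi - --pkg main. A minimal sketch of the parsing path, assuming the solc 0.4.x combined-json layout in which abi/userdoc/devdoc are JSON-encoded strings:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/compiler"
)

func main() {
	// Hand-built fixture in the combined-json layout described above.
	blob := []byte(`{
	 "contracts": {
	  "Greeter.sol:Greeter": {
	   "bin": "6060", "abi": "[]",
	   "userdoc": "{}", "devdoc": "{}", "metadata": "{}"
	  }
	 },
	 "version": "0.4.24"
	}`)
	contracts, err := compiler.ParseCombinedJSON(blob, "", "", "", "")
	if err != nil {
		log.Fatal(err)
	}
	for name, contract := range contracts {
		fmt.Println(name, contract.Code) // Greeter.sol:Greeter 0x6060
	}
}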


@@ -225,7 +225,7 @@ func initializeSecrets(c *cli.Context) error {
if _, err := os.Stat(location); err == nil {
return fmt.Errorf("file %v already exists, will not overwrite", location)
}
err = ioutil.WriteFile(location, masterSeed, 0700)
err = ioutil.WriteFile(location, masterSeed, 0400)
if err != nil {
return err
}
@@ -415,7 +415,7 @@ func signer(c *cli.Context) error {
// start http server
httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts)
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
@@ -540,14 +540,14 @@ func readMasterKey(ctx *cli.Context) ([]byte, error) {
// checkFile is a convenience function to check if a file
// * exists
// * is mode 0600
// * is mode 0400
func checkFile(filename string) error {
info, err := os.Stat(filename)
if err != nil {
return fmt.Errorf("failed stat on %s: %v", filename, err)
}
// Check the unix permission bits
if info.Mode().Perm()&077 != 0 {
if info.Mode().Perm()&0377 != 0 {
return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String())
}
return nil
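The 0377 mask rejects every permission bit except owner-read, so only a bare 0400 file passes; a standalone demonstration of the arithmetic (not part of the signer itself):

package main

import "fmt"

func main() {
	// 0377 == owner write/execute plus all group and other bits.
	for _, perm := range []uint32{0400, 0600, 0640, 0755} {
		fmt.Printf("%04o insecure: %v\n", perm, perm&0377 != 0)
	}
	// 0400 insecure: false
	// 0600 insecure: true (owner write bit set)
	// 0640 insecure: true (group read bit set)
	// 0755 insecure: true
}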


@@ -21,21 +21,33 @@ Private key information can be printed by using the `--private` flag;
make sure to use this feature with great caution!
### `ethkey sign <keyfile> <message/file>`
### `ethkey signmessage <keyfile> <message/file>`
Sign the message with a keyfile.
It is possible to refer to a file containing the message.
To sign a message contained in a file, use the `--msgfile` flag.
### `ethkey verify <address> <signature> <message/file>`
### `ethkey verifymessage <address> <signature> <message/file>`
Verify the signature of the message.
It is possible to refer to a file containing the message.
To verify a message contained in a file, use the `--msgfile` flag.
### `ethkey changepassphrase <keyfile>`
Change the passphrase of a keyfile.
Use the `--newpasswordfile` flag to point to the new password file.
## Passphrases
For every command that uses a keyfile, you will be prompted to provide the
passphrase for decrypting the keyfile. To avoid this message, it is possible
to pass the passphrase by using the `--passphrase` flag pointing to a file that
to pass the passphrase by using the `--passwordfile` flag pointing to a file that
contains the passphrase.
## JSON
To output the result in JSON format, use the `--json` flag.


@@ -0,0 +1,72 @@
package main
import (
"fmt"
"io/ioutil"
"strings"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"gopkg.in/urfave/cli.v1"
)
var newPassphraseFlag = cli.StringFlag{
Name: "newpasswordfile",
Usage: "the file that contains the new passphrase for the keyfile",
}
var commandChangePassphrase = cli.Command{
Name: "changepassphrase",
Usage: "change the passphrase on a keyfile",
ArgsUsage: "<keyfile>",
Description: `
Change the passphrase of a keyfile.`,
Flags: []cli.Flag{
passphraseFlag,
newPassphraseFlag,
},
Action: func(ctx *cli.Context) error {
keyfilepath := ctx.Args().First()
// Read key from file.
keyjson, err := ioutil.ReadFile(keyfilepath)
if err != nil {
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
}
// Decrypt key with passphrase.
passphrase := getPassphrase(ctx)
key, err := keystore.DecryptKey(keyjson, passphrase)
if err != nil {
utils.Fatalf("Error decrypting key: %v", err)
}
// Get a new passphrase.
fmt.Println("Please provide a new passphrase")
var newPhrase string
if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" {
content, err := ioutil.ReadFile(passFile)
if err != nil {
utils.Fatalf("Failed to read new passphrase file '%s': %v", passFile, err)
}
newPhrase = strings.TrimRight(string(content), "\r\n")
} else {
newPhrase = promptPassphrase(true)
}
// Encrypt the key with the new passphrase.
newJson, err := keystore.EncryptKey(key, newPhrase, keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
utils.Fatalf("Error encrypting with new passphrase: %v", err)
}
// Then write the new keyfile in place of the old one.
if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
utils.Fatalf("Error writing new keyfile to disk: %v", err)
}
// Don't print anything. Just return successfully,
// producing a positive exit code.
return nil
},
}
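Stripped of the CLI plumbing, the command is a decrypt/re-encrypt round trip through the keystore API; roughly (a sketch with hypothetical variable names, not the command itself):

key, err := keystore.DecryptKey(keyjson, oldPassphrase)
if err != nil {
	return err
}
newJSON, err := keystore.EncryptKey(key, newPassphrase, keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
	return err
}
return ioutil.WriteFile(keyfilepath, newJSON, 0600)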


@@ -90,7 +90,7 @@ If you want to encrypt an existing private key, it can be specified by setting
}
// Encrypt key with passphrase.
passphrase := getPassPhrase(ctx, true)
passphrase := promptPassphrase(true)
keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
utils.Fatalf("Error encrypting key: %v", err)


@@ -60,7 +60,7 @@ make sure to use this feature with great caution!`,
}
// Decrypt key with passphrase.
passphrase := getPassPhrase(ctx, false)
passphrase := getPassphrase(ctx)
key, err := keystore.DecryptKey(keyjson, passphrase)
if err != nil {
utils.Fatalf("Error decrypting key: %v", err)


@@ -38,6 +38,7 @@ func init() {
app.Commands = []cli.Command{
commandGenerate,
commandInspect,
commandChangePassphrase,
commandSignMessage,
commandVerifyMessage,
}


@@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag.
}
// Decrypt key with passphrase.
passphrase := getPassPhrase(ctx, false)
passphrase := getPassphrase(ctx)
key, err := keystore.DecryptKey(keyjson, passphrase)
if err != nil {
utils.Fatalf("Error decrypting key: %v", err)


@@ -28,11 +28,32 @@ import (
"gopkg.in/urfave/cli.v1"
)
// getPassPhrase obtains a passphrase given by the user. It first checks the
// --passphrase command line flag and ultimately prompts the user for a
// promptPassphrase prompts the user for a passphrase. Set confirmation to true
// to require the user to confirm the passphrase.
func promptPassphrase(confirmation bool) string {
passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err)
}
if confirmation {
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
}
if passphrase != confirm {
utils.Fatalf("Passphrases do not match")
}
}
return passphrase
}
// getPassphrase obtains a passphrase given by the user. It first checks the
// --passwordfile command line flag and ultimately prompts the user for a
// passphrase.
func getPassPhrase(ctx *cli.Context, confirmation bool) string {
// Look for the --passphrase flag.
func getPassphrase(ctx *cli.Context) string {
// Look for the --passwordfile flag.
passphraseFile := ctx.String(passphraseFlag.Name)
if passphraseFile != "" {
content, err := ioutil.ReadFile(passphraseFile)
@@ -44,20 +65,7 @@ func getPassPhrase(ctx *cli.Context, confirmation bool) string {
}
// Otherwise prompt the user for the passphrase.
passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err)
}
if confirmation {
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
}
if passphrase != confirm {
utils.Fatalf("Passphrases do not match")
}
}
return passphrase
return promptPassphrase(false)
}
// signHash is a helper function that calculates a hash for the given message


@@ -44,7 +44,7 @@ func disasmCmd(ctx *cli.Context) error {
return err
}
code := strings.TrimSpace(string(in[:]))
code := strings.TrimSpace(string(in))
fmt.Printf("%v\n", code)
return asm.PrintDisassembled(code)
}


@@ -80,13 +80,13 @@ func runCmd(ctx *cli.Context) error {
}
var (
tracer vm.Tracer
debugLogger *vm.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
blockNumber uint64
tracer vm.Tracer
debugLogger *vm.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = NewJSONLogger(logconfig, os.Stdout)
@@ -98,13 +98,14 @@ func runCmd(ctx *cli.Context) error {
}
if ctx.GlobalString(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
genesisConfig = gen
db := ethdb.NewMemDatabase()
genesis := gen.ToBlock(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
chainConfig = gen.Config
blockNumber = gen.Number
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
genesisConfig = new(core.Genesis)
}
if ctx.GlobalString(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
@@ -156,13 +157,19 @@ func runCmd(ctx *cli.Context) error {
}
initialGas := ctx.GlobalUint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
Value: utils.GlobalBig(ctx, ValueFlag.Name),
BlockNumber: new(big.Int).SetUint64(blockNumber),
Difficulty: genesisConfig.Difficulty,
Time: new(big.Int).SetUint64(genesisConfig.Timestamp),
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),


@@ -77,9 +77,6 @@ var (
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
githubUser = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
@@ -160,7 +157,8 @@ func main() {
if blob, err = ioutil.ReadFile(*accPassFlag); err != nil {
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
}
pass := string(blob)
// Delete trailing newline in password
pass := strings.TrimSuffix(string(blob), "\n")
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
if blob, err = ioutil.ReadFile(*accJSONFlag); err != nil {
@@ -201,6 +199,8 @@ type faucet struct {
keystore *keystore.KeyStore // Keystore containing the single signer
account accounts.Account // Account funding user faucet requests
head *types.Header // Current head header of the faucet
balance *big.Int // Current balance of the faucet
nonce uint64 // Current pending nonce of the faucet
price *big.Int // Current gas price to issue funds with
@@ -216,7 +216,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
// Assemble the raw devp2p protocol stack
stack, err := node.New(&node.Config{
Name: "geth",
Version: params.Version,
Version: params.VersionWithMeta,
DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
P2P: p2p.Config{
NAT: nat.Any(),
@@ -326,33 +326,30 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
nonce uint64
err error
)
for {
// Attempt to retrieve the stats, may error on no faucet connectivity
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
head, err = f.client.HeaderByNumber(ctx, nil)
if err == nil {
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
if err == nil {
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
}
for head == nil || balance == nil {
// Retrieve the current stats cached by the faucet
f.lock.RLock()
if f.head != nil {
head = types.CopyHeader(f.head)
}
cancel()
if f.balance != nil {
balance = new(big.Int).Set(f.balance)
}
nonce = f.nonce
f.lock.RUnlock()
// If stats retrieval failed, wait a bit and retry
if err != nil {
if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil {
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
}
time.Sleep(3 * time.Second)
continue
}
// Initial stats reported successfully, proceed with user interaction
break
}
// Send over the initial stats and the latest header
if err = send(conn, map[string]interface{}{
"funds": balance.Div(balance, ether),
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
"requests": f.reqs,
@@ -474,7 +471,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
if err != nil {
f.lock.Unlock()
if err = sendError(conn, err); err != nil {
@@ -522,6 +519,47 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
}
}
// refresh attempts to retrieve the latest header from the chain and extract the
// associated faucet balance and nonce for connectivity caching.
func (f *faucet) refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
balance *big.Int
nonce uint64
price *big.Int
)
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
return err
}
// Everything succeeded, update the cached stats and eject old requests
f.lock.Lock()
f.head, f.balance = head, balance
f.price, f.nonce = price, nonce
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
return nil
}
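refresh is the write side of this cache; readers such as apiHandler take the read lock and copy what they need before releasing it, along the lines of this hypothetical helper (not part of the faucet):

func (f *faucet) cachedStats() (*types.Header, *big.Int, uint64) {
	f.lock.RLock()
	defer f.lock.RUnlock()

	var head *types.Header
	if f.head != nil {
		head = types.CopyHeader(f.head) // copy so callers cannot mutate the cache
	}
	var balance *big.Int
	if f.balance != nil {
		balance = new(big.Int).Set(f.balance)
	}
	return head, balance, f.nonce
}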
// loop keeps waiting for interesting events and pushes them out to connected
// websockets.
func (f *faucet) loop() {
@@ -539,45 +577,27 @@ func (f *faucet) loop() {
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
var (
balance *big.Int
nonce uint64
price *big.Int
err error
)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
if err == nil {
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
if err == nil {
price, err = f.client.SuggestGasPrice(ctx)
}
timestamp := time.Unix(head.Time.Int64(), 0)
if time.Since(timestamp) > time.Hour {
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
continue
}
cancel()
// If querying the data failed, try for the next block
if err != nil {
if err := f.refresh(head); err != nil {
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
continue
} else {
log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price)
}
// Faucet state retrieved, update locally and send to clients
balance = new(big.Int).Div(balance, ether)
f.lock.Lock()
f.price, f.nonce = price, nonce
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
f.lock.RLock()
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
balance := new(big.Int).Div(f.balance, ether)
peers := f.stack.Server().PeerCount()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{
"funds": balance,
"funded": f.nonce,
"peers": f.stack.Server().PeerCount(),
"peers": peers,
"requests": f.reqs,
}, time.Second); err != nil {
log.Warn("Failed to send stats to client", "err", err)
@@ -638,59 +658,6 @@ func sendSuccess(conn *websocket.Conn, msg string) error {
return send(conn, map[string]string{"success": msg}, time.Second)
}
// authGitHub tries to authenticate a faucet request using GitHub gists, returning
// the username, avatar URL and Ethereum address to fund on success.
func authGitHub(url string) (string, string, common.Address, error) {
// Retrieve the gist from the GitHub Gist APIs
parts := strings.Split(url, "/")
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
if *githubUser != "" {
req.SetBasicAuth(*githubUser, *githubToken)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", "", common.Address{}, err
}
var gist struct {
Owner struct {
Login string `json:"login"`
} `json:"owner"`
Files map[string]struct {
Content string `json:"content"`
} `json:"files"`
}
err = json.NewDecoder(res.Body).Decode(&gist)
res.Body.Close()
if err != nil {
return "", "", common.Address{}, err
}
if gist.Owner.Login == "" {
return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
}
// Iterate over all the files and look for Ethereum addresses
var address common.Address
for _, file := range gist.Files {
content := strings.TrimSpace(file.Content)
if len(content) == 2+common.AddressLength*2 {
address = common.HexToAddress(content)
}
}
if address == (common.Address{}) {
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
// Validate the user's existence since the API is unhelpful here
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
return "", "", common.Address{}, err
}
res.Body.Close()
if res.StatusCode != 200 {
return "", "", common.Address{}, errors.New("Invalid user... boom!")
}
// Everything passed validation, return the gathered infos
return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
}
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string) (string, string, common.Address, error) {


@@ -51,7 +51,7 @@ func reportBug(ctx *cli.Context) error {
fmt.Fprintln(&buff, "#### System information")
fmt.Fprintln(&buff)
fmt.Fprintln(&buff, "Version:", params.Version)
fmt.Fprintln(&buff, "Version:", params.VersionWithMeta)
fmt.Fprintln(&buff, "Go Version:", runtime.Version())
fmt.Fprintln(&buff, "OS:", runtime.GOOS)
printOSDetails(&buff)


@@ -48,7 +48,6 @@ var (
ArgsUsage: "<genesisPath>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -66,7 +65,7 @@ It expects the genesis file as argument.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
utils.GCModeFlag,
utils.CacheDatabaseFlag,
utils.CacheGCFlag,
@@ -87,14 +86,15 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
if already existing. If the file ends with .gz, the output will
be gzipped.`,
}
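The .gz convention in the help text is presumably just a suffix check that wraps the output writer, along these lines (sketch, assumes "compress/gzip", "io" and "strings" imports):

var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
	gz := gzip.NewWriter(fh)
	defer gz.Close() // flush the gzip stream before the file handle is closed
	writer = gz
}
// ... stream the exported RLP-encoded blocks into writer ...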
importPreimagesCommand = cli.Command{
Action: utils.MigrateFlags(importPreimages),
@@ -104,7 +104,7 @@ if already existing.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -118,7 +118,7 @@ if already existing.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -148,7 +148,6 @@ The first argument must be the directory containing the blockchain to download f
ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -162,7 +161,7 @@ Remove blockchain and state databases`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -341,9 +340,9 @@ func importPreimages(ctx *cli.Context) error {
start := time.Now()
if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
utils.Fatalf("Export error: %v\n", err)
utils.Fatalf("Import error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
fmt.Printf("Import done in %v\n", time.Since(start))
return nil
}


@@ -168,6 +168,9 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) {
cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name)
}
if ctx.GlobalIsSet(utils.WhisperRestrictConnectionBetweenLightClientsFlag.Name) {
cfg.Shh.RestrictConnectionBetweenLightClients = true
}
utils.RegisterShhService(stack, &cfg.Shh)
}


@@ -31,7 +31,7 @@ import (
)
const (
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
)
@@ -50,7 +50,7 @@ func TestConsoleWelcome(t *testing.T) {
geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
geth.SetTemplateFunc("gover", runtime.Version)
geth.SetTemplateFunc("gethver", func() string { return params.Version })
geth.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
@@ -133,7 +133,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
attach.SetTemplateFunc("gover", runtime.Version)
attach.SetTemplateFunc("gethver", func() string { return params.Version })
attach.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })


@@ -77,7 +77,7 @@ var customGenesisTests = []struct {
"homesteadBlock" : 314,
"daoForkBlock" : 141,
"daoForkSupport" : true
},
}
}`,
query: "eth.getBlock(0).nonce",
result: "0x0000000000000042",


@@ -19,12 +19,16 @@ package main
import (
"fmt"
"math"
"os"
"runtime"
godebug "runtime/debug"
"sort"
"strconv"
"strings"
"time"
"github.com/elastic/gosigar"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
@@ -68,6 +72,7 @@ var (
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
@@ -78,8 +83,6 @@ var (
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
utils.FastSyncFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
utils.GCModeFlag,
utils.LightServFlag,
@@ -92,11 +95,21 @@ var (
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
utils.EtherbaseFlag,
utils.GasPriceFlag,
utils.MinerThreadsFlag,
utils.MiningEnabledFlag,
utils.TargetGasLimitFlag,
utils.MinerThreadsFlag,
utils.MinerLegacyThreadsFlag,
utils.MinerNotifyFlag,
utils.MinerGasTargetFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerGasLimitFlag,
utils.MinerGasPriceFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerEtherbaseFlag,
utils.MinerLegacyEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerLegacyExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
@@ -117,7 +130,8 @@ var (
utils.NoCompactionFlag,
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
utils.ExtraDataFlag,
utils.EWASMInterpreterFlag,
utils.EVMInterpreterFlag,
configFileFlag,
}
@@ -139,6 +153,16 @@ var (
utils.WhisperEnabledFlag,
utils.WhisperMaxMessageSizeFlag,
utils.WhisperMinPOWFlag,
utils.WhisperRestrictConnectionBetweenLightClientsFlag,
}
metricsFlags = []cli.Flag{
utils.MetricsEnableInfluxDBFlag,
utils.MetricsInfluxDBEndpointFlag,
utils.MetricsInfluxDBDatabaseFlag,
utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBHostTagFlag,
}
)
@@ -182,16 +206,40 @@ func init() {
app.Flags = append(app.Flags, consoleFlags...)
app.Flags = append(app.Flags, debug.Flags...)
app.Flags = append(app.Flags, whisperFlags...)
app.Flags = append(app.Flags, metricsFlags...)
app.Before = func(ctx *cli.Context) error {
runtime.GOMAXPROCS(runtime.NumCPU())
if err := debug.Setup(ctx); err != nil {
logdir := ""
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
}
if err := debug.Setup(ctx, logdir); err != nil {
return err
}
// Cap the cache allowance and tune the garbage collector
var mem gosigar.Mem
if err := mem.Get(); err == nil {
allowance := int(mem.Total / 1024 / 1024 / 3)
if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance {
log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance)
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance))
}
}
// Ensure Go's GC ignores the database cache for trigger percentage
cache := ctx.GlobalInt(utils.CacheFlag.Name)
gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024)))
log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
godebug.SetGCPercent(int(gogc))
// Start metrics export if enabled
utils.SetupMetrics(ctx)
// Start system runtime metrics collection
go metrics.CollectProcessMetrics(3 * time.Second)
utils.SetupNetwork(ctx)
return nil
}
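The clamp maps the --cache size onto GOGC as 100/(cache in GiB), floored at 20 and capped at 100; a worked sketch of the resulting values:

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, cacheMB := range []int{512, 1024, 2048, 4096, 8192} {
		gogc := math.Max(20, math.Min(100, 100/(float64(cacheMB)/1024)))
		fmt.Printf("--cache %4d MB -> GOGC %d\n", cacheMB, int(gogc))
	}
	// 512 and 1024 MB stay at 100, 2048 -> 50, 4096 -> 25, 8192 hits the 20 floor.
}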
@@ -213,6 +261,9 @@ func main() {
// It creates a default node based on the command line arguments and runs it in
// blocking mode, waiting for it to be shut down.
func geth(ctx *cli.Context) error {
if args := ctx.Args(); len(args) > 0 {
return fmt.Errorf("invalid command: %q", args[0])
}
node := makeFullNode(ctx)
startNode(ctx, node)
node.Wait()
@@ -267,11 +318,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
status, _ := event.Wallet.Status()
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
derivationPath := accounts.DefaultBaseDerivationPath
if event.Wallet.URL().Scheme == "ledger" {
event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
} else {
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
derivationPath = accounts.DefaultLedgerBaseDerivationPath
}
event.Wallet.SelfDerive(derivationPath, stateReader)
case accounts.WalletDropped:
log.Info("Old wallet dropped", "url", event.Wallet.URL())
@@ -282,25 +333,25 @@ func startNode(ctx *cli.Context, stack *node.Node) {
// Start auxiliary services if enabled
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
// Mining only makes sense if a full Ethereum node is running
if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
utils.Fatalf("Light clients do not support mining")
}
var ethereum *eth.Ethereum
if err := stack.Service(&ethereum); err != nil {
utils.Fatalf("Ethereum service not running: %v", err)
}
// Use a reduced number of threads if requested
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
type threaded interface {
SetThreads(threads int)
}
if th, ok := ethereum.Engine().(threaded); ok {
th.SetThreads(threads)
}
}
// Set the gas price to the limits from the CLI and start mining
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
if err := ethereum.StartMining(true); err != nil {
gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
}
ethereum.TxPool().SetGasPrice(gasprice)
threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
}
if err := ethereum.StartMining(threads); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
}


@@ -108,7 +108,7 @@ func makedag(ctx *cli.Context) error {
func version(ctx *cli.Context) error {
fmt.Println(strings.Title(clientIdentifier))
fmt.Println("Version:", params.Version)
fmt.Println("Version:", params.VersionWithMeta)
if gitCommit != "" {
fmt.Println("Git Commit:", gitCommit)
}


@@ -185,12 +185,12 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
parts := strings.SplitN(pattern, "/", 2)
if len(parts) > 1 {
for _, variation := range strings.Split(parts[0], ",") {
if submetrics, ok := metrics[variation].(map[string]interface{}); !ok {
submetrics, ok := metrics[variation].(map[string]interface{})
if !ok {
utils.Fatalf("Failed to retrieve system metrics: %s", path+variation)
return nil
} else {
results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
}
results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
}
return results
}


@@ -83,7 +83,8 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightKDFFlag,
},
},
{Name: "DEVELOPER CHAIN",
{
Name: "DEVELOPER CHAIN",
Flags: []cli.Flag{
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
@@ -113,6 +114,7 @@ var AppHelpFlagGroups = []flagGroup{
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
@@ -184,10 +186,14 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.MiningEnabledFlag,
utils.MinerThreadsFlag,
utils.EtherbaseFlag,
utils.TargetGasLimitFlag,
utils.GasPriceFlag,
utils.ExtraDataFlag,
utils.MinerNotifyFlag,
utils.MinerGasPriceFlag,
utils.MinerGasTargetFlag,
utils.MinerGasLimitFlag,
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag,
},
},
{
@@ -201,16 +207,29 @@ var AppHelpFlagGroups = []flagGroup{
Name: "VIRTUAL MACHINE",
Flags: []cli.Flag{
utils.VMEnableDebugFlag,
utils.EVMInterpreterFlag,
utils.EWASMInterpreterFlag,
},
},
{
Name: "LOGGING AND DEBUGGING",
Flags: append([]cli.Flag{
utils.MetricsEnabledFlag,
utils.FakePoWFlag,
utils.NoCompactionFlag,
}, debug.Flags...),
},
{
Name: "METRICS AND STATS",
Flags: []cli.Flag{
utils.MetricsEnabledFlag,
utils.MetricsEnableInfluxDBFlag,
utils.MetricsInfluxDBEndpointFlag,
utils.MetricsInfluxDBDatabaseFlag,
utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBHostTagFlag,
},
},
{
Name: "WHISPER (EXPERIMENTAL)",
Flags: whisperFlags,
@@ -218,8 +237,11 @@ var AppHelpFlagGroups = []flagGroup{
{
Name: "DEPRECATED",
Flags: []cli.Flag{
utils.FastSyncFlag,
utils.LightModeFlag,
utils.MinerLegacyThreadsFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerLegacyEtherbaseFlag,
utils.MinerLegacyExtraDataFlag,
},
},
{


@@ -180,7 +180,10 @@ func main() {
},
},
}
app.Run(os.Args)
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func showNetwork(ctx *cli.Context) error {
@@ -275,9 +278,8 @@ func createNode(ctx *cli.Context) error {
if len(ctx.Args()) != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
}
config := &adapters.NodeConfig{
Name: ctx.String("name"),
}
config := adapters.RandomNodeConfig()
config.Name = ctx.String("name")
if key := ctx.String("key"); key != "" {
privKey, err := crypto.HexToECDSA(key)
if err != nil {


@@ -103,8 +103,8 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
@@ -284,7 +284,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
spec.Params.EIP98Transition = math.MaxUint64


@@ -609,7 +609,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
}
template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
"Network": network,
"NetworkID": conf.Genesis.Config.ChainId,
"NetworkID": conf.Genesis.Config.ChainID,
"NetworkTitle": strings.Title(network),
"EthstatsPage": config.ethstats,
"ExplorerPage": config.explorer,
@@ -678,9 +678,9 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
// Build and deploy the dashboard service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// dashboardInfos is returned from a dashboard status check to allow reporting


@@ -100,9 +100,9 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
// Build and deploy the ethstats service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// ethstatsInfos is returned from an ethstats status check to allow reporting
@@ -122,7 +122,7 @@ func (info *ethstatsInfos) Report() map[string]string {
"Website address": info.host,
"Website listener port": strconv.Itoa(info.port),
"Login secret": info.secret,
"Banned addresses": fmt.Sprintf("%v", info.banned),
"Banned addresses": strings.Join(info.banned, "\n"),
}
}


@@ -38,7 +38,7 @@ ADD chain.json /chain.json
RUN \
echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
echo 'exec /parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
ENTRYPOINT ["/bin/sh", "explorer.sh"]
`
@@ -140,9 +140,9 @@ func deployExplorer(client *sshClient, network string, chainspec []byte, config
// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// explorerInfos is returned from a block explorer status check to allow reporting


@@ -133,9 +133,9 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
// Build and deploy the faucet service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// faucetInfos is returned from a faucet status check to allow reporting various


@@ -81,9 +81,9 @@ func deployNginx(client *sshClient, network string, port int, nocache bool) ([]b
// Build and deploy the reverse-proxy service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// nginxInfos is returned from an nginx reverse-proxy status check to allow


@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh
ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@@ -68,6 +68,7 @@ services:
- STATS_NAME={{.Ethstats}}
- MINER_NAME={{.Etherbase}}
- GAS_TARGET={{.GasTarget}}
- GAS_LIMIT={{.GasLimit}}
- GAS_PRICE={{.GasPrice}}
logging:
driver: "json-file"
@@ -104,6 +105,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"Ethstats": config.ethstats,
"Etherbase": config.etherbase,
"GasTarget": uint64(1000000 * config.gasTarget),
"GasLimit": uint64(1000000 * config.gasLimit),
"GasPrice": uint64(1000000000 * config.gasPrice),
"Unlock": config.keyJSON != "",
})
@@ -122,6 +124,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
"Etherbase": config.etherbase,
"GasTarget": config.gasTarget,
"GasLimit": config.gasLimit,
"GasPrice": config.gasPrice,
})
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
@@ -139,9 +142,9 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// nodeInfos is returned from a boot or seal node status check to allow reporting
@@ -160,6 +163,7 @@ type nodeInfos struct {
keyJSON string
keyPass string
gasTarget float64
gasLimit float64
gasPrice float64
}
@@ -175,8 +179,9 @@ func (info *nodeInfos) Report() map[string]string {
}
if info.gasTarget > 0 {
// Miner or signer node
report["Gas limit (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
report["Gas price (minimum accepted)"] = fmt.Sprintf("%0.3f GWei", info.gasPrice)
report["Gas floor (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
report["Gas ceil (target maximum)"] = fmt.Sprintf("%0.3f MGas", info.gasLimit)
if info.etherbase != "" {
// Ethash proof-of-work miner
@@ -217,11 +222,12 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
gasLimit, _ := strconv.ParseFloat(infos.envvars["GAS_LIMIT"], 64)
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
// Container available, retrieve its node ID and its genesis json
var out []byte
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil {
return nil, ErrServiceUnreachable
}
id := bytes.Trim(bytes.TrimSpace(out), "\"")
@@ -256,6 +262,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
keyJSON: keyJSON,
keyPass: keyPass,
gasTarget: gasTarget,
gasLimit: gasLimit,
gasPrice: gasPrice,
}
stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port)


@@ -37,7 +37,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'node server.js &' > wallet.sh && \
echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
echo $'exec geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
RUN \
sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \
@@ -120,9 +120,9 @@ func deployWallet(client *sshClient, network string, bootnodes []string, config
// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// walletInfos is returned from a web wallet status check to allow reporting


@@ -45,33 +45,44 @@ type sshClient struct {
// dial establishes an SSH connection to a remote node using the current user and
// the user's configured private RSA key. If that fails, password authentication
// is fallen back to. The caller may override the login user via user@server:port.
// is fallen back to. server can be a string like user:identity@server:port.
func dial(server string, pubkey []byte) (*sshClient, error) {
// Figure out a label for the server and a logger
label := server
if strings.Contains(label, ":") {
label = label[:strings.Index(label, ":")]
}
login := ""
// Figure out username, identity, hostname and port
hostname := ""
hostport := server
username := ""
identity := "id_rsa" // default
if strings.Contains(server, "@") {
login = label[:strings.Index(label, "@")]
label = label[strings.Index(label, "@")+1:]
server = server[strings.Index(server, "@")+1:]
prefix := server[:strings.Index(server, "@")]
if strings.Contains(prefix, ":") {
username = prefix[:strings.Index(prefix, ":")]
identity = prefix[strings.Index(prefix, ":")+1:]
} else {
username = prefix
}
hostport = server[strings.Index(server, "@")+1:]
}
logger := log.New("server", label)
if strings.Contains(hostport, ":") {
hostname = hostport[:strings.Index(hostport, ":")]
} else {
hostname = hostport
hostport += ":22"
}
logger := log.New("server", server)
logger.Debug("Attempting to establish SSH connection")
user, err := user.Current()
if err != nil {
return nil, err
}
if login == "" {
login = user.Username
if username == "" {
username = user.Username
}
// Configure the supported authentication methods (private key and password)
var auths []ssh.AuthMethod
path := filepath.Join(user.HomeDir, ".ssh", "id_rsa")
path := filepath.Join(user.HomeDir, ".ssh", identity)
if buf, err := ioutil.ReadFile(path); err != nil {
log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
} else {
@@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
}
}
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
fmt.Println()
return string(blob), err
}))
// Resolve the IP address of the remote server
addr, err := net.LookupHost(label)
addr, err := net.LookupHost(hostname)
if err != nil {
return nil, err
}
@@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
return nil, errors.New("no IPs associated with domain")
}
// Try to dial in to the remote server
logger.Trace("Dialing remote SSH server", "user", login)
if !strings.Contains(server, ":") {
server += ":22"
}
logger.Trace("Dialing remote SSH server", "user", username)
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
// If no public key is known for SSH, ask the user to confirm
if pubkey == nil {
@@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
// We have a mismatch, forbid connecting
return errors.New("ssh key mismatch, readd the machine to update")
}
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
if err != nil {
return nil, err
}
// Connection established, return our utility wrapper
c := &sshClient{
server: label,
server: hostname,
address: addr[0],
pubkey: pubkey,
client: client,

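For reference, a minimal standalone sketch of the address parsing the reworked dial above performs for the [username[:identity]@]hostname[:port] form; parseServer is a hypothetical name, and the real dial goes on to configure authentication and host-key checking:

package main

import (
	"fmt"
	"strings"
)

// parseServer mirrors the splitting logic in the hunk above:
// [username[:identity]@]hostname[:port], defaulting the identity file to
// id_rsa and the port to 22.
func parseServer(server string) (username, identity, hostname, hostport string) {
	identity = "id_rsa"
	hostport = server
	if at := strings.Index(server, "@"); at >= 0 {
		prefix := server[:at]
		if colon := strings.Index(prefix, ":"); colon >= 0 {
			username, identity = prefix[:colon], prefix[colon+1:]
		} else {
			username = prefix
		}
		hostport = server[at+1:]
	}
	if colon := strings.Index(hostport, ":"); colon >= 0 {
		hostname = hostport[:colon]
	} else {
		hostname = hostport
		hostport += ":22"
	}
	return
}

func main() {
	fmt.Println(parseServer("alice:deploy_key@198.51.100.7:2222"))
	fmt.Println(parseServer("198.51.100.7"))
}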

@@ -92,7 +92,7 @@ func (w *wizard) deployDashboard() {
pages = append(pages, page)
}
}
// Promt the user to chose one, enter manually or simply not list this service
// Prompt the user to chose one, enter manually or simply not list this service
defLabel, defChoice := "don't list", len(pages)+2
if len(pages) > 0 {
defLabel, defChoice = pages[0], 1


@@ -49,7 +49,7 @@ func (w *wizard) deployFaucet() {
existed := err == nil
infos.node.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
infos.node.network = w.conf.Genesis.Config.ChainId.Int64()
infos.node.network = w.conf.Genesis.Config.ChainID.Int64()
// Figure out which port to listen on
fmt.Println()


@@ -121,7 +121,7 @@ func (w *wizard) makeGenesis() {
// Query the user for some custom extras
fmt.Println()
fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
genesis.Config.ChainID = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
// All done, store the genesis and flush to disk
log.Info("Configured new genesis block")


@@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
logger.Info("Starting remote server health-check")
stat := &serverStat{
address: client.address,
services: make(map[string]map[string]string),
}
if client == nil {
@@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
}
client = conn
}
stat.address = client.address
// Client connected one way or another, run health-checks
logger.Debug("Checking for nginx availability")
if infos, err := checkNginx(client, w.network); err != nil {
@@ -203,7 +204,7 @@ func (stats serverStats) render() {
table.SetHeader([]string{"Server", "Address", "Service", "Config", "Value"})
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetColWidth(100)
table.SetColWidth(40)
// Find the longest lines for all columns for the hacked separator
separator := make([]string, 5)
@@ -214,6 +215,9 @@ func (stats serverStats) render() {
if len(stat.address) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.address))
}
if len(stat.failure) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.failure))
}
for service, configs := range stat.services {
if len(service) > len(separator[2]) {
separator[2] = strings.Repeat("-", len(service))
@@ -222,8 +226,10 @@ func (stats serverStats) render() {
if len(config) > len(separator[3]) {
separator[3] = strings.Repeat("-", len(config))
}
if len(value) > len(separator[4]) {
separator[4] = strings.Repeat("-", len(value))
for _, val := range strings.Split(value, "\n") {
if len(val) > len(separator[4]) {
separator[4] = strings.Repeat("-", len(val))
}
}
}
}
@@ -248,7 +254,11 @@ func (stats serverStats) render() {
sort.Strings(services)
if len(services) == 0 {
table.Append([]string{server, stats[server].address, "", "", ""})
if stats[server].failure != "" {
table.Append([]string{server, stats[server].failure, "", "", ""})
} else {
table.Append([]string{server, stats[server].address, "", "", ""})
}
}
for j, service := range services {
// Add an empty line between all services
@@ -263,26 +273,20 @@ func (stats serverStats) render() {
sort.Strings(configs)
for k, config := range configs {
switch {
case j == 0 && k == 0:
table.Append([]string{server, stats[server].address, service, config, stats[server].services[service][config]})
case k == 0:
table.Append([]string{"", "", service, config, stats[server].services[service][config]})
default:
table.Append([]string{"", "", "", config, stats[server].services[service][config]})
for l, value := range strings.Split(stats[server].services[service][config], "\n") {
switch {
case j == 0 && k == 0 && l == 0:
table.Append([]string{server, stats[server].address, service, config, value})
case k == 0 && l == 0:
table.Append([]string{"", "", service, config, value})
case l == 0:
table.Append([]string{"", "", "", config, value})
default:
table.Append([]string{"", "", "", "", value})
}
}
}
}
}
table.Render()
}
// protips contains a collection of network infos to report pro-tips
// based on.
type protips struct {
genesis string
network int64
bootFull []string
bootLight []string
ethstats string
}


@@ -62,14 +62,14 @@ func (w *wizard) manageServers() {
}
}
// makeServer reads a single line from stdin and interprets it as a hostname to
// connect to. It tries to establish a new SSH session and also executing some
// baseline validations.
// makeServer reads a single line from stdin and interprets it as
// username:identity@hostname to connect to. It tries to establish a
// new SSH session and also executing some baseline validations.
//
// If connection succeeds, the server is added to the wizards configs!
func (w *wizard) makeServer() string {
fmt.Println()
fmt.Println("Please enter remote server's address:")
fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?")
// Read and dial the server to ensure docker is present
input := w.readString()
@@ -87,7 +87,7 @@ func (w *wizard) makeServer() string {
return input
}
// selectServer lists the user all the currnetly known servers to choose from,
// selectServer lists the user all the currently known servers to choose from,
// also granting the option to add a new one.
func (w *wizard) selectServer() string {
// List the available server to the user and wait for a choice
@@ -115,7 +115,7 @@ func (w *wizard) selectServer() string {
// manageComponents displays a list of network components the user can tear down
// and an option
func (w *wizard) manageComponents() {
// List all the componens we can tear down, along with an entry to deploy a new one
// List all the components we can tear down, along with an entry to deploy a new one
fmt.Println()
var serviceHosts, serviceNames []string


@@ -50,13 +50,13 @@ func (w *wizard) deployNode(boot bool) {
if boot {
infos = &nodeInfos{port: 30303, peersTotal: 512, peersLight: 256}
} else {
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 7.5, gasLimit: 10, gasPrice: 1}
}
}
existed := err == nil
infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
infos.network = w.conf.Genesis.Config.ChainId.Int64()
infos.network = w.conf.Genesis.Config.ChainID.Int64()
// Figure out where the user wants to store the persistent data
fmt.Println()
@@ -107,7 +107,7 @@ func (w *wizard) deployNode(boot bool) {
// Ethash based miners only need an etherbase to mine against
fmt.Println()
if infos.etherbase == "" {
fmt.Printf("What address should the miner user?\n")
fmt.Printf("What address should the miner use?\n")
for {
if address := w.readAddress(); address != nil {
infos.etherbase = address.Hex()
@@ -115,7 +115,7 @@ func (w *wizard) deployNode(boot bool) {
}
}
} else {
fmt.Printf("What address should the miner user? (default = %s)\n", infos.etherbase)
fmt.Printf("What address should the miner use? (default = %s)\n", infos.etherbase)
infos.etherbase = w.readDefaultAddress(common.HexToAddress(infos.etherbase)).Hex()
}
} else if w.conf.Genesis.Config.Clique != nil {
@@ -152,6 +152,10 @@ func (w *wizard) deployNode(boot bool) {
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)
fmt.Println()
fmt.Printf("What gas limit should full blocks target (MGas)? (default = %0.3f)\n", infos.gasLimit)
infos.gasLimit = w.readDefaultFloat(infos.gasLimit)
fmt.Println()
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)

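The floor/ceil pair prompted for above bounds the gas limit a signer advertises: blocks drift toward the floor when demand is low and may grow only up to the ceil. The hunks here merely wire the two values through puppeth; as a rough illustration of the constraint itself (the step rule below is invented for the sketch, not taken from the core package):

package main

import "fmt"

// nextGasLimit is a hypothetical floor/ceil-bounded adjustment: drift the
// parent limit toward the floor by at most step, and never exceed the ceil.
func nextGasLimit(parent, floor, ceil, step uint64) uint64 {
	limit := parent
	switch {
	case limit < floor:
		limit += step
		if limit > floor {
			limit = floor
		}
	case limit > floor:
		limit -= step
		if limit < floor {
			limit = floor
		}
	}
	if limit > ceil {
		limit = ceil
	}
	return limit
}

func main() {
	// 7.5 MGas floor, 10 MGas ceil, as in the new deployNode defaults.
	fmt.Println(nextGasLimit(4700000, 7500000, 10000000, 100000))
}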

@@ -52,7 +52,7 @@ func (w *wizard) deployWallet() {
existed := err == nil
infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
infos.network = w.conf.Genesis.Config.ChainId.Int64()
infos.network = w.conf.Genesis.Config.ChainID.Int64()
// Figure out which port to listen on
fmt.Println()

cmd/swarm/access.go (new file)

@@ -0,0 +1,233 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/api/client"
"gopkg.in/urfave/cli.v1"
)
var salt = make([]byte, 32)
func init() {
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
}
func accessNewPass(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}
var (
ae *api.AccessEntry
accessKey []byte
err error
ref = args[0]
password = getPassPhrase("", 0, makePasswordList(ctx))
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
accessKey, ae, err = api.DoPassword(ctx, password, salt)
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if dryRun {
err = printManifests(m, nil)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, nil)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}
func accessNewPK(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}
var (
ae *api.AccessEntry
sessionKey []byte
err error
ref = args[0]
privateKey = getPrivKey(ctx)
granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name)
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
sessionKey, ae, err = api.DoPK(ctx, privateKey, granteePublicKey, salt)
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
if dryRun {
err = printManifests(m, nil)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, nil)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}
func accessNewACT(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}
var (
ae *api.AccessEntry
actManifest *api.Manifest
accessKey []byte
err error
ref = args[0]
pkGrantees = []string{}
passGrantees = []string{}
pkGranteesFilename = ctx.String(SwarmAccessGrantKeysFlag.Name)
passGranteesFilename = ctx.String(utils.PasswordFileFlag.Name)
privateKey = getPrivKey(ctx)
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
if pkGranteesFilename == "" && passGranteesFilename == "" {
utils.Fatalf("you have to provide either a grantee public-keys file or an encryption passwords file (or both)")
}
if pkGranteesFilename != "" {
bytes, err := ioutil.ReadFile(pkGranteesFilename)
if err != nil {
utils.Fatalf("had an error reading the grantee public key list")
}
pkGrantees = strings.Split(string(bytes), "\n")
}
if passGranteesFilename != "" {
bytes, err := ioutil.ReadFile(passGranteesFilename)
if err != nil {
utils.Fatalf("could not read password filename: %v", err)
}
passGrantees = strings.Split(string(bytes), "\n")
}
accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees)
if err != nil {
utils.Fatalf("error generating ACT manifest: %v", err)
}
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if err != nil {
utils.Fatalf("error generating root access manifest: %v", err)
}
if dryRun {
err = printManifests(m, actManifest)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, actManifest)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}
func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
js, err := json.Marshal(rootAccessManifest)
if err != nil {
return err
}
fmt.Println(string(js))
if actManifest != nil {
js, err := json.Marshal(actManifest)
if err != nil {
return err
}
fmt.Println(string(js))
}
return nil
}
func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error {
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := client.NewClient(bzzapi)
var (
key string
err error
)
if actManifest != nil {
key, err = client.UploadManifest(actManifest, false)
if err != nil {
return err
}
rootAccessManifest.Entries[0].Access.Act = key
}
key, err = client.UploadManifest(rootAccessManifest, false)
if err != nil {
return err
}
fmt.Println(key)
return nil
}
// makePasswordList reads password lines from the file specified by the global --password flag
// and also by the same subcommand --password flag.
// This function is a fork of utils.MakePasswordList to lookup cli context for subcommand.
// Function ctx.SetGlobal is not setting the global flag value that can be accessed
// by ctx.GlobalString using the current version of cli package.
func makePasswordList(ctx *cli.Context) []string {
path := ctx.GlobalString(utils.PasswordFileFlag.Name)
if path == "" {
path = ctx.String(utils.PasswordFileFlag.Name)
if path == "" {
return nil
}
}
text, err := ioutil.ReadFile(path)
if err != nil {
utils.Fatalf("Failed to read password file: %v", err)
}
lines := strings.Split(string(text), "\n")
// Sanitise DOS line endings.
for i := range lines {
lines[i] = strings.TrimRight(lines[i], "\r")
}
return lines
}

cmd/swarm/access_test.go (new file)

@@ -0,0 +1,605 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// +build !windows
package main
import (
"bytes"
"crypto/rand"
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
gorand "math/rand"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
const (
hashRegexp = `[a-f\d]{128}`
data = "notsorandomdata"
)
var DefaultCurve = crypto.S256()
// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating - node (publisher), uploads to second node then disappears. Content which was uploaded
// is then fetched through 2nd node. since the tested code is not key-aware - we can just
// fetch from the 2nd node using HTTP BasicAuth
func TestAccessPassword(t *testing.T) {
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
proxyNode := cluster.Nodes[0]
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
proxyNode.URL, //it doesn't matter through which node we upload content
"up",
"--encrypt",
dataFilename)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
ref := matches[0]
tmp, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
password := "smth"
passwordFilename := testutil.TempFileWithContent(t, "smth")
defer os.RemoveAll(passwordFilename)
up = runSwarm(t,
"access",
"new",
"pass",
"--dry-run",
"--password",
passwordFilename,
ref,
)
_, matches = up.ExpectRegexp(".+")
up.ExpectExit()
if len(matches) == 0 {
t.Fatalf("stdout not matched")
}
var m api.Manifest
err = json.Unmarshal([]byte(matches[0]), &m)
if err != nil {
t.Fatalf("unmarshal manifest: %v", err)
}
if len(m.Entries) != 1 {
t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
}
e := m.Entries[0]
ct := "application/bzz-manifest+json"
if e.ContentType != ct {
t.Errorf("expected %q content type, got %q", ct, e.ContentType)
}
if e.Access == nil {
t.Fatal("manifest access is nil")
}
a := e.Access
if a.Type != "pass" {
t.Errorf(`got access type %q, expected "pass"`, a.Type)
}
if len(a.Salt) < 32 {
t.Errorf(`got salt with length %v, expected not less the 32 bytes`, len(a.Salt))
}
if a.KdfParams == nil {
t.Fatal("manifest access kdf params is nil")
}
if a.Publisher != "" {
t.Fatal("should be empty")
}
client := swarm.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
t.Fatal(err)
}
httpClient := &http.Client{}
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
}
if response.StatusCode != http.StatusUnauthorized {
t.Fatal("should be a 401")
}
authHeader := response.Header.Get("WWW-Authenticate")
if authHeader == "" {
t.Fatal("should be something here")
}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
t.Fatal(err)
}
req.SetBasicAuth("", password)
response, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
t.Errorf("expected status %v, got %v", http.StatusOK, response.StatusCode)
}
d, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
if string(d) != data {
t.Errorf("expected decrypted data %q, got %q", data, string(d))
}
wrongPasswordFilename := testutil.TempFileWithContent(t, "just wr0ng")
defer os.RemoveAll(wrongPasswordFilename)
//download file with 'swarm down' with wrong password
up = runSwarm(t,
"--bzzapi",
proxyNode.URL,
"down",
"bzz:/"+hash,
tmp,
"--password",
wrongPasswordFilename)
_, matches = up.ExpectRegexp("unauthorized")
if len(matches) != 1 && matches[0] != "unauthorized" {
t.Fatal(`"unauthorized" not found in output"`)
}
up.ExpectExit()
}
// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating - node (publisher), uploads to second node (which is also the grantee) then disappears.
// Content which was uploaded is then fetched through the grantee's http proxy. Since the tested code is private-key aware,
// the test will fail if the proxy's given private key is not granted on the ACT.
func TestAccessPK(t *testing.T) {
// Setup Swarm and upload a test file to it
cluster := newTestCluster(t, 2)
defer cluster.Shutdown()
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
"--encrypt",
dataFilename)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
ref := matches[0]
pk := cluster.Nodes[0].PrivateKey
granteePubKey := crypto.CompressPubkey(&pk.PublicKey)
publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
if err != nil {
t.Fatal(err)
}
passwordFilename := testutil.TempFileWithContent(t, testPassphrase)
defer os.RemoveAll(passwordFilename)
_, publisherAccount := getTestAccount(t, publisherDir)
up = runSwarm(t,
"--bzzaccount",
publisherAccount.Address.String(),
"--password",
passwordFilename,
"--datadir",
publisherDir,
"--bzzapi",
cluster.Nodes[0].URL,
"access",
"new",
"pk",
"--dry-run",
"--grant-key",
hex.EncodeToString(granteePubKey),
ref,
)
_, matches = up.ExpectRegexp(".+")
up.ExpectExit()
if len(matches) == 0 {
t.Fatalf("stdout not matched")
}
//get the public key from the publisher directory
publicKeyFromDataDir := runSwarm(t,
"--bzzaccount",
publisherAccount.Address.String(),
"--password",
passwordFilename,
"--datadir",
publisherDir,
"print-keys",
"--compressed",
)
_, publicKeyString := publicKeyFromDataDir.ExpectRegexp(".+")
publicKeyFromDataDir.ExpectExit()
pkComp := strings.Split(publicKeyString[0], "=")[1]
var m api.Manifest
err = json.Unmarshal([]byte(matches[0]), &m)
if err != nil {
t.Fatalf("unmarshal manifest: %v", err)
}
if len(m.Entries) != 1 {
t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
}
e := m.Entries[0]
ct := "application/bzz-manifest+json"
if e.ContentType != ct {
t.Errorf("expected %q content type, got %q", ct, e.ContentType)
}
if e.Access == nil {
t.Fatal("manifest access is nil")
}
a := e.Access
if a.Type != "pk" {
t.Errorf(`got access type %q, expected "pk"`, a.Type)
}
if len(a.Salt) < 32 {
t.Errorf(`got salt with length %v, expected not less the 32 bytes`, len(a.Salt))
}
if a.KdfParams != nil {
t.Fatal("manifest access kdf params should be nil")
}
if a.Publisher != pkComp {
t.Fatal("publisher key did not match")
}
client := swarm.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
t.Fatal(err)
}
httpClient := &http.Client{}
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
}
if response.StatusCode != http.StatusOK {
t.Fatal("should be a 200")
}
d, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
if string(d) != data {
t.Errorf("expected decrypted data %q, got %q", data, string(d))
}
}
// TestAccessACT tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
func TestAccessACT(t *testing.T) {
testAccessACT(t, 0)
}
// TestAccessACTScale tests the creation of the ACT manifest end-to-end, with 1000 bogus entries (i.e. 1000 EC keys + default scenario = 3 nodes 1 unauthorized = 1003 keys in the ACT manifest)
func TestAccessACTScale(t *testing.T) {
testAccessACT(t, 1000)
}
// TestAccessACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
// the test fires up a 3 node cluster, then randomly picks 2 nodes which will be acting as grantees to the data
// set and also protects the ACT with a password. the third node should fail decoding the reference as it will not be granted access.
// the third node then tries to download using a correct password (and succeeds) then uses a wrong password and fails.
// the publisher uploads through one of the nodes then disappears.
func testAccessACT(t *testing.T, bogusEntries int) {
// Setup Swarm and upload a test file to it
const clusterSize = 3
cluster := newTestCluster(t, clusterSize)
defer cluster.Shutdown()
var uploadThroughNode = cluster.Nodes[0]
client := swarm.NewClient(uploadThroughNode.URL)
r1 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
nodeToSkip := r1.Intn(clusterSize) // a number between 0 and 2 (node indices in `cluster`)
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
"--encrypt",
dataFilename)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
ref := matches[0]
grantees := []string{}
for i, v := range cluster.Nodes {
if i == nodeToSkip {
continue
}
pk := v.PrivateKey
granteePubKey := crypto.CompressPubkey(&pk.PublicKey)
grantees = append(grantees, hex.EncodeToString(granteePubKey))
}
if bogusEntries > 0 {
bogusGrantees := []string{}
for i := 0; i < bogusEntries; i++ {
prv, err := ecies.GenerateKey(rand.Reader, DefaultCurve, nil)
if err != nil {
t.Fatal(err)
}
bogusGrantees = append(bogusGrantees, hex.EncodeToString(crypto.CompressPubkey(&prv.ExportECDSA().PublicKey)))
}
r2 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
for i := 0; i < len(grantees); i++ {
insertAtIdx := r2.Intn(len(bogusGrantees))
bogusGrantees = append(bogusGrantees[:insertAtIdx], append([]string{grantees[i]}, bogusGrantees[insertAtIdx:]...)...)
}
grantees = bogusGrantees
}
granteesPubkeyListFile := testutil.TempFileWithContent(t, strings.Join(grantees, "\n"))
defer os.RemoveAll(granteesPubkeyListFile)
publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(publisherDir)
passwordFilename := testutil.TempFileWithContent(t, testPassphrase)
defer os.RemoveAll(passwordFilename)
actPasswordFilename := testutil.TempFileWithContent(t, "smth")
defer os.RemoveAll(actPasswordFilename)
_, publisherAccount := getTestAccount(t, publisherDir)
up = runSwarm(t,
"--bzzaccount",
publisherAccount.Address.String(),
"--password",
passwordFilename,
"--datadir",
publisherDir,
"--bzzapi",
cluster.Nodes[0].URL,
"access",
"new",
"act",
"--grant-keys",
granteesPubkeyListFile,
"--password",
actPasswordFilename,
ref,
)
_, matches = up.ExpectRegexp(`[a-f\d]{64}`)
up.ExpectExit()
if len(matches) == 0 {
t.Fatalf("stdout not matched")
}
//get the public key from the publisher directory
publicKeyFromDataDir := runSwarm(t,
"--bzzaccount",
publisherAccount.Address.String(),
"--password",
passwordFilename,
"--datadir",
publisherDir,
"print-keys",
"--compressed",
)
_, publicKeyString := publicKeyFromDataDir.ExpectRegexp(".+")
publicKeyFromDataDir.ExpectExit()
pkComp := strings.Split(publicKeyString[0], "=")[1]
hash := matches[0]
m, _, err := client.DownloadManifest(hash)
if err != nil {
t.Fatalf("unmarshal manifest: %v", err)
}
if len(m.Entries) != 1 {
t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
}
e := m.Entries[0]
ct := "application/bzz-manifest+json"
if e.ContentType != ct {
t.Errorf("expected %q content type, got %q", ct, e.ContentType)
}
if e.Access == nil {
t.Fatal("manifest access is nil")
}
a := e.Access
if a.Type != "act" {
t.Fatalf(`got access type %q, expected "act"`, a.Type)
}
if len(a.Salt) < 32 {
t.Fatalf(`got salt with length %v, expected not less the 32 bytes`, len(a.Salt))
}
if a.Publisher != pkComp {
t.Fatal("publisher key did not match")
}
httpClient := &http.Client{}
// all nodes except the skipped node should be able to decrypt the content
for i, node := range cluster.Nodes {
log.Debug("trying to fetch from node", "node index", i)
url := node.URL + "/" + "bzz:/" + hash
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
}
log.Debug("got response from node", "response code", response.StatusCode)
if i == nodeToSkip {
log.Debug("reached node to skip", "status code", response.StatusCode)
if response.StatusCode != http.StatusUnauthorized {
t.Fatalf("should be a 401")
}
// try downloading using a password instead, using the unauthorized node
passwordUrl := strings.Replace(url, "http://", "http://:smth@", -1)
response, err = httpClient.Get(passwordUrl)
if err != nil {
t.Fatal(err)
}
if response.StatusCode != http.StatusOK {
t.Fatal("should be a 200")
}
// now try with the wrong password, expect 401
passwordUrl = strings.Replace(url, "http://", "http://:smthWrong@", -1)
response, err = httpClient.Get(passwordUrl)
if err != nil {
t.Fatal(err)
}
if response.StatusCode != http.StatusUnauthorized {
t.Fatal("should be a 401")
}
continue
}
if response.StatusCode != http.StatusOK {
t.Fatal("should be a 200")
}
d, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
if string(d) != data {
t.Errorf("expected decrypted data %q, got %q", data, string(d))
}
}
}
// TestKeypairSanity is a sanity test for the crypto scheme for ACT. it asserts the correct shared secret according to
// the specs at https://github.com/ethersphere/swarm-docs/blob/eb857afda906c6e7bb90d37f3f334ccce5eef230/act.md
func TestKeypairSanity(t *testing.T) {
salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
t.Fatalf("reading from crypto/rand failed: %v", err.Error())
}
sharedSecret := "a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d"
for i, v := range []struct {
publisherPriv string
granteePub string
}{
{
publisherPriv: "ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459",
granteePub: "0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a",
},
{
publisherPriv: "70c7a73011aa56584a0009ab874794ee7e5652fd0c6911cd02f8b6267dd82d2d",
granteePub: "02e6f8d5e28faaa899744972bb847b6eb805a160494690c9ee7197ae9f619181db",
},
} {
b, _ := hex.DecodeString(v.granteePub)
granteePub, _ := crypto.DecompressPubkey(b)
publisherPrivate, _ := crypto.HexToECDSA(v.publisherPriv)
ssKey, err := api.NewSessionKeyPK(publisherPrivate, granteePub, salt)
if err != nil {
t.Fatal(err)
}
hasher := sha3.NewKeccak256()
hasher.Write(salt)
shared, err := hex.DecodeString(sharedSecret)
if err != nil {
t.Fatal(err)
}
hasher.Write(shared)
sum := hasher.Sum(nil)
if !bytes.Equal(ssKey, sum) {
t.Fatalf("%d: got a session key mismatch", i)
}
}
}
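The assertion above reduces to sessionKey = keccak256(salt || sharedSecret). A minimal reproduction of that hash step, assuming golang.org/x/crypto/sha3's legacy Keccak-256 is interchangeable with the repo's crypto/sha3 (the ECDH producing the shared secret is elided):

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// sessionKey recomputes the value the test expects from api.NewSessionKeyPK:
// Keccak-256 over the salt concatenated with the ECDH shared secret.
func sessionKey(salt, shared []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(salt)
	h.Write(shared)
	return h.Sum(nil)
}

func main() {
	salt := make([]byte, 32) // the test draws this from crypto/rand; zeroed here for determinism
	shared, _ := hex.DecodeString("a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d")
	fmt.Printf("%x\n", sessionKey(salt, shared))
}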

cmd/swarm/bootnodes.go (new file)

@@ -0,0 +1,77 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
var SwarmBootnodes = []string{
// Foundation Swarm Gateway Cluster
"enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399",
"enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400",
"enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401",
"enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402",
"enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403",
"enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404",
"enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405",
"enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406",
"enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407",
"enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408",
"enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409",
"enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410",
"enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411",
"enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412",
"enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413",
"enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414",
"enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415",
"enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416",
"enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417",
"enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418",
"enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419",
"enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420",
"enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421",
"enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422",
"enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423",
"enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424",
"enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425",
"enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426",
"enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427",
"enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428",
"enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429",
"enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430",
"enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431",
"enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432",
"enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433",
"enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434",
"enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435",
"enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436",
"enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437",
"enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438",
"enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439",
"enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440",
"enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441",
"enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442",
"enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443",
"enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444",
"enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445",
"enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446",
// Mainframe
"enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399",
"enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399",
"enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399",
"enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399",
"enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399",
"enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399",
}
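Each entry above is an enode URL of the form enode://<hex node ID>@<host>:<port>. Since it is a well-formed URL, the standard library can split it; a quick sketch using net/url rather than the p2p package's own parser:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	raw := "enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	// The user-info part carries the node's public key, the host the endpoint.
	fmt.Println("id:  ", u.User.Username()[:16]+"...")
	fmt.Println("host:", u.Hostname())
	fmt.Println("port:", u.Port())
}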


@@ -24,6 +24,7 @@ import (
"reflect"
"strconv"
"strings"
"time"
"unicode"
cli "gopkg.in/urfave/cli.v1"
@@ -58,19 +59,27 @@ var (
//constants for environment variables
const (
SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
SWARM_ENV_PORT = "SWARM_PORT"
SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE"
SWARM_ENV_ENS_API = "SWARM_ENS_API"
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
SWARM_ENV_CORS = "SWARM_CORS"
SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
GETH_ENV_DATADIR = "GETH_DATADIR"
SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
SWARM_ENV_PORT = "SWARM_PORT"
SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
SWARM_ENV_ENS_API = "SWARM_ENS_API"
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
SWARM_ENV_CORS = "SWARM_CORS"
SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
GETH_ENV_DATADIR = "GETH_DATADIR"
)
// These settings ensure that TOML keys use the same names as Go struct fields.
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{
//before booting the swarm node, build the configuration
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
//check for deprecated flags
checkDeprecated(ctx)
//start by creating a default config
config = bzzapi.NewDefaultConfig()
config = bzzapi.NewConfig()
//first load settings from config file (if provided)
config, err = configFileOverride(config, ctx)
if err != nil {
@@ -126,7 +133,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
log.Debug(printConfig(config))
}
//override the current config with whatever is in the config file, if a config file has been provided
//configFileOverride overrides the current config with the config file, if a config file has been provided
func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) {
var err error
@@ -136,7 +143,8 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config
if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" {
utils.Fatalf("Config file flag provided with invalid file path")
}
f, err := os.Open(filepath)
var f *os.File
f, err = os.Open(filepath)
if err != nil {
return nil, err
}
@@ -168,7 +176,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 {
currentConfig.NetworkId = uint64(id)
currentConfig.NetworkID = uint64(id)
}
}
@@ -191,12 +199,24 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.SwapEnabled = true
}
if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
currentConfig.SyncEnabled = true
if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
currentConfig.SyncEnabled = false
}
currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
currentConfig.SyncUpdateDelay = d
}
if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
currentConfig.LightNodeEnabled = true
}
if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
currentConfig.DeliverySkipCheck = true
}
currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
@@ -209,16 +229,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.EnsAPIs = ensAPIs
}
if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
}
if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
currentConfig.Cors = cors
}
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) {
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
currentConfig.LocalStoreParams.ChunkDbPath = storePath
}
if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
currentConfig.LocalStoreParams.DbCapacity = storeCapacity
}
if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
}
return currentConfig
@@ -239,7 +263,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 {
currentConfig.NetworkId = uint64(id)
currentConfig.NetworkID = uint64(id)
}
}
@@ -262,17 +286,35 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
}
}
if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
if sync, err := strconv.ParseBool(syncenable); err != nil {
currentConfig.SyncEnabled = sync
if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
if sync, err := strconv.ParseBool(syncdisable); err != nil {
currentConfig.SyncEnabled = !sync
}
}
if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
if skipCheck, err := strconv.ParseBool(v); err != nil {
currentConfig.DeliverySkipCheck = skipCheck
}
}
if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
if d, err := time.ParseDuration(v); err != nil {
currentConfig.SyncUpdateDelay = d
}
}
if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
if lightnode, err := strconv.ParseBool(lne); err != nil {
currentConfig.LightNodeEnabled = lightnode
}
}
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
currentConfig.SwapApi = swapapi
currentConfig.SwapAPI = swapapi
}
if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
@@ -288,10 +330,6 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
currentConfig.Cors = cors
}
if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" {
currentConfig.BootNodes = bootnodes
}
return currentConfig
}
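Every override in envVarsOverride follows the same shape: read the variable, parse it, apply it conditionally. A generic sketch of that pattern for booleans, written with an err == nil guard (the hunks above test err != nil; the sketch follows the conventional form); boolEnv is a hypothetical helper, not part of the diff:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolEnv invokes fn with the parsed value of the named variable when it is
// set and parses cleanly.
func boolEnv(name string, fn func(bool)) {
	if v := os.Getenv(name); v != "" {
		if b, err := strconv.ParseBool(v); err == nil {
			fn(b)
		}
	}
}

func main() {
	os.Setenv("SWARM_SYNC_DISABLE", "true")
	syncEnabled := true
	boolEnv("SWARM_SYNC_DISABLE", func(disable bool) { syncEnabled = !disable })
	fmt.Println("sync enabled:", syncEnabled)
}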
@@ -312,18 +350,6 @@ func dumpConfig(ctx *cli.Context) error {
return nil
}
//deprecated flags checked here
func checkDeprecated(ctx *cli.Context) {
// exit if the deprecated --ethapi flag is set
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
}
// warn if --ens-api flag is set
if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
}
}
//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
for _, ensAPI := range cfg.EnsAPIs {


@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"testing"
@@ -34,7 +35,7 @@ import (
func TestDumpConfig(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
defaultConf := api.NewDefaultConfig()
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
t.Fatal(err)
@@ -43,7 +44,7 @@ func TestDumpConfig(t *testing.T) {
swarm.ExpectExit()
}
func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +56,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
swarm.ExpectExit()
}
func TestFailsNoBzzAccount(t *testing.T) {
func TestConfigFailsNoBzzAccount(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +67,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
swarm.ExpectExit()
}
func TestCmdLineOverrides(t *testing.T) {
func TestConfigCmdLineOverrides(t *testing.T) {
dir, err := ioutil.TempDir("", "bzztest")
if err != nil {
t.Fatal(err)
@@ -85,9 +86,10 @@ func TestCmdLineOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
@@ -120,12 +122,16 @@ func TestCmdLineOverrides(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
if info.NetworkId != 42 {
t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
if info.NetworkID != 42 {
t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
}
if !info.SyncEnabled {
t.Fatal("Expected Sync to be enabled, but is false")
if info.SyncEnabled {
t.Fatal("Expected Sync to be disabled, but is true")
}
if !info.DeliverySkipCheck {
t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
if info.Cors != "*" {
@@ -135,7 +141,7 @@ func TestCmdLineOverrides(t *testing.T) {
node.Shutdown()
}
func TestFileOverrides(t *testing.T) {
func TestConfigFileOverrides(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
@@ -145,16 +151,16 @@ func TestFileOverrides(t *testing.T) {
//create a config file
//first, create a default conf
defaultConf := api.NewDefaultConfig()
defaultConf := api.NewConfig()
//change some values in order to test if they have been loaded
defaultConf.SyncEnabled = true
defaultConf.NetworkId = 54
defaultConf.SyncEnabled = false
defaultConf.DeliverySkipCheck = true
defaultConf.NetworkID = 54
defaultConf.Port = httpPort
defaultConf.StoreParams.DbCapacity = 9000000
defaultConf.ChunkerParams.Branches = 64
defaultConf.HiveParams.CallInterval = 6000000000
defaultConf.DbCapacity = 9000000
defaultConf.HiveParams.KeepAliveInterval = 6000000000
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
defaultConf.SyncParams.KeyBufferSize = 512
//defaultConf.SyncParams.KeyBufferSize = 512
//create a TOML string
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
@@ -215,38 +221,38 @@ func TestFileOverrides(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
if info.NetworkId != 54 {
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
if info.NetworkID != 54 {
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
}
if !info.SyncEnabled {
t.Fatal("Expected Sync to be enabled, but is false")
if info.SyncEnabled {
t.Fatal("Expected Sync to be disabled, but is true")
}
if info.StoreParams.DbCapacity != 9000000 {
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
if !info.DeliverySkipCheck {
t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
if info.ChunkerParams.Branches != 64 {
t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
if info.DbCapacity != 9000000 {
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
}
if info.HiveParams.CallInterval != 6000000000 {
t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
if info.HiveParams.KeepAliveInterval != 6000000000 {
t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
}
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
}
if info.SyncParams.KeyBufferSize != 512 {
t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
}
// if info.SyncParams.KeyBufferSize != 512 {
// t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
// }
node.Shutdown()
}
func TestEnvVars(t *testing.T) {
func TestConfigEnvVars(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
if err != nil {
@@ -257,7 +263,8 @@ func TestEnvVars(t *testing.T) {
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))
dir, err := ioutil.TempDir("", "bzztest")
if err != nil {
@@ -326,23 +333,27 @@ func TestEnvVars(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
if info.NetworkId != 999 {
t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
if info.NetworkID != 999 {
t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
}
if info.Cors != "*" {
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
}
if !info.SyncEnabled {
t.Fatal("Expected Sync to be enabled, but is false")
if info.SyncEnabled {
t.Fatal("Expected Sync to be disabled, but is true")
}
if !info.DeliverySkipCheck {
t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
node.Shutdown()
cmd.Process.Kill()
}
func TestCmdLineOverridesFile(t *testing.T) {
func TestConfigCmdLineOverridesFile(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
@@ -352,26 +363,27 @@ func TestCmdLineOverridesFile(t *testing.T) {
//create a config file
//first, create a default conf
defaultConf := api.NewDefaultConfig()
defaultConf := api.NewConfig()
//change some values in order to test if they have been loaded
defaultConf.SyncEnabled = false
defaultConf.NetworkId = 54
defaultConf.SyncEnabled = true
defaultConf.NetworkID = 54
defaultConf.Port = "8588"
defaultConf.StoreParams.DbCapacity = 9000000
defaultConf.ChunkerParams.Branches = 64
defaultConf.HiveParams.CallInterval = 6000000000
defaultConf.DbCapacity = 9000000
defaultConf.HiveParams.KeepAliveInterval = 6000000000
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
defaultConf.SyncParams.KeyBufferSize = 512
//defaultConf.SyncParams.KeyBufferSize = 512
//create a TOML file
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
}
//write file
f, err := ioutil.TempFile("", "testconfig.toml")
fname := "testconfig.toml"
f, err := ioutil.TempFile("", fname)
if err != nil {
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
}
defer os.Remove(f.Name()) // TempFile appends random characters to the pattern, so remove the actual file, not the pattern
//write file
_, err = f.WriteString(string(out))
if err != nil {
@@ -392,7 +404,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
"--ens-api", "",
@@ -427,33 +439,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
if info.NetworkId != expectNetworkId {
t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
if info.NetworkID != expectNetworkId {
t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
}
if !info.SyncEnabled {
t.Fatal("Expected Sync to be enabled, but is false")
if info.SyncEnabled {
t.Fatal("Expected Sync to be disabled, but is true")
}
if info.StoreParams.DbCapacity != 9000000 {
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
if info.LocalStoreParams.DbCapacity != 9000000 {
t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
}
if info.ChunkerParams.Branches != 64 {
t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
}
if info.HiveParams.CallInterval != 6000000000 {
t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
if info.HiveParams.KeepAliveInterval != 6000000000 {
t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
}
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
}
if info.SyncParams.KeyBufferSize != 512 {
t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
}
// if info.SyncParams.KeyBufferSize != 512 {
// t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
// }
node.Shutdown()
}
@@ -552,3 +560,16 @@ func TestValidateConfig(t *testing.T) {
}
}
}
func assignTCPPort() (string, error) {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return "", err
}
l.Close()
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return "", err
}
return port, nil
}

cmd/swarm/db.go

@@ -23,6 +23,7 @@ import (
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (
func dbExport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
if len(args) != 3 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
}
store, err := openDbStore(args[0])
store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {
func dbImport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
if len(args) != 3 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
}
store, err := openDbStore(args[0])
store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {
func dbClean(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
}
store, err := openDbStore(args[0])
store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
store.Cleanup()
}
func openDbStore(path string) (*storage.DbStore, error) {
func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
}
hash := storage.MakeHashFunc("SHA3")
return storage.NewDbStore(path, hash, 10000000, 0)
storeparams := storage.NewDefaultStoreParams()
ldbparams := storage.NewLDBStoreParams(storeparams, path)
ldbparams.BaseKey = basekey
return storage.NewLDBStore(ldbparams)
}
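Together with the export/import test added further down, the new three-argument invocations look like this. A hedged sketch: the paths are placeholders, the base key is the node's bzz key hex without its 0x prefix, and the clean handler is assumed to be registered as the `clean` subcommand.

    swarm db export <datadir>/chunks export.tar <basekey-hex>
    swarm db import <datadir>/chunks export.tar <basekey-hex>
    swarm db clean <datadir>/chunks <basekey-hex>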

103 cmd/swarm/download.go Normal file

@@ -0,0 +1,103 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"gopkg.in/urfave/cli.v1"
)
func download(ctx *cli.Context) {
log.Debug("downloading content using swarm down")
args := ctx.Args()
dest := "."
switch len(args) {
case 0:
utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
case 1:
log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
default:
log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
if absDest, err := filepath.Abs(args[1]); err == nil {
dest = absDest
} else {
utils.Fatalf("could not get download path: %v", err)
}
}
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
client = swarm.NewClient(bzzapi)
)
if fi, err := os.Stat(dest); err == nil {
if isRecursive && !fi.Mode().IsDir() {
utils.Fatalf("destination path is not a directory!")
}
} else {
if !os.IsNotExist(err) {
utils.Fatalf("could not stat path: %v", err)
}
}
uri, err := api.Parse(args[0])
if err != nil {
utils.Fatalf("could not parse uri argument: %v", err)
}
dl := func(credentials string) error {
// assume behaviour according to --recursive switch
if isRecursive {
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil {
if err == swarm.ErrUnauthorized {
return err
}
return fmt.Errorf("directory %s: %v", uri.Path, err)
}
} else {
// we are downloading a file
log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path)
err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials)
if err != nil {
if err == swarm.ErrUnauthorized {
return err
}
return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err)
}
}
return nil
}
if passwords := makePasswordList(ctx); passwords != nil {
password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords)
err = dl(password)
} else {
err = dl("")
}
if err != nil {
utils.Fatalf("download: %v", err)
}
}
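Matching the usage string above, hedged example invocations; the hash and paths are placeholders, and the recursive flag is assumed to be registered as --recursive.

    swarm down bzz:/<hash>/path/file.txt ./downloads
    swarm down --recursive bzz:/<hash> ./downloads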

139 cmd/swarm/export_test.go Normal file

@@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"crypto/md5"
"crypto/rand"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
"github.com/ethereum/go-ethereum/swarm"
)
// TestCLISwarmExportImport performs the following test:
// 1. runs swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
cluster := newTestCluster(t, 1)
// generate random 10mb file
f, cleanup := generateRandomFile(t, 10000000)
defer cleanup()
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
up.ExpectExit()
hash := matches[0]
var info swarm.Info
if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
t.Fatal(err)
}
cluster.Stop()
defer cluster.Cleanup()
// generate an export.tar
exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
exportCmd.ExpectExit()
// start second cluster
cluster2 := newTestCluster(t, 1)
var info2 swarm.Info
if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
t.Fatal(err)
}
// stop second cluster, so that we close LevelDB
cluster2.Stop()
defer cluster2.Cleanup()
// import the export.tar
importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
importCmd.ExpectExit()
// spin second cluster back up
cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
// try to fetch imported file
res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
}
// compare downloaded file with the generated random file
mustEqualFiles(t, f, res.Body)
}
func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
h := md5.New()
upLen, err := io.Copy(h, up)
if err != nil {
t.Fatal(err)
}
upHash := h.Sum(nil)
h.Reset()
downLen, err := io.Copy(h, down)
if err != nil {
t.Fatal(err)
}
downHash := h.Sum(nil)
if !bytes.Equal(upHash, downHash) || upLen != downLen {
t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
}
}
func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
// callback for tmp file cleanup
teardown = func() {
tmp.Close()
os.Remove(tmp.Name())
}
// write size bytes of random data to the file (size is the caller-supplied length)
buf := make([]byte, size)
_, err = rand.Read(buf)
if err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
t.Fatal(err)
}
return tmp, teardown
}

127 cmd/swarm/fs.go Normal file

@@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
)
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
}
client, err := dialRPC(cliContext)
if err != nil {
utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
mf := &fuse.MountInfo{}
mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
if err != nil {
utils.Fatalf("error expanding path for mount point: %v", err)
}
err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
if err != nil {
utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
}
}
func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
}
client, err := dialRPC(cliContext)
if err != nil {
utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
mf := fuse.MountInfo{}
err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
if err != nil {
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
}
fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
}
func listMounts(cliContext *cli.Context) {
client, err := dialRPC(cliContext)
if err != nil {
utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
mf := []fuse.MountInfo{}
err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
if err != nil {
utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
}
if len(mf) == 0 {
fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
} else {
fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
for i, mountInfo := range mf {
fmt.Printf("%d:\n", i)
fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
}
}
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
var endpoint string
if ctx.IsSet(utils.IPCPathFlag.Name) {
endpoint = ctx.String(utils.IPCPathFlag.Name)
} else {
utils.Fatalf("swarm ipc endpoint not specified")
}
if endpoint == "" {
endpoint = node.DefaultIPCEndpoint(clientIdentifier)
} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
return rpc.Dial(endpoint)
}
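Based on the usage strings in mount and unmount above, hedged example invocations; the IPC path and manifest hash are placeholders, and the list handler is assumed to be registered as the `list` subcommand.

    swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> /mnt/swarm
    swarm fs unmount --ipcpath <path to bzzd.ipc> /mnt/swarm
    swarm fs list --ipcpath <path to bzzd.ipc>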

Some files were not shown because too many files have changed in this diff.