Compare commits


107 Commits

Author SHA1 Message Date
Péter Szilágyi
316fc7ecfc params, swarm: release Geth v1.8.14 and Swarm v0.3.2 2018-08-22 11:39:00 +03:00
gary rong
af85d8e2b3 cmd, eth: apply default miner recommit setting (#17479) 2018-08-22 09:47:50 +03:00
Péter Szilágyi
6a8b47c880 Merge pull request #17472 from karalabe/txpool-locals
cmd, core, miner: add --txpool.locals and priority mining
2018-08-22 09:46:13 +03:00
Péter Szilágyi
e0d0e64ce2 cmd, core, miner: add --txpool.locals and priority mining 2018-08-22 09:43:57 +03:00
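A hedged usage sketch for the new flag (the account address is a placeholder): --txpool.locals takes a comma-separated list of addresses whose transactions the pool treats as local, giving them journaling and, per this commit, priority inclusion when mining:

    geth --mine --txpool.locals 0xca35b7d915458ef540ade6068dfe2f44e8fa733c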
gary rong
b2c644ffb5 cmd, eth, miner: make recommit configurable (#17444)
* cmd, eth, miner: make recommit configurable

* cmd, eth, les, miner: polish a bit

* miner: filter duplicate sealing work

* cmd: remove unnecessary conversion

* miner: avoid micro-optimization in favor of cleaner code
2018-08-21 22:56:54 +03:00
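The recommit interval controls how often the miner rebuilds its pending block to pull in newly arrived transactions. A hedged example using the recommit-interval flag added here (spelling per MinerRecommitIntervalFlag in the flag lists further down this page; the 5s value is illustrative):

    geth --mine --miner.recommit 5s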
Geon Kim
522cfc68ff swarm: fix typos (#17473) 2018-08-21 21:13:33 +03:00
gary rong
a063fe9b2d miner: fix uncle iteration logic (#17469) 2018-08-21 17:33:04 +03:00
Péter Szilágyi
1dcad8b2de Merge pull request #17466 from karalabe/rinkeby-light-snapshots
consensus/clique, light: light client snapshots on Rinkeby
2018-08-21 16:13:26 +03:00
Jeremy Schlatter
86acdf1a5b vendor: update rjeczalik/notify so that it compiles on go1.11 (#17467) 2018-08-21 16:13:03 +03:00
Péter Szilágyi
9f036647e4 consensus/clique, light: light client snapshots on Rinkeby 2018-08-21 15:21:59 +03:00
Felföldi Zsolt
355fc47d39 les: fix CHT field in nodeInfo (#17465) 2018-08-21 14:58:10 +03:00
Pierre Neter
76301ca051 eth: upgradedb subcommand was dropped (#17464) 2018-08-21 13:36:38 +03:00
Anton Evangelatov
c582667c9b swarm/network: bump bzz protocol version (#17449) 2018-08-21 11:34:40 +03:00
Anton Evangelatov
dcd97c41fa swarm, swarm/network, swarm/pss: log error and fix logs (#17410)
* swarm, swarm/network, swarm/pss: log error and fix logs

* swarm/pss: log compressed publickey
2018-08-21 11:34:10 +03:00
Péter Szilágyi
85c6a1c526 Merge pull request #17451 from karalabe/bn256-relicense
crypto/bn256: add missing license file, release wrapper in BSD-3
2018-08-21 11:33:33 +03:00
Péter Szilágyi
ecca49e078 Merge pull request #17460 from holiman/tracerfix
Ensure from < to when tracing chain
2018-08-21 11:32:50 +03:00
Martin Holst Swende
106d196ec4 eth: ensure from<to when tracing chain (credits Chen Nan via bugbounty) 2018-08-21 09:48:53 +02:00
Péter Szilágyi
a6d45a5d00 crypto/bn256: add missing license file, release wrapper in BSD-3 2018-08-20 18:05:06 +03:00
Nilesh Trivedi
7d38d53ae4 cmd/puppeth: accept ssh identity in the server string (#17407)
* cmd/puppeth: Accept identityfile in the server string with fallback to id_rsa

* cmd/puppeth: code polishes + fix health-check double ports
2018-08-20 16:54:38 +03:00
Felföldi Zsolt
1de9ada401 light: new CHTs (#17448) 2018-08-20 16:49:28 +03:00
Aditya
c929030e28 core/types: fix docs about protected Vs (#17436) 2018-08-20 15:14:50 +03:00
Péter Szilágyi
87f294aa0b Merge pull request #17437 from hackmod/console-typo
console: fixed comment typo
2018-08-20 15:12:48 +03:00
Péter Szilágyi
68f0a414ea Merge pull request #17430 from karalabe/miner-notify-less-aggressive-test
consensus/ethash: reduce notify test aggressiveness
2018-08-20 15:11:49 +03:00
Anton Evangelatov
55d050ccd8 travis: remove brew update and osxfuse install (#17429) 2018-08-20 15:11:08 +03:00
Anton Evangelatov
a8aa89accb swarm/storage: cleanup task - remove bigger chunks (#17424) 2018-08-20 15:10:30 +03:00
Elad
c4078fc805 cmd/swarm: added swarm bootnodes (#17414) 2018-08-20 15:09:50 +03:00
Wuxiang
d3488c1aff p2p: fix typo (#17446) 2018-08-20 15:07:21 +03:00
hackyminer
0fd02fe9cf console: fixed comment typo 2018-08-18 07:47:10 +09:00
Péter Szilágyi
251c868008 consensus/ethash: reduce notify test aggressiveness 2018-08-17 18:12:39 +03:00
Péter Szilágyi
99e1a5e0fb Merge pull request #17426 from karalabe/miner-fees-log-fix
miner: update mining log with correct fee calculation
2018-08-17 16:46:57 +03:00
Péter Szilágyi
22cd3f70a6 miner: update mining log with correct fee calculation 2018-08-17 15:47:14 +03:00
Felix Lange
2695fa2213 les: fix crasher in NodeInfo when running as server (#17419)
* les: fix crasher in NodeInfo when running as server

The ProtocolManager computes CHT and Bloom trie roots by asking the
indexers for their current head. It tried to get the indexers from
LesOdr, but no LesOdr instance is created in server mode.

Attempt to fix this by moving the indexers, protocol creation and
NodeInfo to a new lesCommons struct which is embedded into both server
and client.

All this setup code should really be cleaned up, but this is just a
hotfix so we have to do that some other time.

* les: fix commons protocol maker
2018-08-17 13:21:53 +03:00
Anton Evangelatov
f44046a1c6 build: do not require ethereum-swarm deb when installing ethereum (#17425) 2018-08-17 11:33:39 +03:00
Péter Szilágyi
2a06791461 Merge pull request #17368 from karalabe/bn256-go1.11
crypto/bn256: fix issues caused by Go 1.11
2018-08-17 11:01:35 +03:00
Sasuke1964
60390878a5 accounts: fixed typo (#17421) 2018-08-17 10:38:02 +03:00
Péter Szilágyi
9bf6bb8f63 Merge pull request #17405 from karalabe/miner-remote-dag
consensus/ethash: use DAGs for remote mining, generate async
2018-08-16 15:25:25 +03:00
Péter Szilágyi
1d439b5e10 Merge pull request #17416 from karalabe/miner-details
miner: add gas and fee details to mining logs
2018-08-16 15:04:50 +03:00
Péter Szilágyi
62f5137a72 miner: add gas and fee details to mining logs 2018-08-16 14:49:00 +03:00
gary rong
54216811a0 miner: regenerate mining work every 3 seconds (#17413)
* miner: regenerate mining work every 3 seconds

* miner: polish
2018-08-16 14:14:33 +03:00
Péter Szilágyi
5952d962dc Merge pull request #17412 from karalabe/puppeth-fix-dial-panic
cmd/puppeth: fix nil panic on disconnected stats gathering
2018-08-16 13:19:01 +03:00
Péter Szilágyi
3e21adc648 crypto/bn256: fix issues caused by Go 1.11 2018-08-16 11:02:16 +03:00
Péter Szilágyi
b24fb76a3a cmd/puppeth: fix nil panic on disconnected stats gathering 2018-08-16 09:41:16 +03:00
Felföldi Zsolt
2cdf6ee7e0 light: CHT and bloom trie indexers working in light mode (#16534)
This PR enables the indexers to work in light client mode by
downloading a part of these tries (the Merkle proofs of the last
values of the last known section) in order to be able to add new
values and recalculate subsequent hashes. It also adds CHT data to
NodeInfo.
2018-08-15 22:25:46 +02:00
Elad
e8752f4e9f cmd/swarm, swarm: added access control functionality (#17404)
Co-authored-by: Janos Guljas <janos@resenje.org>
Co-authored-by: Anton Evangelatov <anton.evangelatov@gmail.com>
Co-authored-by: Balint Gabor <balint.g@gmail.com>
2018-08-15 17:41:52 +02:00
Péter Szilágyi
d8541a9f99 consensus/ethash: use DAGs for remote mining, generate async 2018-08-15 14:38:39 +03:00
gary rong
040aa2bb10 miner: streaming uncle blocks (#17320)
* miner: stream uncle block

* miner: polish
2018-08-15 14:09:17 +03:00
Péter Szilágyi
e598ae5c01 Merge pull request #17402 from karalabe/deprecate-flags
cmd: polish miner flags, deprecate olds, add upgrade path
2018-08-15 12:09:34 +03:00
Péter Szilágyi
2a17fe2561 cmd: polish miner flags, deprecate olds, add upgrade path 2018-08-15 11:41:23 +03:00
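The upgrade path keeps the legacy flags working (they now appear under DEPRECATED in the help output shown later on this page) while mapping them to namespaced miner flags; the puppeth Dockerfile hunk below shows the same renames in action. A summary of the mapping, inferred from those hunks:

    --etherbase       ->  --miner.etherbase
    --minerthreads    ->  --miner.threads
    --targetgaslimit  ->  --miner.gastarget
    --gasprice        ->  --miner.gasprice
    --extradata       ->  --miner.extradata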
Jeff Prestes
212bba47ff backends: configurable gas limit to allow testing large contracts (#17358)
* backends: increase gaslimit in order to allow tests of large contracts

* backends: increase gaslimit in order to allow tests of large contracts

* backends: increase gaslimit in order to allow tests of large contracts
2018-08-15 10:15:42 +03:00
Felföldi Zsolt
b52bb31b76 p2p/discv5: add delay to refresh cycle when no seed nodes are found (#16994) 2018-08-14 22:59:18 +02:00
Felföldi Zsolt
b2ddb1fcbf les: implement client connection logic (#16899)
This PR implements les.freeClientPool. It also adds a simulated clock
in common/mclock, which enables time-sensitive tests to run quickly
and still produce accurate results, and package common/prque which is
a generalised variant of prque that enables removing elements other
than the top one from the queue.

les.freeClientPool implements a client database that limits the
connection time of each client and manages accepting/rejecting
incoming connections and even kicking out some connected clients. The
pool calculates recent usage time for each known client (a value that
increases linearly when the client is connected and decreases
exponentially when not connected). Clients with lower recent usage are
preferred, unknown nodes have the highest priority. Already connected
nodes receive a small bias in their favor in order to avoid accepting
and instantly kicking out clients.

Note: the pool can use any string for client identification. Using
signature keys for that purpose would not make sense when being known
has a negative value for the client. Currently the LES protocol
manager uses IP addresses (without the port number) to identify clients.
2018-08-14 22:44:46 +02:00
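A minimal Go sketch of the recent-usage metric described above (linear growth while connected, exponential decay while disconnected); the struct, method names and time constant are illustrative, not the actual les internals:

package main

import (
	"fmt"
	"math"
	"time"
)

// usage tracks one client's recent connection time, in seconds.
type usage struct {
	value    float64   // accumulated usage
	lastSeen time.Time // when value was last updated
}

// decayTau is an illustrative time constant, not the real les value:
// usage shrinks by a factor of e for every hour spent disconnected.
const decayTau = 3600.0 // seconds

// decayTo applies exponential decay for the idle period up to now.
func (u *usage) decayTo(now time.Time) {
	dt := now.Sub(u.lastSeen).Seconds()
	u.value *= math.Exp(-dt / decayTau)
	u.lastSeen = now
}

// endSession credits a finished connection of length d (linear growth
// while connected).
func (u *usage) endSession(d time.Duration, now time.Time) {
	u.decayTo(now)
	u.value += d.Seconds()
}

func main() {
	now := time.Now()
	u := usage{lastSeen: now}
	u.endSession(10*time.Minute, now)
	fmt.Printf("after a 10m session: %.0fs\n", u.value) // 600s
	u.decayTo(now.Add(2 * time.Hour))
	fmt.Printf("two idle hours later: %.0fs\n", u.value) // ~81s
}

Clients with lower values would be preferred for admission, matching the pool behaviour described above.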
gary rong
a1783d1697 miner: move agent logic to worker (#17351)
* miner: move agent logic to worker

* miner: polish

* core: persist block before reorg
2018-08-14 18:34:33 +03:00
gary rong
e0e0e53401 crypto: change formula for create2 (#17393) 2018-08-14 18:30:42 +03:00
Anton Evangelatov
97887d98da swarm/network, swarm/storage: validate chunk size (#17397)
* swarm/network, swarm/storage: validate default chunk size

* swarm/bmt, swarm/network, swarm/storage: update BMT hash initialisation

* swarm/bmt: move segmentCount to tests

* swarm/chunk: change chunk.DefaultSize to be untyped const

* swarm/storage: add size validator

* swarm/storage: add chunk size validation to localstore

* swarm/storage: move validation from localstore to validator

* swarm/storage: global chunk rules in MRU
2018-08-14 16:03:56 +02:00
Yao Zengzeng
8a040de60b README.md: fix some typos (#17381)
Signed-off-by: YaoZengzeng <yaozengzeng@zju.edu.cn>
2018-08-14 14:25:36 +03:00
Eugene Valeyev
e07e507d1a whisper: fixed broken partial topic filtering
Changes in #15811 broke partial topic filtering. Re-enable it.
2018-08-13 16:27:25 +02:00
Péter Szilágyi
d8328a96b4 Merge pull request #17347 from karalabe/miner-notify
cmd, consensus/ethash, eth: miner push notifications
2018-08-13 17:03:16 +03:00
Mymskmkt
fb368723ac core: fix comment typo (#17376) 2018-08-13 11:40:52 +03:00
Janoš Guljaš
6d1e292eef Manifest cli fix and upload defaultpath only once (#17375)
* cmd/swarm: fix manifest subcommands and add tests

* cmd/swarm: manifest update: update default entry for non-encrypted uploads

* swarm/api: upload defaultpath file only once

* swarm/api/client: improve UploadDirectory default path handling

* cmd/swarm: support absolute and relative default path values

* cmd/swarm: fix a typo in test

* cmd/swarm: check encrypted uploads in manifest update tests
2018-08-10 16:12:55 +02:00
Elad
3ec5dda4d2 swarm/api/http: added logging to denote request ended (#17371) 2018-08-10 13:49:37 +02:00
Péter Szilágyi
f0998415ba cmd, consensus/ethash, eth: miner push notifications 2018-08-10 09:06:59 +03:00
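With push notifications enabled, the node POSTs new sealing work as a JSON work package to the given URLs instead of waiting for miners to poll. A hedged example (the endpoint URL is a placeholder):

    geth --mine --miner.notify http://127.0.0.1:8550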
Janoš Guljaš
45eaef2431 cmd/swarm: solve rare cases of using the same random port in tests (#17352) 2018-08-09 16:15:59 +02:00
Janoš Guljaš
3bcb501c8f swarm/api: close tar writer in GetDirectoryTar to flush and clean (#17339) 2018-08-09 16:15:07 +02:00
Janoš Guljaš
d3e4c2dcb0 cmd/swarm: disable TestCLISwarmFs fuse test on darwin (#17340) 2018-08-09 16:14:30 +02:00
Anton Evangelatov
7b5c375825 cmd/swarm: remove shadow err (#17360) 2018-08-09 12:37:00 +03:00
Péter Szilágyi
beade042d1 Merge pull request #17357 from karalabe/tracer-trie-deref-bug
eth, trie: fix tracer GC which accidentally pruned the metaroot
2018-08-09 12:35:12 +03:00
Péter Szilágyi
11bbc66082 eth, trie: fix tracer GC which accidentally pruned the metaroot 2018-08-09 12:33:30 +03:00
libotony
834057592f p2p/discv5: fix negative index after uint convert to int (#17274) 2018-08-09 10:03:42 +03:00
Jay
abbb219933 rpc: fix a subscription name (#17345) 2018-08-09 08:56:35 +03:00
Mymskmkt
8051a0768a trie: fix comment typo (#17350) 2018-08-08 16:08:40 +03:00
Giulio M
a1eb9c7d13 swarm/api/http: fixed list leaf links (#17342) 2018-08-08 09:33:06 +02:00
Janoš Guljaš
00e6da9704 swarm/bmt: ignore data longer then 4096 bytes in Hasher.Write (#17338) 2018-08-07 15:34:33 +02:00
Attila Gazso
9df16f3468 swarm: Added lightnode flag (#17291)
* swarm: Added lightnode flag

Added --lightnode command line parameter
Added LightNode to Handshake message

* swarm/config: Fixed variable naming

* cmd/swarm: Changed BoolTFlag to BoolFlag for SwarmLightNodeEnabled

* swarm/network: Changed logging

* swarm/network: Changed protocol version testing

* swarm/network: Renamed DefaultNetworkID variable to TestProtocolNetworkID

* swarm/network: Bumped protocol version

* swarm/network: Changed LightNode handshake test to table-driven

* swarm/network: Changed back TestProtocolVersion to 5 for now

* swarm/network: Moved the test configuration inside the test function scope
2018-08-07 15:34:11 +02:00
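A hedged example enabling the new mode (all other configuration omitted); per the bullets above, the flag also advertises LightNode in the bzz handshake:

    swarm --lightnode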
b00ris
8461fea44b whisper: remove unused error (#17315) 2018-08-07 15:16:56 +02:00
Elad
4bb2dc3d09 swarm/api/http: test fixes (#17334) 2018-08-07 13:40:38 +02:00
Oleg Kovalov
cf05ef9106 p2p, swarm, trie: avoid copying slices in loops (#17265) 2018-08-07 13:56:40 +03:00
Anton Evangelatov
de9b0660ac swarm/README: add more sections to easily onboard developers (#17333) 2018-08-07 12:56:01 +02:00
Andrew Chiw
042191338d swarm/api/http: GET/PUT/PATCH/DELETE/POST multipart form unit tests. (#17277)
httpDo has a verbose option that dumps the HTTP request
2018-08-07 12:00:12 +02:00
Elad
93fe16b0a5 swarm/api/http: refactored http package (#17309) 2018-08-07 11:56:55 +02:00
Javier Peletier
64a4e89504 swarm/storage/mru: HOTFIX - fix panic in Handler.update (#17313) 2018-08-07 11:53:56 +02:00
Felföldi Zsolt
eef65b20fc p2p: use safe atomic operations when changing connFlags (#17325) 2018-08-06 15:46:30 +03:00
Felföldi Zsolt
c4df67461f Merge pull request #16333 from shazow/addremovetrustedpeer
rpc: Add admin_addTrustedPeer and admin_removeTrustedPeer.
2018-08-06 13:30:04 +02:00
gary rong
941018b570 miner: separate state, receipts for different mining work (#17323) 2018-08-06 12:55:44 +03:00
Janoš Guljaš
a72ba5a55b cmd/swarm, swarm: various test fixes (#17299)
* swarm/network/simulation: increase the sleep duration for TestRun

* cmd/swarm, swarm: fix failing tests on mac

* cmd/swarm: update TestCLISwarmFs skip comment

* swarm/network/simulation: adjust disconnections on simulation close

* swarm/network/simulation: call cleanups after net shutdown
2018-08-06 12:33:22 +03:00
stormpang
6711f098d5 core/vm: fix comment typo (#17319)
antything --> anything
:P
2018-08-06 11:42:49 +03:00
Péter Szilágyi
c376a5263f Merge pull request #17318 from ligi/fix_punctuation
Fix punctuation - closes #17317
2018-08-06 11:11:58 +03:00
ligi
2901b8b2d2 README: Fix punctuation - closes #17317 2018-08-05 15:40:22 +02:00
Péter Szilágyi
35fcd2f423 Merge pull request #17311 from karalabe/puppeth-graceful-stop
cmd/puppeth: graceful shutdown on redeploys
2018-08-03 13:26:39 +03:00
Péter Szilágyi
faf0e06ed8 cmd/puppeth: graceful shutdown on redeploys 2018-08-03 12:08:19 +03:00
gary rong
51db5975cc consensus/ethash: move remote agent logic to ethash internal (#15853)
* consensus/ethash: start remote goroutine to handle remote mining

* consensus/ethash: expose remote miner api

* consensus/ethash: expose submitHashrate api

* miner, ethash: push empty block to sealer without waiting execution

* consensus, internal: add getHashrate API for ethash

* consensus: add three method for consensus interface

* miner: expose consensus engine running status to miner

* eth, miner: specify etherbase when miner created

* miner: commit new work when consensus engine is started

* consensus, miner: fix some logics

* all: delete useless interfaces

* consensus: polish a bit
2018-08-03 11:33:37 +03:00
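The remote miner API moved into ethash here is served over the regular JSON-RPC endpoint; a hedged polling example against a local node with --rpc enabled:

    curl -s -X POST -H 'Content-Type: application/json' \
         --data '{"jsonrpc":"2.0","method":"eth_getWork","params":[],"id":1}' \
         http://127.0.0.1:8545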
Roc Yu
70176cda0e accounts/keystore: rename skipKeyFile to nonKeyFile to better reveal the function purpose (#17290) 2018-08-03 10:25:17 +03:00
Péter Szilágyi
d56fa8a659 Merge pull request #17310 from karalabe/mobile-nil-panic
mobile: fix missing return for CallMsg.SetTo(nil)
2018-08-03 09:37:57 +03:00
Péter Szilágyi
e4cb158d01 mobile: fix missing return for CallMsg.SetTo(nil) 2018-08-03 08:42:31 +03:00
Hyung-Kyu Hqueue Choi
0ab54de1a5 core/vm: update benchmarks for core/vm (#17308)
- Update benchmarks to use a pool of int pools.
  Otherwise the benchmarks abort with a segmentation fault.

Signed-off-by: Hyung-Kyu Choi <hqueue@users.noreply.github.com>
2018-08-03 08:15:33 +03:00
Péter Szilágyi
adc2944b4c Merge pull request #17301 from karalabe/tests-enable-constantinople
tests: enable the Constantinople fork definition
2018-08-02 14:42:06 +03:00
Péter Szilágyi
16eaf2b158 Merge pull request #17302 from karalabe/revert-evm-nil-panic
Revert "cmd/evm: change error msg output to stderr (#17118)"
2018-08-01 19:43:01 +03:00
Péter Szilágyi
83e2761c3a Revert "cmd/evm: change error msg output to stderr (#17118)"
This reverts commit fb9f7261ec.
2018-08-01 19:09:08 +03:00
Péter Szilágyi
353a82385b tests: enable the Constantinople fork definition 2018-08-01 18:47:09 +03:00
Anton Evangelatov
46d4721519 build: explicitly name all packages to be cross-compiled (#17288) 2018-07-31 16:34:54 +03:00
Péter Szilágyi
454382e81a params, swarm/version: begin Geth v1.8.14, Swarm v0.3.2 cycle 2018-07-31 13:39:22 +03:00
Andrey Petrov
6209545083 p2p: Wrap conn.flags ops with atomic.Load/Store 2018-06-21 12:22:47 -04:00
Andrey Petrov
193a402cc0 p2p: Test for peer.rw.flags race conditions 2018-06-21 12:22:47 -04:00
Andrey Petrov
dcca66bce8 p2p: Cache inbound flag on Peer.isInbound to avoid a race 2018-06-21 12:22:47 -04:00
Andrey Petrov
399aa710d5 p2p: Attempt to race check peer.Inbound() in TestServerDial 2018-06-21 12:22:47 -04:00
Andrey Petrov
699794d88d p2p: More tests for AddTrustedPeer/RemoveTrustedPeer 2018-06-21 12:22:47 -04:00
Andrey Petrov
773857a524 p2p: Test for MaxPeers=0 and TrustedPeer override 2018-06-21 12:21:48 -04:00
Andrey Petrov
2a75fe3308 rpc: Add admin_addTrustedPeer and admin_removeTrustedPeer.
These RPC calls are analogous to Parity's parity_addReservedPeer and
parity_removeReservedPeer.

They are useful for adjusting the trusted peer set at runtime, without
requiring a server restart.
2018-06-21 12:21:48 -04:00
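Both calls are also reachable from the interactive console; a hedged session (the enode URL is a placeholder):

    > admin.addTrustedPeer("enode://<pubkey>@10.0.0.5:30303")
    true
    > admin.removeTrustedPeer("enode://<pubkey>@10.0.0.5:30303")
    true

A trusted peer may connect even when the peer limit is exhausted, which is what the MaxPeers=0 and TrustedPeer override tests above exercise.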
210 changed files with 8728 additions and 3357 deletions


@@ -30,8 +30,6 @@ matrix:
go: 1.10.x
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- brew update
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES


@@ -34,7 +34,7 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
@@ -69,7 +69,7 @@ This command will:
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
This tool is optional and if you leave it out you can always attach to an already running Geth instance
with `geth attach`.
### Full node on the Ethereum test network


@@ -65,9 +65,9 @@ type SimulatedBackend struct {
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
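With the extra parameter, callers now choose the simulated chain's block gas limit explicitly instead of inheriting a fixed default; a minimal sketch of the new constructor call (balance and gas limit values are illustrative):

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Generate a funded test account, as the binding tests below do.
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)

	// The second argument is the new block gas limit parameter; raising
	// it lets tests deploy contracts larger than the old default allowed.
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}},
		10000000,
	)
	_ = sim
}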


@@ -229,7 +229,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy an interaction tester contract and call a transaction on it
_, _, interactor, err := DeployInteractor(auth, sim, "Deploy string")
@@ -270,7 +270,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a tuple tester contract and execute a structured call on it
_, _, getter, err := DeployGetter(auth, sim)
@@ -302,7 +302,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a tuple tester contract and execute a structured call on it
_, _, tupler, err := DeployTupler(auth, sim)
@@ -344,7 +344,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a slice tester contract and execute a n array call on it
_, _, slicer, err := DeploySlicer(auth, sim)
@@ -378,7 +378,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a default method invoker contract and execute its default method
_, _, defaulter, err := DeployDefaulter(auth, sim)
@@ -411,7 +411,7 @@ var bindTests = []struct {
`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`,
`
// Create a simulator and wrap a non-deployed contract
sim := backends.NewSimulatedBackend(nil)
sim := backends.NewSimulatedBackend(nil, uint64(10000000000))
nonexistent, err := NewNonExistent(common.Address{}, sim)
if err != nil {
@@ -447,7 +447,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a funky gas pattern contract
_, _, limiter, err := DeployFunkyGasPattern(auth, sim)
@@ -482,7 +482,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a sender tester contract and execute a structured call on it
_, _, callfrom, err := DeployCallFrom(auth, sim)
@@ -542,7 +542,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy a underscorer tester contract and execute a structured call on it
_, _, underscorer, err := DeployUnderscorer(auth, sim)
@@ -612,7 +612,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
// Deploy an eventer contract
_, _, eventer, err := DeployEventer(auth, sim)
@@ -761,7 +761,7 @@ var bindTests = []struct {
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
//deploy the test contract
_, _, testContract, err := DeployDeeplyNestedArray(auth, sim)
@@ -820,7 +820,7 @@ func TestBindings(t *testing.T) {
t.Skip("go sdk not found for testing")
}
// Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845)
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}")
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil,uint64(10000000000)))\n}")
linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil)
if err != nil {
t.Fatalf("failed check for goimports symlink bug: %v", err)


@@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct {
func TestWaitDeployed(t *testing.T) {
for name, test := range waitDeployedTests {
backend := backends.NewSimulatedBackend(core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
})
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
}, 10000000,
)
// Create the transaction.
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))


@@ -106,7 +106,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code o verify the transaction),
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock


@@ -56,9 +56,9 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
var newLastMod time.Time
for _, fi := range files {
// Skip any non-key files from the folder
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
// Skip any non-key files from the folder
if nonKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
@@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
return creates, deletes, updates, nil
}
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
func skipKeyFile(fi os.FileInfo) bool {
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
func nonKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true


@@ -147,6 +147,9 @@ var (
debEthereum,
}
// Packages to be cross-compiled by the xgo command
allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...)
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: wily is unsupported because it was officially deprecated on lanchpad.
@@ -641,17 +644,6 @@ func (meta debMetadata) ExeName(exe debExecutable) string {
return exe.Package()
}
// EthereumSwarmPackageName returns the name of the swarm package based on
// environment, e.g. "ethereum-swarm-unstable", or "ethereum-swarm".
// This is needed so that we make sure that "ethereum" package,
// depends on and installs "ethereum-swarm"
func (meta debMetadata) EthereumSwarmPackageName() string {
if isUnstableBuild(meta.Env) {
return debSwarm.Name + "-unstable"
}
return debSwarm.Name
}
// ExeConflicts returns the content of the Conflicts field
// for executable packages.
func (meta debMetadata) ExeConflicts(exe debExecutable) string {
@@ -1009,7 +1001,7 @@ func doXgo(cmdline []string) {
if *alltools {
args = append(args, []string{"--dest", GOBIN}...)
for _, res := range allToolsArchiveFiles {
for _, res := range allCrossCompiledArchiveFiles {
if strings.HasPrefix(res, GOBIN) {
// Binary tool found, cross build it explicitly
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))


@@ -10,7 +10,7 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum
Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.EthereumSwarmPackageName}}, {{.ExeList}}
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth, swarm, and other tools
Meta-package to install geth, swarm and other tools


@@ -50,6 +50,6 @@ func compileCmd(ctx *cli.Context) error {
if err != nil {
return err
}
fmt.Fprintln(ctx.App.Writer, bin)
fmt.Println(bin)
return nil
}


@@ -45,6 +45,6 @@ func disasmCmd(ctx *cli.Context) error {
}
code := strings.TrimSpace(string(in[:]))
fmt.Fprintf(ctx.App.Writer, "%v\n", code)
fmt.Printf("%v\n", code)
return asm.PrintDisassembled(code)
}


@@ -30,11 +30,10 @@ func Compile(fn string, src []byte, debug bool) (string, error) {
bin, compileErrors := compiler.Compile()
if len(compileErrors) > 0 {
// report errors
errs := ""
for _, err := range compileErrors {
errs += fmt.Sprintf("%s:%v\n", fn, err)
fmt.Printf("%s:%v\n", fn, err)
}
return "", errors.New(errs + "compiling failed\n")
return "", errors.New("compiling failed")
}
return bin, nil
}


@@ -128,13 +128,13 @@ func runCmd(ctx *cli.Context) error {
if ctx.GlobalString(CodeFileFlag.Name) == "-" {
//Try reading from stdin
if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from stdin: %v\n", err)
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from file: %v\n", err)
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
@@ -172,11 +172,11 @@ func runCmd(ctx *cli.Context) error {
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not create CPU profile: %v\n", err)
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not start CPU profile: %v\n", err)
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
@@ -200,17 +200,17 @@ func runCmd(ctx *cli.Context) error {
if ctx.GlobalBool(DumpFlag.Name) {
statedb.IntermediateRoot(true)
fmt.Fprintln(ctx.App.Writer, string(statedb.Dump()))
fmt.Println(string(statedb.Dump()))
}
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
@@ -218,17 +218,17 @@ func runCmd(ctx *cli.Context) error {
if ctx.GlobalBool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
vm.WriteTrace(ctx.App.ErrWriter, debugLogger.StructLogs())
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(ctx.App.ErrWriter, "#### LOGS ####")
vm.WriteLogs(ctx.App.ErrWriter, statedb.Logs())
fmt.Fprintln(os.Stderr, "#### LOGS ####")
vm.WriteLogs(os.Stderr, statedb.Logs())
}
if ctx.GlobalBool(StatDumpFlag.Name) {
var mem goruntime.MemStats
goruntime.ReadMemStats(&mem)
fmt.Fprintf(ctx.App.ErrWriter, `evm execution time: %v
fmt.Fprintf(os.Stderr, `evm execution time: %v
heap objects: %d
allocations: %d
total allocations: %d
@@ -238,9 +238,9 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
}
if tracer == nil {
fmt.Fprintf(ctx.App.Writer, "0x%x\n", ret)
fmt.Printf("0x%x\n", ret)
if err != nil {
fmt.Fprintf(ctx.App.ErrWriter, " error: %v\n", err)
fmt.Printf(" error: %v\n", err)
}
}


@@ -107,7 +107,7 @@ func stateTestCmd(ctx *cli.Context) error {
}
// print state root for evmlab tracing (already committed above, so no need to delete objects again
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(ctx.App.ErrWriter, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
}
results = append(results, *result)
@@ -115,13 +115,13 @@ func stateTestCmd(ctx *cli.Context) error {
// Print any structured logs collected
if ctx.GlobalBool(DebugFlag.Name) {
if debugger != nil {
fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
vm.WriteTrace(ctx.App.ErrWriter, debugger.StructLogs())
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugger.StructLogs())
}
}
}
}
out, _ := json.MarshalIndent(results, "", " ")
fmt.Fprintln(ctx.App.Writer, string(out))
fmt.Println(string(out))
return nil
}


@@ -48,7 +48,6 @@ var (
ArgsUsage: "<genesisPath>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -66,7 +65,7 @@ It expects the genesis file as argument.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
utils.GCModeFlag,
utils.CacheDatabaseFlag,
utils.CacheGCFlag,
@@ -87,7 +86,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -105,7 +104,7 @@ be gzipped.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -119,7 +118,7 @@ be gzipped.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -149,7 +148,6 @@ The first argument must be the directory containing the blockchain to download f
ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -163,7 +161,7 @@ Remove blockchain and state databases`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `


@@ -31,7 +31,7 @@ import (
)
const (
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
)


@@ -72,6 +72,7 @@ var (
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
@@ -82,8 +83,6 @@ var (
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
utils.FastSyncFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
utils.GCModeFlag,
utils.LightServFlag,
@@ -96,11 +95,19 @@ var (
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
utils.EtherbaseFlag,
utils.GasPriceFlag,
utils.MinerThreadsFlag,
utils.MiningEnabledFlag,
utils.TargetGasLimitFlag,
utils.MinerThreadsFlag,
utils.MinerLegacyThreadsFlag,
utils.MinerNotifyFlag,
utils.MinerGasTargetFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerGasPriceFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerEtherbaseFlag,
utils.MinerLegacyEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerLegacyExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
@@ -121,7 +128,6 @@ var (
utils.NoCompactionFlag,
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
utils.ExtraDataFlag,
configFileFlag,
}
@@ -323,7 +329,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
// Start auxiliary services if enabled
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
// Mining only makes sense if a full Ethereum node is running
if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
utils.Fatalf("Light clients do not support mining")
}
var ethereum *eth.Ethereum
@@ -331,7 +337,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
utils.Fatalf("Ethereum service not running: %v", err)
}
// Use a reduced number of threads if requested
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
}
if threads > 0 {
type threaded interface {
SetThreads(threads int)
}
@@ -340,7 +350,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
}
}
// Set the gas price to the limits from the CLI and start mining
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
}
ethereum.TxPool().SetGasPrice(gasprice)
if err := ethereum.StartMining(true); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}


@@ -114,6 +114,7 @@ var AppHelpFlagGroups = []flagGroup{
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
@@ -185,10 +186,12 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.MiningEnabledFlag,
utils.MinerThreadsFlag,
utils.EtherbaseFlag,
utils.TargetGasLimitFlag,
utils.GasPriceFlag,
utils.ExtraDataFlag,
utils.MinerNotifyFlag,
utils.MinerGasPriceFlag,
utils.MinerGasTargetFlag,
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
},
},
{
@@ -230,8 +233,11 @@ var AppHelpFlagGroups = []flagGroup{
{
Name: "DEPRECATED",
Flags: []cli.Flag{
utils.FastSyncFlag,
utils.LightModeFlag,
utils.MinerLegacyThreadsFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerLegacyEtherbaseFlag,
utils.MinerLegacyExtraDataFlag,
},
},
{


@@ -678,9 +678,9 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
// Build and deploy the dashboard service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// dashboardInfos is returned from a dashboard status check to allow reporting


@@ -100,9 +100,9 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
// Build and deploy the ethstats service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// ethstatsInfos is returned from an ethstats status check to allow reporting


@@ -38,7 +38,7 @@ ADD chain.json /chain.json
RUN \
echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
echo 'exec /parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
ENTRYPOINT ["/bin/sh", "explorer.sh"]
`
@@ -140,9 +140,9 @@ func deployExplorer(client *sshClient, network string, chainspec []byte, config
// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// explorerInfos is returned from a block explorer status check to allow reporting


@@ -133,9 +133,9 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
// Build and deploy the faucet service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// faucetInfos is returned from a faucet status check to allow reporting various


@@ -81,9 +81,9 @@ func deployNginx(client *sshClient, network string, port int, nocache bool) ([]b
// Build and deploy the reverse-proxy service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// nginxInfos is returned from an nginx reverse-proxy status check to allow


@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gasprice {{.GasPrice}}' >> geth.sh
ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@@ -139,9 +139,9 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// nodeInfos is returned from a boot or seal node status check to allow reporting


@@ -37,7 +37,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'node server.js &' > wallet.sh && \
echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
echo $'exec geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
RUN \
sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \
@@ -120,9 +120,9 @@ func deployWallet(client *sshClient, network string, bootnodes []string, config
// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}
// walletInfos is returned from a web wallet status check to allow reporting


@@ -45,33 +45,44 @@ type sshClient struct {
// dial establishes an SSH connection to a remote node using the current user and
// the user's configured private RSA key. If that fails, password authentication
// is fallen back to. The caller may override the login user via user@server:port.
// is fallen back to. server can be a string like user:identity@server:port.
func dial(server string, pubkey []byte) (*sshClient, error) {
// Figure out a label for the server and a logger
label := server
if strings.Contains(label, ":") {
label = label[:strings.Index(label, ":")]
}
login := ""
// Figure out username, identity, hostname and port
hostname := ""
hostport := server
username := ""
identity := "id_rsa" // default
if strings.Contains(server, "@") {
login = label[:strings.Index(label, "@")]
label = label[strings.Index(label, "@")+1:]
server = server[strings.Index(server, "@")+1:]
prefix := server[:strings.Index(server, "@")]
if strings.Contains(prefix, ":") {
username = prefix[:strings.Index(prefix, ":")]
identity = prefix[strings.Index(prefix, ":")+1:]
} else {
username = prefix
}
hostport = server[strings.Index(server, "@")+1:]
}
logger := log.New("server", label)
if strings.Contains(hostport, ":") {
hostname = hostport[:strings.Index(hostport, ":")]
} else {
hostname = hostport
hostport += ":22"
}
logger := log.New("server", server)
logger.Debug("Attempting to establish SSH connection")
user, err := user.Current()
if err != nil {
return nil, err
}
if login == "" {
login = user.Username
if username == "" {
username = user.Username
}
// Configure the supported authentication methods (private key and password)
var auths []ssh.AuthMethod
path := filepath.Join(user.HomeDir, ".ssh", "id_rsa")
path := filepath.Join(user.HomeDir, ".ssh", identity)
if buf, err := ioutil.ReadFile(path); err != nil {
log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
} else {
@@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
}
}
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
fmt.Println()
return string(blob), err
}))
// Resolve the IP address of the remote server
addr, err := net.LookupHost(label)
addr, err := net.LookupHost(hostname)
if err != nil {
return nil, err
}
@@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
return nil, errors.New("no IPs associated with domain")
}
// Try to dial in to the remote server
logger.Trace("Dialing remote SSH server", "user", login)
if !strings.Contains(server, ":") {
server += ":22"
}
logger.Trace("Dialing remote SSH server", "user", username)
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
// If no public key is known for SSH, ask the user to confirm
if pubkey == nil {
@@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
// We have a mismatch, forbid connecting
return errors.New("ssh key mismatch, readd the machine to update")
}
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
if err != nil {
return nil, err
}
// Connection established, return our utility wrapper
c := &sshClient{
server: label,
server: hostname,
address: addr[0],
pubkey: pubkey,
client: client,


@@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
logger.Info("Starting remote server health-check")
stat := &serverStat{
address: client.address,
services: make(map[string]map[string]string),
}
if client == nil {
@@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
}
client = conn
}
stat.address = client.address
// Client connected one way or another, run health-checks
logger.Debug("Checking for nginx availability")
if infos, err := checkNginx(client, w.network); err != nil {
@@ -214,6 +215,9 @@ func (stats serverStats) render() {
if len(stat.address) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.address))
}
if len(stat.failure) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.failure))
}
for service, configs := range stat.services {
if len(service) > len(separator[2]) {
separator[2] = strings.Repeat("-", len(service))
@@ -250,7 +254,11 @@ func (stats serverStats) render() {
sort.Strings(services)
if len(services) == 0 {
table.Append([]string{server, stats[server].address, "", "", ""})
if stats[server].failure != "" {
table.Append([]string{server, stats[server].failure, "", "", ""})
} else {
table.Append([]string{server, stats[server].address, "", "", ""})
}
}
for j, service := range services {
// Add an empty line between all services


@@ -62,14 +62,14 @@ func (w *wizard) manageServers() {
}
}
// makeServer reads a single line from stdin and interprets it as a hostname to
// connect to. It tries to establish a new SSH session and also executing some
// baseline validations.
// makeServer reads a single line from stdin and interprets it as
// username:identity@hostname to connect to. It tries to establish a
// new SSH session and also executing some baseline validations.
//
// If connection succeeds, the server is added to the wizards configs!
func (w *wizard) makeServer() string {
fmt.Println()
fmt.Println("Please enter remote server's address:")
fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?")
// Read and dial the server to ensure docker is present
input := w.readString()
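Per the parsing above, all of the following server strings are accepted (hosts, users and key file names are illustrative); the identity file is looked up under ~/.ssh/ and the port defaults to 22:

    203.0.113.7                          # current user, ~/.ssh/id_rsa, port 22
    ubuntu@203.0.113.7                   # explicit user, default identity
    ubuntu:deploy_key@203.0.113.7:2222   # user, ~/.ssh/deploy_key, custom port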

cmd/swarm/access.go (new file, 219 lines)

@@ -0,0 +1,219 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/api/client"
"gopkg.in/urfave/cli.v1"
)
var salt = make([]byte, 32)
func init() {
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
}
func accessNewPass(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}
var (
ae *api.AccessEntry
accessKey []byte
err error
ref = args[0]
password = getPassPhrase("", 0, makePasswordList(ctx))
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
accessKey, ae, err = api.DoPasswordNew(ctx, password, salt)
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if err != nil {
utils.Fatalf("error generating root access manifest: %v", err)
}
if dryRun {
err = printManifests(m, nil)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
utils.Fatalf("uploading manifests")
err = uploadManifests(ctx, m, nil)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}
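As exercised by the tests added below, the password flow maps to a CLI invocation of the following shape (the ref and the password file are placeholders):

swarm access new pass --dry-run --password password.txt <ref>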
func accessNewPK(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}
var (
ae *api.AccessEntry
sessionKey []byte
err error
ref = args[0]
privateKey = getPrivKey(ctx)
granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name)
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
sessionKey, ae, err = api.DoPKNew(ctx, privateKey, granteePublicKey, salt)
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
if err != nil {
utils.Fatalf("error generating root access manifest: %v", err)
}
if dryRun {
err = printManifests(m, nil)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, nil)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}
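The public key flow is invoked analogously, passing the grantee's compressed public key in hex (placeholder values):

swarm access new pk --dry-run --grant-key <hex-compressed-pubkey> <ref>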
func accessNewACT(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}
var (
ae *api.AccessEntry
actManifest *api.Manifest
accessKey []byte
err error
ref = args[0]
grantees = []string{}
actFilename = ctx.String(SwarmAccessGrantKeysFlag.Name)
privateKey = getPrivKey(ctx)
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
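// Note: per the --grant-keys flag description, the grantee list file is expected
// to contain one hex-encoded compressed public key per line.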
bytes, err := ioutil.ReadFile(actFilename)
if err != nil {
utils.Fatalf("had an error reading the grantee public key list")
}
grantees = strings.Split(string(bytes), "\n")
accessKey, ae, actManifest, err = api.DoACTNew(ctx, privateKey, salt, grantees)
if err != nil {
utils.Fatalf("error generating ACT manifest: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if err != nil {
utils.Fatalf("error generating root access manifest: %v", err)
}
if dryRun {
err = printManifests(m, actManifest)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, actManifest)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}
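The ACT flow takes a grantee list file instead of a single key (placeholder values):

swarm access new act --grant-keys grantees.txt <ref>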
func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
js, err := json.Marshal(rootAccessManifest)
if err != nil {
return err
}
fmt.Println(string(js))
if actManifest != nil {
js, err := json.Marshal(actManifest)
if err != nil {
return err
}
fmt.Println(string(js))
}
return nil
}
func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error {
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := client.NewClient(bzzapi)
var (
key string
err error
)
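// Upload the ACT manifest first, so that its hash can be embedded into the
// root access manifest before the root manifest itself is uploaded.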
if actManifest != nil {
key, err = client.UploadManifest(actManifest, false)
if err != nil {
return err
}
rootAccessManifest.Entries[0].Access.Act = key
}
key, err = client.UploadManifest(rootAccessManifest, false)
if err != nil {
return err
}
fmt.Println(key)
return nil
}
// makePasswordList reads password lines from the file specified by the global --password flag
// and also by the same subcommand --password flag.
// This function is a fork of utils.MakePasswordList that looks up the cli context for the subcommand.
// With the current version of the cli package, ctx.SetGlobal does not set the global flag value
// so that it can be accessed by ctx.GlobalString.
func makePasswordList(ctx *cli.Context) []string {
path := ctx.GlobalString(utils.PasswordFileFlag.Name)
if path == "" {
path = ctx.String(utils.PasswordFileFlag.Name)
if path == "" {
return nil
}
}
text, err := ioutil.ReadFile(path)
if err != nil {
utils.Fatalf("Failed to read password file: %v", err)
}
lines := strings.Split(string(text), "\n")
// Sanitise DOS line endings.
for i := range lines {
lines[i] = strings.TrimRight(lines[i], "\r")
}
return lines
}

cmd/swarm/access_test.go (new file, 581 lines)

@@ -0,0 +1,581 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"crypto/rand"
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
gorand "math/rand"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)
// TestAccessPassword tests the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
// The participating parties: a node (the publisher) uploads to a second node and then disappears. The uploaded
// content is then fetched through the second node. Since the tested code is not key-aware, we can simply
// fetch from the second node using HTTP BasicAuth.
func TestAccessPassword(t *testing.T) {
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
proxyNode := cluster.Nodes[0]
// create a tmp file
tmp, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
// write data to file
data := "notsorandomdata"
dataFilename := filepath.Join(tmp, "data.txt")
err = ioutil.WriteFile(dataFilename, []byte(data), 0666)
if err != nil {
t.Fatal(err)
}
hashRegexp := `[a-f\d]{128}`
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
proxyNode.URL, //it doesn't matter through which node we upload content
"up",
"--encrypt",
dataFilename)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
ref := matches[0]
password := "smth"
passwordFilename := filepath.Join(tmp, "password.txt")
err = ioutil.WriteFile(passwordFilename, []byte(password), 0666)
if err != nil {
t.Fatal(err)
}
up = runSwarm(t,
"access",
"new",
"pass",
"--dry-run",
"--password",
passwordFilename,
ref,
)
_, matches = up.ExpectRegexp(".+")
up.ExpectExit()
if len(matches) == 0 {
t.Fatalf("stdout not matched")
}
var m api.Manifest
err = json.Unmarshal([]byte(matches[0]), &m)
if err != nil {
t.Fatalf("unmarshal manifest: %v", err)
}
if len(m.Entries) != 1 {
t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
}
e := m.Entries[0]
ct := "application/bzz-manifest+json"
if e.ContentType != ct {
t.Errorf("expected %q content type, got %q", ct, e.ContentType)
}
if e.Access == nil {
t.Fatal("manifest access is nil")
}
a := e.Access
if a.Type != "pass" {
t.Errorf(`got access type %q, expected "pass"`, a.Type)
}
if len(a.Salt) < 32 {
t.Errorf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
}
if a.KdfParams == nil {
t.Fatal("manifest access kdf params is nil")
}
client := swarm.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
t.Fatal(err)
}
httpClient := &http.Client{}
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
}
if response.StatusCode != http.StatusUnauthorized {
t.Fatal("should be a 401")
}
authHeader := response.Header.Get("WWW-Authenticate")
if authHeader == "" {
t.Fatal("should be something here")
}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
t.Fatal(err)
}
req.SetBasicAuth("", password)
response, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
t.Errorf("expected status %v, got %v", http.StatusOK, response.StatusCode)
}
d, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
if string(d) != data {
t.Errorf("expected decrypted data %q, got %q", data, string(d))
}
wrongPasswordFilename := filepath.Join(tmp, "password-wrong.txt")
err = ioutil.WriteFile(wrongPasswordFilename, []byte("just wr0ng"), 0666)
if err != nil {
t.Fatal(err)
}
//download file with 'swarm down' with wrong password
up = runSwarm(t,
"--bzzapi",
proxyNode.URL,
"down",
"bzz:/"+hash,
tmp,
"--password",
wrongPasswordFilename)
_, matches = up.ExpectRegexp("unauthorized")
if len(matches) < 1 || matches[0] != "unauthorized" {
t.Fatal(`"unauthorized" not found in output`)
}
up.ExpectExit()
}
// TestAccessPK tests the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
// The participating parties: a node (the publisher) uploads to a second node (which is also the grantee) and
// then disappears. The uploaded content is then fetched through the grantee's http proxy. Since the tested code
// is private-key aware, the test will fail if the proxy's given private key is not granted on the ACT.
func TestAccessPK(t *testing.T) {
// Setup Swarm and upload a test file to it
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer tmp.Close()
defer os.Remove(tmp.Name())
// write data to file
data := "notsorandomdata"
_, err = io.WriteString(tmp, data)
if err != nil {
t.Fatal(err)
}
hashRegexp := `[a-f\d]{128}`
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
"--encrypt",
tmp.Name())
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
ref := matches[0]
pk := cluster.Nodes[0].PrivateKey
granteePubKey := crypto.CompressPubkey(&pk.PublicKey)
publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
if err != nil {
t.Fatal(err)
}
passFile, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer passFile.Close()
defer os.Remove(passFile.Name())
_, err = io.WriteString(passFile, testPassphrase)
if err != nil {
t.Fatal(err)
}
_, publisherAccount := getTestAccount(t, publisherDir)
up = runSwarm(t,
"--bzzaccount",
publisherAccount.Address.String(),
"--password",
passFile.Name(),
"--datadir",
publisherDir,
"--bzzapi",
cluster.Nodes[0].URL,
"access",
"new",
"pk",
"--dry-run",
"--grant-key",
hex.EncodeToString(granteePubKey),
ref,
)
_, matches = up.ExpectRegexp(".+")
up.ExpectExit()
if len(matches) == 0 {
t.Fatalf("stdout not matched")
}
var m api.Manifest
err = json.Unmarshal([]byte(matches[0]), &m)
if err != nil {
t.Fatalf("unmarshal manifest: %v", err)
}
if len(m.Entries) != 1 {
t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
}
e := m.Entries[0]
ct := "application/bzz-manifest+json"
if e.ContentType != ct {
t.Errorf("expected %q content type, got %q", ct, e.ContentType)
}
if e.Access == nil {
t.Fatal("manifest access is nil")
}
a := e.Access
if a.Type != "pk" {
t.Errorf(`got access type %q, expected "pk"`, a.Type)
}
if len(a.Salt) < 32 {
t.Errorf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
}
if a.KdfParams != nil {
t.Fatal("manifest access kdf params should be nil")
}
client := swarm.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
t.Fatal(err)
}
httpClient := &http.Client{}
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
}
if response.StatusCode != http.StatusOK {
t.Fatal("should be a 200")
}
d, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
if string(d) != data {
t.Errorf("expected decrypted data %q, got %q", data, string(d))
}
}
// TestAccessACT tests the e2e creation, uploading and downloading of an ACT type of access control.
// The test fires up a 3-node cluster, then randomly picks 2 nodes which will act as grantees to the data
// set. The third node should fail to decode the reference as it will not be granted access. The publisher
// uploads through one of the nodes and then disappears.
func TestAccessACT(t *testing.T) {
// Setup Swarm and upload a test file to it
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
var uploadThroughNode = cluster.Nodes[0]
client := swarm.NewClient(uploadThroughNode.URL)
r1 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
nodeToSkip := r1.Intn(3) // a number between 0 and 2 (node indices in `cluster`)
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer tmp.Close()
defer os.Remove(tmp.Name())
// write data to file
data := "notsorandomdata"
_, err = io.WriteString(tmp, data)
if err != nil {
t.Fatal(err)
}
hashRegexp := `[a-f\d]{128}`
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
"--encrypt",
tmp.Name())
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
ref := matches[0]
grantees := []string{}
for i, v := range cluster.Nodes {
if i == nodeToSkip {
continue
}
pk := v.PrivateKey
granteePubKey := crypto.CompressPubkey(&pk.PublicKey)
grantees = append(grantees, hex.EncodeToString(granteePubKey))
}
granteesPubkeyListFile, err := ioutil.TempFile("", "grantees-pubkey-list.csv")
if err != nil {
t.Fatal(err)
}
_, err = granteesPubkeyListFile.WriteString(strings.Join(grantees, "\n"))
if err != nil {
t.Fatal(err)
}
defer granteesPubkeyListFile.Close()
defer os.Remove(granteesPubkeyListFile.Name())
publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
if err != nil {
t.Fatal(err)
}
passFile, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer passFile.Close()
defer os.Remove(passFile.Name())
_, err = io.WriteString(passFile, testPassphrase)
if err != nil {
t.Fatal(err)
}
_, publisherAccount := getTestAccount(t, publisherDir)
up = runSwarm(t,
"--bzzaccount",
publisherAccount.Address.String(),
"--password",
passFile.Name(),
"--datadir",
publisherDir,
"--bzzapi",
cluster.Nodes[0].URL,
"access",
"new",
"act",
"--grant-keys",
granteesPubkeyListFile.Name(),
ref,
)
_, matches = up.ExpectRegexp(`[a-f\d]{64}`)
up.ExpectExit()
if len(matches) == 0 {
t.Fatalf("stdout not matched")
}
hash := matches[0]
m, _, err := client.DownloadManifest(hash)
if err != nil {
t.Fatalf("unmarshal manifest: %v", err)
}
if len(m.Entries) != 1 {
t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
}
e := m.Entries[0]
ct := "application/bzz-manifest+json"
if e.ContentType != ct {
t.Errorf("expected %q content type, got %q", ct, e.ContentType)
}
if e.Access == nil {
t.Fatal("manifest access is nil")
}
a := e.Access
if a.Type != "act" {
t.Fatalf(`got access type %q, expected "act"`, a.Type)
}
if len(a.Salt) < 32 {
t.Fatalf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
}
if a.KdfParams != nil {
t.Fatal("manifest access kdf params should be nil")
}
httpClient := &http.Client{}
// all nodes except the skipped node should be able to decrypt the content
for i, node := range cluster.Nodes {
log.Debug("trying to fetch from node", "node index", i)
url := node.URL + "/" + "bzz:/" + hash
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
}
log.Debug("got response from node", "response code", response.StatusCode)
if i == nodeToSkip {
log.Debug("reached node to skip", "status code", response.StatusCode)
if response.StatusCode != http.StatusUnauthorized {
t.Fatalf("should be a 401")
}
continue
}
if response.StatusCode != http.StatusOK {
t.Fatal("should be a 200")
}
d, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
if string(d) != data {
t.Errorf("expected decrypted data %q, got %q", data, string(d))
}
}
}
// TestKeypairSanity is a sanity test for the ACT crypto scheme. It asserts the correct shared secret according to
// the specs at https://github.com/ethersphere/swarm-docs/blob/eb857afda906c6e7bb90d37f3f334ccce5eef230/act.md
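// The asserted relation, as exercised below, is:
//   sessionKey == Keccak256(salt || sharedSecret)
// where sharedSecret is the ECDH shared secret derived from the publisher's
// private key and the grantee's public key (per the linked spec).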
func TestKeypairSanity(t *testing.T) {
salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
t.Fatalf("reading from crypto/rand failed: %v", err.Error())
}
sharedSecret := "a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d"
for i, v := range []struct {
publisherPriv string
granteePub string
}{
{
publisherPriv: "ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459",
granteePub: "0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a",
},
{
publisherPriv: "70c7a73011aa56584a0009ab874794ee7e5652fd0c6911cd02f8b6267dd82d2d",
granteePub: "02e6f8d5e28faaa899744972bb847b6eb805a160494690c9ee7197ae9f619181db",
},
} {
b, _ := hex.DecodeString(v.granteePub)
granteePub, _ := crypto.DecompressPubkey(b)
publisherPrivate, _ := crypto.HexToECDSA(v.publisherPriv)
ssKey, err := api.NewSessionKeyPK(publisherPrivate, granteePub, salt)
if err != nil {
t.Fatal(err)
}
hasher := sha3.NewKeccak256()
hasher.Write(salt)
shared, err := hex.DecodeString(sharedSecret)
if err != nil {
t.Fatal(err)
}
hasher.Write(shared)
sum := hasher.Sum(nil)
if !bytes.Equal(ssKey, sum) {
t.Fatalf("%d: got a session key mismatch", i)
}
}
}

cmd/swarm/bootnodes.go (new file, 77 lines)

@@ -0,0 +1,77 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
var SwarmBootnodes = []string{
// Foundation Swarm Gateway Cluster
"enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399",
"enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400",
"enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401",
"enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402",
"enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403",
"enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404",
"enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405",
"enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406",
"enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407",
"enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408",
"enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409",
"enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410",
"enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411",
"enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412",
"enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413",
"enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414",
"enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415",
"enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416",
"enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417",
"enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418",
"enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419",
"enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420",
"enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421",
"enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422",
"enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423",
"enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424",
"enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425",
"enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426",
"enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427",
"enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428",
"enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429",
"enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430",
"enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431",
"enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432",
"enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433",
"enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434",
"enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435",
"enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436",
"enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437",
"enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438",
"enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439",
"enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440",
"enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441",
"enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442",
"enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443",
"enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444",
"enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445",
"enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446",
// Mainframe
"enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399",
"enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399",
"enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399",
"enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399",
"enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399",
"enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399",
}


@@ -68,6 +68,7 @@ const (
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
SWARM_ENV_ENS_API = "SWARM_ENS_API"
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
@@ -77,6 +78,7 @@ const (
SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
GETH_ENV_DATADIR = "GETH_DATADIR"
)
@@ -131,7 +133,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
log.Debug(printConfig(config))
}
//override the current config with whatever is in the config file, if a config file has been provided
//configFileOverride overrides the current config with the config file, if a config file has been provided
func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) {
var err error
@@ -141,7 +143,8 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config
if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" {
utils.Fatalf("Config file flag provided with invalid file path")
}
f, err := os.Open(filepath)
var f *os.File
f, err = os.Open(filepath)
if err != nil {
return nil, err
}
@@ -204,6 +207,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.SyncUpdateDelay = d
}
if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
currentConfig.LightNodeEnabled = true
}
if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
currentConfig.DeliverySkipCheck = true
}
@@ -226,10 +233,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.Cors = cors
}
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) {
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
}
if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
currentConfig.LocalStoreParams.ChunkDbPath = storePath
}
@@ -301,6 +304,12 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
}
}
if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
if lightnode, err := strconv.ParseBool(lne); err == nil {
currentConfig.LightNodeEnabled = lightnode
}
}
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
currentConfig.SwapAPI = swapapi
}
@@ -321,10 +330,6 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
currentConfig.Cors = cors
}
if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" {
currentConfig.BootNodes = bootnodes
}
return currentConfig
}


@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"testing"
@@ -559,3 +560,16 @@ func TestValidateConfig(t *testing.T) {
}
}
}
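// assignTCPPort asks the kernel for a currently free port by binding to
// 127.0.0.1:0 and immediately closing the listener. Note that the returned
// port may be taken by another process before the caller binds to it.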
func assignTCPPort() (string, error) {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return "", err
}
l.Close()
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return "", err
}
return port, nil
}


@@ -68,18 +68,36 @@ func download(ctx *cli.Context) {
utils.Fatalf("could not parse uri argument: %v", err)
}
// assume behaviour according to --recursive switch
if isRecursive {
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
utils.Fatalf("encoutered an error while downloading directory: %v", err)
}
} else {
// we are downloading a file
log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
dl := func(credentials string) error {
// assume behaviour according to --recursive switch
if isRecursive {
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil {
if err == swarm.ErrUnauthorized {
return err
}
return fmt.Errorf("directory %s: %v", uri.Path, err)
}
} else {
// we are downloading a file
log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path)
err := client.DownloadFile(uri.Addr, uri.Path, dest)
if err != nil {
utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials)
if err != nil {
if err == swarm.ErrUnauthorized {
return err
}
return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err)
}
}
return nil
}
if passwords := makePasswordList(ctx); passwords != nil {
password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords)
err = dl(password)
} else {
err = dl("")
}
if err != nil {
utils.Fatalf("download: %v", err)
}
}
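Together with the access manifests above, a protected resource can then be fetched non-interactively, for example (placeholder values, assuming the default local HTTP gateway):

swarm --bzzapi http://localhost:8500 down --password password.txt bzz:/<hash> <dir>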


@@ -92,7 +92,7 @@ func listMounts(cliContext *cli.Context) {
mf := []fuse.MountInfo{}
err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
if err != nil {
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
}
if len(mf) == 0 {
fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// +build linux darwin freebsd
// +build linux freebsd
package main
@@ -43,6 +43,11 @@ type testFile struct {
}
// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
// and without any log messages:
// /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse.
// This is the reason why this file is not built for the darwin architecture.
func TestCLISwarmFs(t *testing.T) {
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()


@@ -44,7 +44,7 @@ func list(ctx *cli.Context) {
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)
list, err := client.List(manifest, prefix)
list, err := client.List(manifest, prefix, "")
if err != nil {
utils.Fatalf("Failed to generate file and directory list: %s", err)
}


@@ -37,7 +37,6 @@ import (
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/swarm"
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
@@ -67,14 +66,7 @@ OPTIONS:
`
var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
testbetBootNodes = []string{
"enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429",
"enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430",
"enode://fe29b82319b734ce1ec68b84657d57145fee237387e63273989d354486731e59f78858e452ef800a020559da22dcca759536e6aa5517c53930d29ce0b1029286@13.74.157.139:30431",
"enode://1d7187e7bde45cf0bee489ce9852dd6d1a0d9aa67a33a6b8e6db8a4fbc6fcfa6f0f1a5419343671521b863b187d1c73bad3603bae66421d157ffef357669ddb8@13.74.157.139:30432",
"enode://0e4cba800f7b1ee73673afa6a4acead4018f0149d2e3216be3f133318fd165b324cd71b81fbe1e80deac8dbf56e57a49db7be67f8b9bc81bd2b7ee496434fb5d@13.74.157.139:30433",
}
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
)
var (
@@ -123,6 +115,11 @@ var (
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
}
SwarmLightNodeEnabled = cli.BoolFlag{
Name: "lightnode",
Usage: "Enable Swarm LightNode (default false)",
EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
}
SwarmDeliverySkipCheckFlag = cli.BoolFlag{
Name: "delivery-skip-check",
Usage: "Skip chunk delivery check (default false)",
@@ -150,6 +147,14 @@ var (
Name: "defaultpath",
Usage: "path to file served for empty url path (none)",
}
SwarmAccessGrantKeyFlag = cli.StringFlag{
Name: "grant-key",
Usage: "grants a given public key access to an ACT",
}
SwarmAccessGrantKeysFlag = cli.StringFlag{
Name: "grant-keys",
Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
}
SwarmUpFromStdinFlag = cli.BoolFlag{
Name: "stdin",
Usage: "reads data to be uploaded from stdin",
@@ -162,6 +167,15 @@ var (
Name: "encrypt",
Usage: "use encrypted upload",
}
SwarmAccessPasswordFlag = cli.StringFlag{
Name: "password",
Usage: "Password",
EnvVar: SWARM_ACCESS_PASSWORD,
}
SwarmDryRunFlag = cli.BoolFlag{
Name: "dry-run",
Usage: "dry-run",
}
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
@@ -247,6 +261,61 @@ func init() {
Flags: []cli.Flag{SwarmEncryptedFlag},
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
CustomHelpTemplate: helpTemplate,
Name: "access",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root manifest",
Subcommands: []cli.Command{
{
CustomHelpTemplate: helpTemplate,
Name: "new",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
Subcommands: []cli.Command{
{
Action: accessNewPass,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
},
Name: "pass",
Usage: "encrypts a reference with a password and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewPK,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
SwarmAccessGrantKeyFlag,
},
Name: "pk",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewACT,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
SwarmAccessGrantKeysFlag,
SwarmDryRunFlag,
},
Name: "act",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
},
},
},
},
{
CustomHelpTemplate: helpTemplate,
Name: "resource",
@@ -299,16 +368,13 @@ func init() {
Description: "Prints the swarm hash of file or directory",
},
{
Action: download,
Name: "down",
Flags: []cli.Flag{SwarmRecursiveFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: `
Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
`,
Action: download,
Name: "down",
Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`,
},
{
Name: "manifest",
CustomHelpTemplate: helpTemplate,
@@ -317,23 +383,23 @@ Downloads a swarm bzz uri to the given dir. When no dir is provided, working dir
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{
{
Action: add,
Action: manifestAdd,
CustomHelpTemplate: helpTemplate,
Name: "add",
Usage: "add a new path to the manifest",
ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
ArgsUsage: "<MANIFEST> <path> <hash>",
Description: "Adds a new path to the manifest",
},
{
Action: update,
Action: manifestUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "update the hash for an already existing path in the manifest",
ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
ArgsUsage: "<MANIFEST> <path> <newhash>",
Description: "Update the hash for an already existing path in the manifest",
},
{
Action: remove,
Action: manifestRemove,
CustomHelpTemplate: helpTemplate,
Name: "remove",
Usage: "removes a path from the manifest",
@@ -408,16 +474,14 @@ pv(1) tool to get a progress bar:
Name: "import",
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
`,
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
},
{
Action: dbClean,
@@ -464,6 +528,7 @@ pv(1) tool to get a progress bar:
SwarmSwapAPIFlag,
SwarmSyncDisabledFlag,
SwarmSyncUpdateDelay,
SwarmLightNodeEnabled,
SwarmDeliverySkipCheckFlag,
SwarmListenAddrFlag,
SwarmPortFlag,
@@ -529,6 +594,7 @@ func version(ctx *cli.Context) error {
func bzzd(ctx *cli.Context) error {
//build a valid bzzapi.Config from all available sources:
//default config, file config, command line and env vars
bzzconfig, err := buildConfig(ctx)
if err != nil {
utils.Fatalf("unable to configure swarm: %v", err)
@@ -545,12 +611,16 @@ func bzzd(ctx *cli.Context) error {
if _, err := os.Stat(bzzconfig.Path); err == nil {
cfg.DataDir = bzzconfig.Path
}
//optionally set the bootnodes before configuring the node
setSwarmBootstrapNodes(ctx, &cfg)
//setup the ethereum node
utils.SetNodeConfig(ctx, &cfg)
stack, err := node.New(&cfg)
if err != nil {
utils.Fatalf("can't create node: %v", err)
}
//a few steps need to be done after the config phase is completed,
//due to overriding behavior
initSwarmNode(bzzconfig, stack, ctx)
@@ -568,16 +638,6 @@ func bzzd(ctx *cli.Context) error {
stack.Stop()
}()
// Add bootnodes as initial peers.
if bzzconfig.BootNodes != "" {
bootnodes := strings.Split(bzzconfig.BootNodes, ",")
injectBootnodes(stack.Server(), bootnodes)
} else {
if bzzconfig.NetworkID == 3 {
injectBootnodes(stack.Server(), testbetBootNodes)
}
}
stack.Wait()
return nil
}
@@ -685,17 +745,6 @@ func getPassPhrase(prompt string, i int, passwords []string) string {
return password
}
func injectBootnodes(srv *p2p.Server, nodes []string) {
for _, url := range nodes {
n, err := discover.ParseNode(url)
if err != nil {
log.Error("Invalid swarm bootnode", "err", err)
continue
}
srv.AddPeer(n)
}
}
// addDefaultHelpSubcommand scans through defined CLI commands and adds
// a basic help subcommand to each
// if a help command is already defined, it will take precedence over the default.
@@ -708,3 +757,20 @@ func addDefaultHelpSubcommands(commands []cli.Command) {
}
}
}
func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) || ctx.GlobalIsSet(utils.BootnodesV4Flag.Name) {
return
}
cfg.P2P.BootstrapNodes = []*discover.Node{}
for _, url := range SwarmBootnodes {
node, err := discover.ParseNode(url)
if err != nil {
log.Error("Bootstrap URL invalid", "enode", url, "err", err)
}
cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node)
}
log.Debug("added default swarm bootnodes", "length", len(cfg.P2P.BootstrapNodes))
}
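Since setSwarmBootstrapNodes returns early whenever --bootnodes (or --bootnodesv4) is set, the compiled-in defaults can still be overridden from the command line (placeholder enode URL):

swarm --bootnodes enode://<pubkey>@<host>:<port>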


@@ -18,10 +18,8 @@
package main
import (
"encoding/json"
"fmt"
"mime"
"path/filepath"
"os"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
@@ -30,127 +28,118 @@ import (
"gopkg.in/urfave/cli.v1"
)
const bzzManifestJSON = "application/bzz-manifest+json"
func add(ctx *cli.Context) {
// manifestAdd adds a new entry to the manifest at the given path.
// The new entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.
// On success, this function prints the new (updated) manifest's hash.
func manifestAdd(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 3 {
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH> [<content-type>]")
if len(args) != 3 {
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
}
var (
mhash = args[0]
path = args[1]
hash = args[2]
ctype string
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot api.Manifest
)
if len(args) > 3 {
ctype = args[3]
} else {
ctype = mime.TypeByExtension(filepath.Ext(path))
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)
m, _, err := client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Error downloading manifest to add: %v", err)
}
l := len(m.Entries)
if l == 0 {
utils.Fatalf("No entries in manifest %s", hash)
} else if l > 1 {
utils.Fatalf("Too many entries in manifest %s", hash)
}
newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
newManifest := addEntryToManifest(client, mhash, path, m.Entries[0])
fmt.Println(newManifest)
if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
fmt.Println(string(mrootJSON))
return
}
}
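As the tests below demonstrate, grafting a new file into an existing manifest is a two-step flow (hashes are placeholders):

swarm up humans.txt
swarm manifest add <MHASH> humans.txt <HASH>

where <HASH> is the single-entry manifest hash printed by the first command and <MHASH> is the manifest being extended.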
func update(ctx *cli.Context) {
// manifestUpdate replaces an existing entry of the manifest at the given path.
// The new entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.
// On success, this function prints the hash of the updated manifest.
func manifestUpdate(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 3 {
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH>")
if len(args) != 3 {
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
}
var (
mhash = args[0]
path = args[1]
hash = args[2]
ctype string
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot api.Manifest
)
if len(args) > 3 {
ctype = args[3]
} else {
ctype = mime.TypeByExtension(filepath.Ext(path))
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)
m, _, err := client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Error downloading manifest to update: %v", err)
}
l := len(m.Entries)
if l == 0 {
utils.Fatalf("No entries in manifest %s", hash)
} else if l > 1 {
utils.Fatalf("Too many entries in manifest %s", hash)
}
newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
newManifest, _, defaultEntryUpdated := updateEntryInManifest(client, mhash, path, m.Entries[0], true)
if defaultEntryUpdated {
// Print informational message to stderr
// allowing the user to get the new manifest hash from stdout
// without the need to parse the complete output.
fmt.Fprintln(os.Stderr, "Manifest default entry is updated, too")
}
fmt.Println(newManifest)
if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
fmt.Println(string(mrootJSON))
return
}
}
func remove(ctx *cli.Context) {
// manifestRemove removes an existing entry of the manifest at the given path.
// On success, this function prints the hash of the manifest which no longer
// contains the path.
func manifestRemove(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 2 {
utils.Fatalf("Need at least two arguments <MHASH> <path>")
if len(args) != 2 {
utils.Fatalf("Need exactly two arguments <MHASH> <path>")
}
var (
mhash = args[0]
path = args[1]
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot api.Manifest
)
newManifest := removeEntryFromManifest(ctx, mhash, path)
fmt.Println(newManifest)
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)
if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
fmt.Println(string(mrootJSON))
return
}
newManifest := removeEntryFromManifest(client, mhash, path)
fmt.Println(newManifest)
}
func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
longestPathEntry = api.ManifestEntry{}
)
func addEntryToManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry) string {
var longestPathEntry = api.ManifestEntry{}
mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
//TODO: check if the "hash" to add is valid and present in swarm
_, _, err = client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Hash to add is not present: %v", err)
}
// See if the path is in this Manifest or if we have to dig deeper
for _, entry := range mroot.Entries {
if path == entry.Path {
for _, e := range mroot.Entries {
if path == e.Path {
utils.Fatalf("Path %s already present, not adding anything", path)
} else {
if entry.ContentType == bzzManifestJSON {
prfxlen := strings.HasPrefix(path, entry.Path)
if e.ContentType == api.ManifestType {
prfxlen := strings.HasPrefix(path, e.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) {
longestPathEntry = entry
longestPathEntry = e
}
}
}
@@ -159,25 +148,21 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
if longestPathEntry.Path != "" {
// Load the child Manifest and add the entry there
newPath := path[len(longestPathEntry.Path):]
newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
newHash := addEntryToManifest(client, longestPathEntry.Hash, newPath, entry)
// Replace the hash for parent Manifests
newMRoot := &api.Manifest{}
for _, entry := range mroot.Entries {
if longestPathEntry.Path == entry.Path {
entry.Hash = newHash
for _, e := range mroot.Entries {
if longestPathEntry.Path == e.Path {
e.Hash = newHash
}
newMRoot.Entries = append(newMRoot.Entries, entry)
newMRoot.Entries = append(newMRoot.Entries, e)
}
mroot = newMRoot
} else {
// Add the entry in the leaf Manifest
newEntry := api.ManifestEntry{
Hash: hash,
Path: path,
ContentType: ctype,
}
mroot.Entries = append(mroot.Entries, newEntry)
entry.Path = path
mroot.Entries = append(mroot.Entries, entry)
}
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
@@ -185,14 +170,16 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
utils.Fatalf("Manifest upload failed: %v", err)
}
return newManifestHash
}
func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
// updateEntryInManifest updates an existing entry at the given path with a new one in the manifest
// with the provided mhash, finding the path recursively through all nested manifests. Argument isRoot
// is used for default entry update detection. If the updated entry has the same hash as the default
// entry, then the default entry in the root manifest will be updated too.
// Returned values are the new manifest hash, the hash of the entry that was replaced by the new entry,
// and a bool that is true if the default entry was updated.
func updateEntryInManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry, isRoot bool) (newManifestHash, oldHash string, defaultEntryUpdated bool) {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
newEntry = api.ManifestEntry{}
longestPathEntry = api.ManifestEntry{}
)
@@ -202,17 +189,18 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
utils.Fatalf("Manifest download failed: %v", err)
}
//TODO: check if the "hash" with which to update is valid and present in swarm
// See if the path is in this Manifest or if we have to dig deeper
for _, entry := range mroot.Entries {
if path == entry.Path {
newEntry = entry
for _, e := range mroot.Entries {
if path == e.Path {
newEntry = e
// keep the reference of the hash of the entry that should be replaced
// for default entry detection
oldHash = e.Hash
} else {
if entry.ContentType == bzzManifestJSON {
prfxlen := strings.HasPrefix(path, entry.Path)
if e.ContentType == api.ManifestType {
prfxlen := strings.HasPrefix(path, e.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) {
longestPathEntry = entry
longestPathEntry = e
}
}
}
@@ -225,50 +213,50 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
if longestPathEntry.Path != "" {
// Load the child Manifest and add the entry there
newPath := path[len(longestPathEntry.Path):]
newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
var newHash string
newHash, oldHash, _ = updateEntryInManifest(client, longestPathEntry.Hash, newPath, entry, false)
// Replace the hash for parent Manifests
newMRoot := &api.Manifest{}
for _, entry := range mroot.Entries {
if longestPathEntry.Path == entry.Path {
entry.Hash = newHash
for _, e := range mroot.Entries {
if longestPathEntry.Path == e.Path {
e.Hash = newHash
}
newMRoot.Entries = append(newMRoot.Entries, entry)
newMRoot.Entries = append(newMRoot.Entries, e)
}
mroot = newMRoot
}
if newEntry.Path != "" {
// update the manifest if the new entry is found and
// check if default entry should be updated
if newEntry.Path != "" || isRoot {
// Replace the hash for leaf Manifest
newMRoot := &api.Manifest{}
for _, entry := range mroot.Entries {
if newEntry.Path == entry.Path {
myEntry := api.ManifestEntry{
Hash: hash,
Path: entry.Path,
ContentType: ctype,
}
newMRoot.Entries = append(newMRoot.Entries, myEntry)
} else {
for _, e := range mroot.Entries {
if newEntry.Path == e.Path {
entry.Path = e.Path
newMRoot.Entries = append(newMRoot.Entries, entry)
} else if isRoot && e.Path == "" && e.Hash == oldHash {
entry.Path = e.Path
newMRoot.Entries = append(newMRoot.Entries, entry)
defaultEntryUpdated = true
} else {
newMRoot.Entries = append(newMRoot.Entries, e)
}
}
mroot = newMRoot
}
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
newManifestHash, err = client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
return newManifestHash
return newManifestHash, oldHash, defaultEntryUpdated
}
func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
func removeEntryFromManifest(client *swarm.Client, mhash, path string) string {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
entryToRemove = api.ManifestEntry{}
longestPathEntry = api.ManifestEntry{}
)
@@ -283,7 +271,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
if path == entry.Path {
entryToRemove = entry
} else {
if entry.ContentType == bzzManifestJSON {
if entry.ContentType == api.ManifestType {
prfxlen := strings.HasPrefix(path, entry.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) {
longestPathEntry = entry
@@ -299,7 +287,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
if longestPathEntry.Path != "" {
// Load the child Manifest and remove the entry there
newPath := path[len(longestPathEntry.Path):]
newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
newHash := removeEntryFromManifest(client, longestPathEntry.Hash, newPath)
// Replace the hash for parent Manifests
newMRoot := &api.Manifest{}

cmd/swarm/manifest_test.go (new file, 579 lines)

@@ -0,0 +1,579 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)
// TestManifestChange tests manifest add, update and remove
// cli commands without encryption.
func TestManifestChange(t *testing.T) {
testManifestChange(t, false)
}
// TestManifestChange tests manifest add, update and remove
// cli commands with encryption enabled.
func TestManifestChangeEncrypted(t *testing.T) {
testManifestChange(t, true)
}
// testManifestChange performs cli commands:
// - manifest add
// - manifest update
// - manifest remove
// on a manifest, testing the functionality of these
// commands on paths that are in the root manifest or in a nested one.
// Argument encrypt controls whether to use encryption or not.
func testManifestChange(t *testing.T, encrypt bool) {
t.Parallel()
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
tmp, err := ioutil.TempDir("", "swarm-manifest-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
origDir := filepath.Join(tmp, "orig")
if err := os.Mkdir(origDir, 0777); err != nil {
t.Fatal(err)
}
indexDataFilename := filepath.Join(origDir, "index.html")
err = ioutil.WriteFile(indexDataFilename, []byte("<h1>Test</h1>"), 0666)
if err != nil {
t.Fatal(err)
}
// The file paths robots.txt and robots.html share the same prefix "robots.",
// which will result in a manifest with a nested manifest under the path "robots.".
// This will allow testing manifest changes on both root and nested manifests.
err = ioutil.WriteFile(filepath.Join(origDir, "robots.txt"), []byte("Disallow: /"), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(origDir, "robots.html"), []byte("<strong>No Robots Allowed</strong>"), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(origDir, "mutants.txt"), []byte("Frank\nMarcus"), 0666)
if err != nil {
t.Fatal(err)
}
args := []string{
"--bzzapi",
cluster.Nodes[0].URL,
"--recursive",
"--defaultpath",
indexDataFilename,
"up",
origDir,
}
if encrypt {
args = append(args, "--encrypt")
}
origManifestHash := runSwarmExpectHash(t, args...)
checkHashLength(t, origManifestHash, encrypt)
client := swarm.NewClient(cluster.Nodes[0].URL)
// upload a new file and use its manifest to add it to the original manifest.
t.Run("add", func(t *testing.T) {
humansData := []byte("Ann\nBob")
humansDataFilename := filepath.Join(tmp, "humans.txt")
err = ioutil.WriteFile(humansDataFilename, humansData, 0666)
if err != nil {
t.Fatal(err)
}
humansManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
humansDataFilename,
)
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"add",
origManifestHash,
"humans.txt",
humansManifestHash,
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
for _, e := range newManifest.Entries {
if e.Path == "humans.txt" {
found = true
if e.Size != int64(len(humansData)) {
t.Errorf("expected humans.txt size %v, got %v", len(humansData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for humans.txt")
}
ct := "text/plain; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break
}
}
if !found {
t.Fatal("no humans.txt in new manifest")
}
checkFile(t, client, newManifestHash, "humans.txt", humansData)
})
// upload a new file and use its manifest to add it to the original manifest,
// but ensure that the file will be in the nested manifest of the original one.
t.Run("add nested", func(t *testing.T) {
robotsData := []byte(`{"disallow": "/"}`)
robotsDataFilename := filepath.Join(tmp, "robots.json")
err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666)
if err != nil {
t.Fatal(err)
}
robotsManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
robotsDataFilename,
)
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"add",
origManifestHash,
"robots.json",
robotsManifestHash,
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
loop:
for _, e := range newManifest.Entries {
if e.Path == "robots." {
nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
for _, e := range nestedManifest.Entries {
if e.Path == "json" {
found = true
if e.Size != int64(len(robotsData)) {
t.Errorf("expected robots.json size %v, got %v", len(robotsData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for robots.json")
}
ct := "application/json"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break loop
}
}
}
}
if !found {
t.Fatal("no robots.json in new manifest")
}
checkFile(t, client, newManifestHash, "robots.json", robotsData)
})
// upload a new file and use its manifest to change the file in the original manifest.
t.Run("update", func(t *testing.T) {
indexData := []byte("<h1>Ethereum Swarm</h1>")
indexDataFilename := filepath.Join(tmp, "index.html")
err = ioutil.WriteFile(indexDataFilename, indexData, 0666)
if err != nil {
t.Fatal(err)
}
indexManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
indexDataFilename,
)
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"update",
origManifestHash,
"index.html",
indexManifestHash,
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
for _, e := range newManifest.Entries {
if e.Path == "index.html" {
found = true
if e.Size != int64(len(indexData)) {
t.Errorf("expected index.html size %v, got %v", len(indexData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for index.html")
}
ct := "text/html; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break
}
}
if !found {
t.Fatal("no index.html in new manifest")
}
checkFile(t, client, newManifestHash, "index.html", indexData)
// check default entry change
checkFile(t, client, newManifestHash, "", indexData)
})
// upload a new file and use its manifest to change the file in the original manifest,
// but ensure that the file is in the nested manifest of the original one.
t.Run("update nested", func(t *testing.T) {
robotsData := []byte(`<strong>Only humans allowed!!!</strong>`)
robotsDataFilename := filepath.Join(tmp, "robots.html")
err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666)
if err != nil {
t.Fatal(err)
}
humansManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
robotsDataFilename,
)
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"update",
origManifestHash,
"robots.html",
humansManifestHash,
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
loop:
for _, e := range newManifest.Entries {
if e.Path == "robots." {
nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
for _, e := range nestedManifest.Entries {
if e.Path == "html" {
found = true
if e.Size != int64(len(robotsData)) {
t.Errorf("expected robots.html size %v, got %v", len(robotsData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for robots.html")
}
ct := "text/html; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break loop
}
}
}
}
if !found {
t.Fatal("no robots.html in new manifest")
}
checkFile(t, client, newManifestHash, "robots.html", robotsData)
})
// remove a file from the manifest.
t.Run("remove", func(t *testing.T) {
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"remove",
origManifestHash,
"mutants.txt",
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
for _, e := range newManifest.Entries {
if e.Path == "mutants.txt" {
found = true
break
}
}
if found {
t.Fatal("mutants.txt is not removed")
}
})
// remove a file from the manifest, but ensure that the file is in
// the nested manifest of the original one.
t.Run("remove nested", func(t *testing.T) {
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"remove",
origManifestHash,
"robots.html",
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
loop:
for _, e := range newManifest.Entries {
if e.Path == "robots." {
nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
for _, e := range nestedManifest.Entries {
if e.Path == "html" {
found = true
break loop
}
}
}
}
if found {
t.Fatal("robots.html in not removed")
}
})
}
// TestNestedDefaultEntryUpdate tests if the default entry is updated
// if the file in nested manifest used for it is also updated.
func TestNestedDefaultEntryUpdate(t *testing.T) {
testNestedDefaultEntryUpdate(t, false)
}
// TestNestedDefaultEntryUpdateEncrypted tests if the default entry
// of encrypted upload is updated if the file in nested manifest
// used for it is also updated.
func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) {
testNestedDefaultEntryUpdate(t, true)
}
func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {
t.Parallel()
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
tmp, err := ioutil.TempDir("", "swarm-manifest-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
origDir := filepath.Join(tmp, "orig")
if err := os.Mkdir(origDir, 0777); err != nil {
t.Fatal(err)
}
indexData := []byte("<h1>Test</h1>")
indexDataFilename := filepath.Join(origDir, "index.html")
err = ioutil.WriteFile(indexDataFilename, indexData, 0666)
if err != nil {
t.Fatal(err)
}
// Add another file sharing a common prefix with the default entry to test
// updates of the default entry with nested manifests.
err = ioutil.WriteFile(filepath.Join(origDir, "index.txt"), []byte("Test"), 0666)
if err != nil {
t.Fatal(err)
}
args := []string{
"--bzzapi",
cluster.Nodes[0].URL,
"--recursive",
"--defaultpath",
indexDataFilename,
"up",
origDir,
}
if encrypt {
args = append(args, "--encrypt")
}
origManifestHash := runSwarmExpectHash(t, args...)
checkHashLength(t, origManifestHash, encrypt)
client := swarm.NewClient(cluster.Nodes[0].URL)
newIndexData := []byte("<h1>Ethereum Swarm</h1>")
newIndexDataFilename := filepath.Join(tmp, "index.html")
err = ioutil.WriteFile(newIndexDataFilename, newIndexData, 0666)
if err != nil {
t.Fatal(err)
}
newIndexManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
newIndexDataFilename,
)
newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"update",
origManifestHash,
"index.html",
newIndexManifestHash,
)
checkHashLength(t, newManifestHash, encrypt)
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
var found bool
for _, e := range newManifest.Entries {
if e.Path == "index." {
found = true
newManifest = downloadManifest(t, client, e.Hash, encrypt)
break
}
}
if !found {
t.Fatal("no index. path in new manifest")
}
found = false
for _, e := range newManifest.Entries {
if e.Path == "html" {
found = true
if e.Size != int64(len(newIndexData)) {
t.Errorf("expected index.html size %v, got %v", len(newIndexData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for index.html")
}
ct := "text/html; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break
}
}
if !found {
t.Fatal("no html in new manifest")
}
checkFile(t, client, newManifestHash, "index.html", newIndexData)
// check default entry change
checkFile(t, client, newManifestHash, "", newIndexData)
}
func runSwarmExpectHash(t *testing.T, args ...string) (hash string) {
t.Helper()
hashRegexp := `[a-f\d]{64,128}`
up := runSwarm(t, args...)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
if len(matches) < 1 {
t.Fatal("no matches found")
}
return matches[0]
}
func checkHashLength(t *testing.T, hash string, encrypted bool) {
t.Helper()
l := len(hash)
if encrypted && l != 128 {
t.Errorf("expected hash length 128, got %v", l)
}
if !encrypted && l != 64 {
t.Errorf("expected hash length 64, got %v", l)
}
}
func downloadManifest(t *testing.T, client *swarm.Client, hash string, encrypted bool) (manifest *api.Manifest) {
t.Helper()
m, isEncrypted, err := client.DownloadManifest(hash)
if err != nil {
t.Fatal(err)
}
if encrypted != isEncrypted {
t.Error("new manifest encryption flag is not correct")
}
return m
}
func checkFile(t *testing.T, client *swarm.Client, hash, path string, expected []byte) {
t.Helper()
f, err := client.Download(hash, path)
if err != nil {
t.Fatal(err)
}
got, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(got, expected) {
t.Errorf("expected file content %q, got %q", expected, got)
}
}


@@ -17,12 +17,17 @@
package main
import (
"context"
"crypto/ecdsa"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"runtime"
"sync"
"syscall"
"testing"
"time"
@@ -172,14 +177,15 @@ func (c *testCluster) Cleanup() {
}
type testNode struct {
Name string
Addr string
URL string
Enode string
Dir string
IpcPath string
Client *rpc.Client
Cmd *cmdtest.TestCmd
Name string
Addr string
URL string
Enode string
Dir string
IpcPath string
PrivateKey *ecdsa.PrivateKey
Client *rpc.Client
Cmd *cmdtest.TestCmd
}
const testPassphrase = "swarm-test-passphrase"
@@ -218,14 +224,12 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
}
// assign ports
httpPort, err := assignTCPPort()
if err != nil {
t.Fatal(err)
}
p2pPort, err := assignTCPPort()
ports, err := getAvailableTCPPorts(2)
if err != nil {
t.Fatal(err)
}
p2pPort := ports[0]
httpPort := ports[1]
// start the node
node.Cmd = runSwarm(t,
@@ -246,6 +250,17 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
}
}()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// ensure that all ports have active listeners
// so that the next node will not get the same
// ports when calling getAvailableTCPPorts
err = waitTCPPorts(ctx, ports...)
if err != nil {
t.Fatal(err)
}
// wait for the node to start
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
node.Client, err = rpc.Dial(conf.IPCEndpoint())
@@ -277,17 +292,19 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
func newTestNode(t *testing.T, dir string) *testNode {
conf, account := getTestAccount(t, dir)
node := &testNode{Dir: dir}
ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)
pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})
node := &testNode{Dir: dir, PrivateKey: pk}
// assign ports
httpPort, err := assignTCPPort()
if err != nil {
t.Fatal(err)
}
p2pPort, err := assignTCPPort()
ports, err := getAvailableTCPPorts(2)
if err != nil {
t.Fatal(err)
}
p2pPort := ports[0]
httpPort := ports[1]
// start the node
node.Cmd = runSwarm(t,
@@ -308,6 +325,17 @@ func newTestNode(t *testing.T, dir string) *testNode {
}
}()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// ensure that all ports have active listeners
// so that the next node will not get the same
// ports when calling getAvailableTCPPorts
err = waitTCPPorts(ctx, ports...)
if err != nil {
t.Fatal(err)
}
// wait for the node to start
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
node.Client, err = rpc.Dial(conf.IPCEndpoint())
@@ -343,15 +371,92 @@ func (n *testNode) Shutdown() {
}
}
func assignTCPPort() (string, error) {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return "", err
// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Function assignTCPPort cannot be called in sequence
// and guarantee that the same port will not be returned in
// different calls, as the listener is closed within the function,
// not after all listeners are started and unique available
// ports are selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
for i := 0; i < count; i++ {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
// defer close in the loop to be sure the same port will not
// be selected in the next iteration
defer l.Close()
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return nil, err
}
ports = append(ports, port)
}
return ports, nil
}
// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs all
// port dialers in parallel, and returns the first
// error encountered.
// See waitTCPPort also.
func waitTCPPorts(ctx context.Context, ports ...string) error {
var err error
// mu protects the err variable that is assigned in
// other goroutines
var mu sync.Mutex
// cancel cancels all goroutines
// when the first error is encountered,
// to prevent unnecessary waiting
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var wg sync.WaitGroup
for _, port := range ports {
wg.Add(1)
go func(port string) {
defer wg.Done()
e := waitTCPPort(ctx, port)
mu.Lock()
defer mu.Unlock()
if e != nil && err == nil {
err = e
cancel()
}
}(port)
}
wg.Wait()
return err
}
// waitTCPPort blocks until a tcp connection can be established
// on a provided port. It has a maximum timeout of 3 minutes
// to prevent long waiting, which can be shortened with
// the provided context instance. The dialer has a 10 second timeout
// in every iteration, and a connection refused error is
// retried in 100 millisecond periods.
func waitTCPPort(ctx context.Context, port string) error {
ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()
for {
c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
if err != nil {
if operr, ok := err.(*net.OpError); ok {
if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
time.Sleep(100 * time.Millisecond)
continue
}
}
return err
}
return c.Close()
}
l.Close()
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return "", err
}
return port, nil
}
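The two helpers above are intended to be used as a pair: reserve all ports first, hand them to the node, then block until the node actually listens on them. A minimal sketch of that call pattern inside a test, assuming the helpers as defined above:

    ports, err := getAvailableTCPPorts(2)
    if err != nil {
        t.Fatal(err)
    }
    p2pPort, httpPort := ports[0], ports[1] // hand these to the node under test
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    // block until both ports have active listeners, so a concurrently
    // started node cannot be handed the same ports
    if err := waitTCPPorts(ctx, p2pPort, httpPort); err != nil {
        t.Fatal(err)
    }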


@@ -98,6 +98,17 @@ func upload(ctx *cli.Context) {
if !recursive {
return "", errors.New("Argument is a directory and recursive upload is disabled")
}
if defaultPath != "" {
// construct absolute default path
absDefaultPath, _ := filepath.Abs(defaultPath)
absFile, _ := filepath.Abs(file)
// make sure absolute directory ends with only one "/"
// to trim it from absolute default path and get relative default path
absFile = strings.TrimRight(absFile, "/") + "/"
if absDefaultPath != "" && absFile != "" && strings.HasPrefix(absDefaultPath, absFile) {
defaultPath = strings.TrimPrefix(absDefaultPath, absFile)
}
}
return client.UploadDirectory(file, defaultPath, "", toEncrypt)
}
} else {
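To make the trimming above concrete: for a sketch invocation like swarm --recursive --defaultpath /tmp/site/index.html up /tmp/site, absFile resolves to "/tmp/site/" and absDefaultPath to "/tmp/site/index.html", so the prefix trim yields the relative default path "index.html" that UploadDirectory expects. An absolute default path outside the uploaded directory fails the HasPrefix check and is passed through unchanged.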


@@ -273,3 +273,84 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
}
}
}
// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute
// default paths and with encryption.
func TestCLISwarmUpDefaultPath(t *testing.T) {
testCLISwarmUpDefaultPath(false, false, t)
testCLISwarmUpDefaultPath(false, true, t)
testCLISwarmUpDefaultPath(true, false, t)
testCLISwarmUpDefaultPath(true, true, t)
}
func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
err = ioutil.WriteFile(filepath.Join(tmp, "index.html"), []byte("<h1>Test</h1>"), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(tmp, "robots.txt"), []byte("Disallow: /"), 0666)
if err != nil {
t.Fatal(err)
}
defaultPath := "index.html"
if absDefaultPath {
defaultPath = filepath.Join(tmp, defaultPath)
}
args := []string{
"--bzzapi",
cluster.Nodes[0].URL,
"--recursive",
"--defaultpath",
defaultPath,
"up",
tmp,
}
if toEncrypt {
args = append(args, "--encrypt")
}
up := runSwarm(t, args...)
hashRegexp := `[a-f\d]{64,128}`
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
client := swarm.NewClient(cluster.Nodes[0].URL)
m, isEncrypted, err := client.DownloadManifest(hash)
if err != nil {
t.Fatal(err)
}
if toEncrypt != isEncrypted {
t.Error("downloaded manifest is not encrypted")
}
var found bool
var entriesCount int
for _, e := range m.Entries {
entriesCount++
if e.Path == "" {
found = true
}
}
if !found {
t.Error("manifest default entry was not found")
}
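// the three expected entries are index.html, robots.txt and the
// default (empty path) entry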
if entriesCount != 3 {
t.Errorf("manifest contains %v entries, expected %v", entriesCount, 3)
}
}


@@ -24,7 +24,6 @@ import (
"math/big"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
@@ -158,14 +157,6 @@ var (
Usage: "Document Root for HTTPClient file scheme",
Value: DirectoryString{homeDir()},
}
FastSyncFlag = cli.BoolFlag{
Name: "fast",
Usage: "Enable fast syncing through state downloads (replaced by --syncmode)",
}
LightModeFlag = cli.BoolFlag{
Name: "light",
Usage: "Enable light client mode (replaced by --syncmode)",
}
defaultSyncMode = eth.DefaultConfig.SyncMode
SyncModeFlag = TextMarshalerFlag{
Name: "syncmode",
@@ -242,6 +233,10 @@ var (
Value: eth.DefaultConfig.Ethash.DatasetsOnDisk,
}
// Transaction pool settings
TxPoolLocalsFlag = cli.StringFlag{
Name: "txpool.locals",
Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)",
}
TxPoolNoLocalsFlag = cli.BoolFlag{
Name: "txpool.nolocals",
Usage: "Disables price exemptions for locally submitted transactions",
@@ -318,29 +313,62 @@ var (
Usage: "Enable mining",
}
MinerThreadsFlag = cli.IntFlag{
Name: "minerthreads",
Name: "miner.threads",
Usage: "Number of CPU threads to use for mining",
Value: runtime.NumCPU(),
Value: 0,
}
TargetGasLimitFlag = cli.Uint64Flag{
Name: "targetgaslimit",
Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
MinerLegacyThreadsFlag = cli.IntFlag{
Name: "minerthreads",
Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)",
Value: 0,
}
MinerNotifyFlag = cli.StringFlag{
Name: "miner.notify",
Usage: "Comma separated HTTP URL list to notify of new work packages",
}
MinerGasTargetFlag = cli.Uint64Flag{
Name: "miner.gastarget",
Usage: "Target gas floor for mined blocks",
Value: params.GenesisGasLimit,
}
EtherbaseFlag = cli.StringFlag{
Name: "etherbase",
Usage: "Public address for block mining rewards (default = first account created)",
MinerLegacyGasTargetFlag = cli.Uint64Flag{
Name: "targetgaslimit",
Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
Value: params.GenesisGasLimit,
}
MinerGasPriceFlag = BigFlag{
Name: "miner.gasprice",
Usage: "Minimal gas price for mining a transactions",
Value: eth.DefaultConfig.MinerGasPrice,
}
MinerLegacyGasPriceFlag = BigFlag{
Name: "gasprice",
Usage: "Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)",
Value: eth.DefaultConfig.MinerGasPrice,
}
MinerEtherbaseFlag = cli.StringFlag{
Name: "miner.etherbase",
Usage: "Public address for block mining rewards (default = first account)",
Value: "0",
}
GasPriceFlag = BigFlag{
Name: "gasprice",
Usage: "Minimal gas price to accept for mining a transactions",
Value: eth.DefaultConfig.GasPrice,
MinerLegacyEtherbaseFlag = cli.StringFlag{
Name: "etherbase",
Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)",
Value: "0",
}
ExtraDataFlag = cli.StringFlag{
Name: "extradata",
MinerExtraDataFlag = cli.StringFlag{
Name: "miner.extradata",
Usage: "Block extra data set by the miner (default = client version)",
}
MinerLegacyExtraDataFlag = cli.StringFlag{
Name: "extradata",
Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)",
}
MinerRecommitIntervalFlag = cli.DurationFlag{
Name: "miner.recommit",
Usage: "Time interval to recreate the block being mined.",
Value: eth.DefaultConfig.MinerRecommit,
}
// Account settings
UnlockedAccountFlag = cli.StringFlag{
Name: "unlock",
@@ -810,10 +838,19 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
// setEtherbase retrieves the etherbase either from the directly specified
// command line flags or from the keystore if CLI indexed.
func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
if ctx.GlobalIsSet(EtherbaseFlag.Name) {
account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name))
// Extract the current etherbase, new flag overriding legacy one
var etherbase string
if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) {
etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name)
}
if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) {
etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name)
}
// Convert the etherbase into an address and configure it
if etherbase != "" {
account, err := MakeAddress(ks, etherbase)
if err != nil {
Fatalf("Option %q: %v", EtherbaseFlag.Name, err)
Fatalf("Invalid miner etherbase: %v", err)
}
cfg.Etherbase = account.Address
}
@@ -844,7 +881,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setBootstrapNodes(ctx, cfg)
setBootstrapNodesV5(ctx, cfg)
lightClient := ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalString(SyncModeFlag.Name) == "light"
lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light"
lightServer := ctx.GlobalInt(LightServFlag.Name) != 0
lightPeers := ctx.GlobalInt(LightPeersFlag.Name)
@@ -944,6 +981,16 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
}
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
if ctx.GlobalIsSet(TxPoolLocalsFlag.Name) {
locals := strings.Split(ctx.GlobalString(TxPoolLocalsFlag.Name), ",")
for _, account := range locals {
if trimmed := strings.TrimSpace(account); !common.IsHexAddress(trimmed) {
Fatalf("Invalid account in --txpool.locals: %s", trimmed)
} else {
cfg.Locals = append(cfg.Locals, common.HexToAddress(trimmed))
}
}
}
if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
}
@@ -1049,8 +1096,6 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
// Avoid conflicting network flags
checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
checkExclusive(ctx, LightServFlag, LightModeFlag)
checkExclusive(ctx, LightServFlag, SyncModeFlag, "light")
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@@ -1059,13 +1104,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
switch {
case ctx.GlobalIsSet(SyncModeFlag.Name):
if ctx.GlobalIsSet(SyncModeFlag.Name) {
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
case ctx.GlobalBool(FastSyncFlag.Name):
cfg.SyncMode = downloader.FastSync
case ctx.GlobalBool(LightModeFlag.Name):
cfg.SyncMode = downloader.LightSync
}
if ctx.GlobalIsSet(LightServFlag.Name) {
cfg.LightServ = ctx.GlobalInt(LightServFlag.Name)
@@ -1090,17 +1130,32 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
if ctx.GlobalIsSet(MinerLegacyThreadsFlag.Name) {
cfg.MinerThreads = ctx.GlobalInt(MinerLegacyThreadsFlag.Name)
}
if ctx.GlobalIsSet(MinerThreadsFlag.Name) {
cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name)
}
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
}
if ctx.GlobalIsSet(DocRootFlag.Name) {
cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
}
if ctx.GlobalIsSet(ExtraDataFlag.Name) {
cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name))
if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) {
cfg.MinerExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name))
}
if ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name)
if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
cfg.MinerExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name))
}
if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
cfg.MinerGasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name)
}
if ctx.GlobalIsSet(MinerGasPriceFlag.Name) {
cfg.MinerGasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name)
}
if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
cfg.MinerRecommit = ctx.Duration(MinerRecommitIntervalFlag.Name)
}
if ctx.GlobalIsSet(VMEnableDebugFlag.Name) {
// TODO(fjl): force-enable this in --dev mode
@@ -1142,8 +1197,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
log.Info("Using developer account", "address", developer.Address)
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = big.NewInt(1)
if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
cfg.MinerGasPrice = big.NewInt(1)
}
}
// TODO(fjl): move trie cache generations into config
@@ -1217,7 +1272,10 @@ func RegisterEthStatsService(stack *node.Node, url string) {
// SetupNetwork configures the system for either the main net or some test network.
func SetupNetwork(ctx *cli.Context) {
// TODO(fjl): move target gas limit into config
params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
params.TargetGasLimit = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name)
if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
params.TargetGasLimit = ctx.GlobalUint64(MinerGasTargetFlag.Name)
}
}
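The precedence pattern here is the same as in setEtherbase above: the legacy flag value is read first and the new namespaced flag, when explicitly set, overrides it, so --miner.gastarget wins over --targetgaslimit when both are given.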
func SetupMetrics(ctx *cli.Context) {
@@ -1248,7 +1306,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
handles = makeDatabaseHandles()
)
name := "chaindata"
if ctx.GlobalBool(LightModeFlag.Name) {
if ctx.GlobalString(SyncModeFlag.Name) == "light" {
name = "lightchaindata"
}
chainDb, err := stack.OpenDatabase(name, cache, handles)
@@ -1293,7 +1351,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
})
}, nil)
}
}
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {


@@ -30,3 +30,34 @@ type AbsTime time.Duration
func Now() AbsTime {
return AbsTime(monotime.Now())
}
// Add returns t + d.
func (t AbsTime) Add(d time.Duration) AbsTime {
return t + AbsTime(d)
}
// Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
Now() AbsTime
Sleep(time.Duration)
After(time.Duration) <-chan time.Time
}
// System implements Clock using the system clock.
type System struct{}
// Now implements Clock.
func (System) Now() AbsTime {
return AbsTime(monotime.Now())
}
// Sleep implements Clock.
func (System) Sleep(d time.Duration) {
time.Sleep(d)
}
// After implements Clock.
func (System) After(d time.Duration) <-chan time.Time {
return time.After(d)
}

common/mclock/simclock.go

@@ -0,0 +1,129 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package mclock
import (
"sync"
"time"
)
// Simulated implements a virtual Clock for reproducible time-sensitive tests. It
// simulates a scheduler on a virtual timescale where actual processing takes zero time.
//
// The virtual clock doesn't advance on its own, call Run to advance it and execute timers.
// Since there is no way to influence the Go scheduler, testing timeout behaviour involving
// goroutines needs special care. A good way to test such timeouts is as follows: First
// perform the action that is supposed to time out. Ensure that the timer you want to test
// is created. Then run the clock until after the timeout. Finally observe the effect of
// the timeout using a channel or semaphore.
type Simulated struct {
now AbsTime
scheduled []event
mu sync.RWMutex
cond *sync.Cond
}
type event struct {
do func()
at AbsTime
}
// Run moves the clock by the given duration, executing all timers before that duration.
func (s *Simulated) Run(d time.Duration) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()
end := s.now + AbsTime(d)
for len(s.scheduled) > 0 {
ev := s.scheduled[0]
if ev.at > end {
break
}
s.now = ev.at
ev.do()
s.scheduled = s.scheduled[1:]
}
s.now = end
}
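// ActiveTimers returns the number of scheduled timers that haven't fired yet.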
func (s *Simulated) ActiveTimers() int {
s.mu.RLock()
defer s.mu.RUnlock()
return len(s.scheduled)
}
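// WaitForTimers blocks until at least n timers are scheduled.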
func (s *Simulated) WaitForTimers(n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()
for len(s.scheduled) < n {
s.cond.Wait()
}
}
// Now implements Clock.
func (s *Simulated) Now() AbsTime {
s.mu.RLock()
defer s.mu.RUnlock()
return s.now
}
// Sleep implements Clock.
func (s *Simulated) Sleep(d time.Duration) {
<-s.After(d)
}
// After implements Clock.
func (s *Simulated) After(d time.Duration) <-chan time.Time {
after := make(chan time.Time, 1)
s.insert(d, func() {
after <- (time.Time{}).Add(time.Duration(s.now))
})
return after
}
func (s *Simulated) insert(d time.Duration, do func()) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()
at := s.now + AbsTime(d)
l, h := 0, len(s.scheduled)
ll := h
for l != h {
m := (l + h) / 2
if at < s.scheduled[m].at {
h = m
} else {
l = m + 1
}
}
s.scheduled = append(s.scheduled, event{})
copy(s.scheduled[l+1:], s.scheduled[l:ll])
s.scheduled[l] = event{do: do, at: at}
s.cond.Broadcast()
}
func (s *Simulated) init() {
if s.cond == nil {
s.cond = sync.NewCond(&s.mu)
}
}
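A minimal sketch of the testing pattern described in the Simulated doc comment above; the worker goroutine and channel are illustrative, not part of the package:

    var clk mclock.Simulated
    timedOut := make(chan struct{})
    go func() {
        clk.Sleep(5 * time.Second) // the timeout under test, on virtual time
        close(timedOut)
    }()
    clk.WaitForTimers(1)      // ensure the timer has been registered
    clk.Run(10 * time.Second) // advance virtual time past the timeout
    <-timedOut                // observe the effect of the timeout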

common/prque/prque.go

@@ -0,0 +1,57 @@
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".
package prque
import (
"container/heap"
)
// Priority queue data structure.
type Prque struct {
cont *sstack
}
// Creates a new priority queue.
func New(setIndex setIndexCallback) *Prque {
return &Prque{newSstack(setIndex)}
}
// Pushes a value with a given priority into the queue, expanding if necessary.
func (p *Prque) Push(data interface{}, priority int64) {
heap.Push(p.cont, &item{data, priority})
}
// Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) {
item := heap.Pop(p.cont).(*item)
return item.value, item.priority
}
// Pops only the item from the queue, dropping the associated priority value.
func (p *Prque) PopItem() interface{} {
return heap.Pop(p.cont).(*item).value
}
// Remove removes the element with the given index.
func (p *Prque) Remove(i int) interface{} {
if i < 0 {
return nil
}
return heap.Remove(p.cont, i)
}
// Checks whether the priority queue is empty.
func (p *Prque) Empty() bool {
return p.cont.Len() == 0
}
// Returns the number of elements in the priority queue.
func (p *Prque) Size() int {
return p.cont.Len()
}
// Clears the contents of the priority queue.
func (p *Prque) Reset() {
*p = *New(p.cont.setIndex)
}
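A minimal usage sketch, passing a nil callback since no removal by index is needed:

    q := prque.New(nil)
    q.Push("low", 1)
    q.Push("high", 5)
    for !q.Empty() {
        value, priority := q.Pop() // highest priority comes out first
        fmt.Println(value, priority)
    }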

common/prque/sstack.go

@@ -0,0 +1,106 @@
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".
package prque
// The size of a block of data
const blockSize = 4096
// A prioritized item in the sorted stack.
//
// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0.
// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63.
type item struct {
value interface{}
priority int64
}
// setIndexCallback is called when the element is moved to a new index.
// Providing setIndexCallback is optional; it is needed only if the application needs
// to delete elements other than the top one.
type setIndexCallback func(a interface{}, i int)
// Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heaps.
type sstack struct {
setIndex setIndexCallback
size int
capacity int
offset int
blocks [][]*item
active []*item
}
// Creates a new, empty stack.
func newSstack(setIndex setIndexCallback) *sstack {
result := new(sstack)
result.setIndex = setIndex
result.active = make([]*item, blockSize)
result.blocks = [][]*item{result.active}
result.capacity = blockSize
return result
}
// Pushes a value onto the stack, expanding it if necessary. Required by
// heap.Interface.
func (s *sstack) Push(data interface{}) {
if s.size == s.capacity {
s.active = make([]*item, blockSize)
s.blocks = append(s.blocks, s.active)
s.capacity += blockSize
s.offset = 0
} else if s.offset == blockSize {
s.active = s.blocks[s.size/blockSize]
s.offset = 0
}
if s.setIndex != nil {
s.setIndex(data.(*item).value, s.size)
}
s.active[s.offset] = data.(*item)
s.offset++
s.size++
}
// Pops a value off the stack and returns it. Currently no shrinking is done.
// Required by heap.Interface.
func (s *sstack) Pop() (res interface{}) {
s.size--
s.offset--
if s.offset < 0 {
s.offset = blockSize - 1
s.active = s.blocks[s.size/blockSize]
}
res, s.active[s.offset] = s.active[s.offset], nil
if s.setIndex != nil {
s.setIndex(res.(*item).value, -1)
}
return
}
// Returns the length of the stack. Required by sort.Interface.
func (s *sstack) Len() int {
return s.size
}
// Compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface.
func (s *sstack) Less(i, j int) bool {
return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0
}
// Swaps two elements in the stack. Required by sort.Interface.
func (s *sstack) Swap(i, j int) {
ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
a, b := s.blocks[jb][jo], s.blocks[ib][io]
if s.setIndex != nil {
s.setIndex(a.value, i)
s.setIndex(b.value, j)
}
s.blocks[ib][io], s.blocks[jb][jo] = a, b
}
// Resets the stack, effectively clearing its contents.
func (s *sstack) Reset() {
*s = *newSstack(s.setIndex)
}


@@ -387,22 +387,23 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
break
}
}
// If we're at block zero, make a snapshot
if number == 0 {
genesis := chain.GetHeaderByNumber(0)
if err := c.VerifyHeader(chain, genesis, false); err != nil {
return nil, err
// If we're at a checkpoint block, make a snapshot if it's known
if number%c.config.Epoch == 0 {
checkpoint := chain.GetHeaderByNumber(number)
if checkpoint != nil {
hash := checkpoint.Hash()
signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength)
for i := 0; i < len(signers); i++ {
copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:])
}
snap = newSnapshot(c.config, c.signatures, number, hash, signers)
if err := snap.store(c.db); err != nil {
return nil, err
}
log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
break
}
signers := make([]common.Address, (len(genesis.Extra)-extraVanity-extraSeal)/common.AddressLength)
for i := 0; i < len(signers); i++ {
copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
}
snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
if err := snap.store(c.db); err != nil {
return nil, err
}
log.Trace("Stored genesis voting snapshot to disk")
break
}
// No snapshot for this header, gather the header and move backward
var header *types.Header
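For reference on the signer arithmetic above: a Clique checkpoint header's Extra field is laid out as a 32-byte vanity prefix, the concatenated 20-byte signer addresses, and a 65-byte seal suffix, so for example len(Extra) = 32 + 3*20 + 65 = 157 decodes to three signers.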
@@ -672,6 +673,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
return new(big.Int).Set(diffNoTurn)
}
// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
func (c *Clique) Close() error {
return nil
}
// APIs implements consensus.Engine, returning the user facing RPC API to allow
// controlling the signer voting.
func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {


@@ -84,7 +84,7 @@ func (r *testerChainReader) GetHeaderByNumber(number uint64) *types.Header {
if number == 0 {
return rawdb.ReadHeader(r.db, rawdb.ReadCanonicalHash(r.db, 0), 0)
}
panic("not supported")
return nil
}
// Tests that voting is evaluated correctly for various simple and complex scenarios.


@@ -96,6 +96,9 @@ type Engine interface {
// APIs returns the RPC APIs this consensus engine provides.
APIs(chain ChainReader) []rpc.API
// Close terminates any background threads maintained by the consensus engine.
Close() error
}
// PoW is a consensus engine based on proof-of-work.


@@ -729,7 +729,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
go func(idx int) {
defer pend.Done()
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil)
defer ethash.Close()
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
}

consensus/ethash/api.go

@@ -0,0 +1,117 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
var errEthashStopped = errors.New("ethash stopped")
// API exposes ethash related methods for the RPC interface.
type API struct {
ethash *Ethash // Make sure the mode of ethash is normal.
}
// GetWork returns a work package for external miner.
//
// The work package consists of 3 strings:
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (api *API) GetWork() ([3]string, error) {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
return [3]string{}, errors.New("not supported")
}
var (
workCh = make(chan [3]string, 1)
errc = make(chan error, 1)
)
select {
case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.exitCh:
return [3]string{}, errEthashStopped
}
select {
case work := <-workCh:
return work, nil
case err := <-errc:
return [3]string{}, err
}
}
// SubmitWork can be used by external miners to submit their PoW solution.
// It returns an indication of whether the work was accepted.
// Note that either an invalid solution, stale work or non-existent work will return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
return false
}
var errc = make(chan error, 1)
select {
case api.ethash.submitWorkCh <- &mineResult{
nonce: nonce,
mixDigest: digest,
hash: hash,
errc: errc,
}:
case <-api.ethash.exitCh:
return false
}
err := <-errc
return err == nil
}
// SubmitHashRate can be used by remote miners to submit their hash rate.
// This enables the node to report the combined hash rate of all miners
// which submit work through this node.
//
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
return false
}
var done = make(chan struct{}, 1)
select {
case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
case <-api.ethash.exitCh:
return false
}
// Block until hash rate submitted successfully.
<-done
return true
}
// GetHashrate returns the current hashrate of the local CPU miner and remote miners.
func (api *API) GetHashrate() uint64 {
return uint64(api.ethash.Hashrate())
}
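A minimal sketch of how an external miner could drive this API over JSON-RPC, assuming a node with its HTTP endpoint enabled on the default port (nonce and mixDigest stand in for the miner's solution):

    client, err := rpc.Dial("http://127.0.0.1:8545")
    if err != nil {
        log.Fatal(err)
    }
    var work [3]string
    if err := client.Call(&work, "eth_getWork"); err != nil {
        log.Fatal(err)
    }
    // work[0]: header pow-hash, work[1]: DAG seed hash, work[2]: boundary target
    // ... search for a valid nonce, then hand the solution back:
    var accepted bool
    err = client.Call(&accepted, "eth_submitWork", nonce, work[0], mixDigest)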


@@ -461,6 +461,13 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoW difficulty requirements.
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
return ethash.verifySeal(chain, header, false)
}
// verifySeal checks whether a block satisfies the PoW difficulty requirements,
// either using the usual ethash cache for it, or alternatively using a full DAG
// to make remote mining fast.
func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error {
// If we're running a fake PoW, accept any seal as valid
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
time.Sleep(ethash.fakeDelay)
@@ -471,29 +478,52 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
}
// If we're running a shared PoW, delegate verification to it
if ethash.shared != nil {
return ethash.shared.VerifySeal(chain, header)
return ethash.shared.verifySeal(chain, header, fulldag)
}
// Ensure that we have a valid difficulty for the block
if header.Difficulty.Sign() <= 0 {
return errInvalidDifficulty
}
// Recompute the digest and PoW value and verify against the header
// Recompute the digest and PoW values
number := header.Number.Uint64()
cache := ethash.cache(number)
size := datasetSize(number)
if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
// Caches are unmapped in a finalizer. Ensure that the cache stays live
// until after the call to hashimotoLight so it's not unmapped while being used.
runtime.KeepAlive(cache)
var (
digest []byte
result []byte
)
// If fast-but-heavy PoW verification was requested, use an ethash dataset
if fulldag {
dataset := ethash.dataset(number, true)
if dataset.generated() {
digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
// Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
// until after the call to hashimotoFull so it's not unmapped while being used.
runtime.KeepAlive(dataset)
} else {
// Dataset not yet generated, don't hang, use a cache instead
fulldag = false
}
}
// If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache
if !fulldag {
cache := ethash.cache(number)
size := datasetSize(number)
if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
// Caches are unmapped in a finalizer. Ensure that the cache stays alive
// until after the call to hashimotoLight so it's not unmapped while being used.
runtime.KeepAlive(cache)
}
// Verify the calculated values against the ones provided in the header
if !bytes.Equal(header.MixDigest[:], digest) {
return errInvalidMixDigest
}
target := new(big.Int).Div(maxUint256, header.Difficulty)
target := new(big.Int).Div(two256, header.Difficulty)
if new(big.Int).SetBytes(result).Cmp(target) > 0 {
return errInvalidPoW
}


@@ -29,11 +29,14 @@ import (
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"unsafe"
mmap "github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
@@ -43,11 +46,11 @@ import (
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
var (
// maxUint256 is a big integer representing 2^256-1
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// two256 is a big integer representing 2^256
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -279,6 +282,7 @@ type dataset struct {
mmap mmap.MMap // Memory map itself to unmap before releasing
dataset []uint32 // The actual cache data content
once sync.Once // Ensures the cache is generated only once
done uint32 // Atomic flag to determine generation status
}
// newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -290,6 +294,9 @@ func newDataset(epoch uint64) interface{} {
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
d.once.Do(func() {
// Mark the dataset generated after we're done. This is needed for remote miners.
defer atomic.StoreUint32(&d.done, 1)
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
seed := seedHash(d.epoch*epochLength + 1)
@@ -304,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) {
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
return
}
// Disk storage is needed, this will get fancy
var endian string
@@ -346,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) {
})
}
// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
return atomic.LoadUint32(&d.done) == 1
}
// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
if d.mmap != nil {
@@ -389,6 +405,30 @@ type Config struct {
PowMode Mode
}
// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce
mixDigest common.Hash
hash common.Hash
errc chan error
}
// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
id common.Hash
ping time.Time
rate uint64
done chan struct{}
}
// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [3]string
}
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
@@ -403,16 +443,28 @@ type Ethash struct {
update chan struct{} // Notification channel to update mining parameters
hashrate metrics.Meter // Meter tracking the average hashrate
// Remote sealer related fields
workCh chan *types.Block // Notification channel to push new work to remote sealer
resultCh chan *types.Block // Channel used by mining threads to return result
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
// The fields below are hooks for testing
shared *Ethash // Shared PoW verifier to avoid cache regeneration
fakeFail uint64 // Block number which fails PoW check even in fake mode
fakeDelay time.Duration // Time delay to sleep for before returning from verify
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
closeOnce sync.Once // Ensures exit channel will not be closed twice.
exitCh chan chan error // Notification channel to exiting backend threads
}
// New creates a full sized ethash PoW scheme.
func New(config Config) *Ethash {
// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string) *Ethash {
if config.CachesInMem <= 0 {
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1
@@ -423,19 +475,43 @@ func New(config Config) *Ethash {
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
return &Ethash{
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeter(),
ethash := &Ethash{
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeter(),
workCh: make(chan *types.Block),
resultCh: make(chan *types.Block),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
}
go ethash.remote(notify)
return ethash
}
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester() *Ethash {
return New(Config{CachesInMem: 1, PowMode: ModeTest})
func NewTester(notify []string) *Ethash {
ethash := &Ethash{
config: Config{PowMode: ModeTest},
caches: newlru("cache", 1, newCache),
datasets: newlru("dataset", 1, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeter(),
workCh: make(chan *types.Block),
resultCh: make(chan *types.Block),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
}
go ethash.remote(notify)
return ethash
}
// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
@@ -489,6 +565,22 @@ func NewShared() *Ethash {
return &Ethash{shared: sharedEthash}
}
// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
var err error
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
if ethash.exitCh == nil {
return
}
errc := make(chan error)
ethash.exitCh <- errc
err = <-errc
close(ethash.exitCh)
})
return err
}
// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
@@ -511,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache {
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) *dataset {
//
// If async is specified, not only the future but also the current DAG is
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
// Retrieve the requested ethash dataset
epoch := block / epochLength
currentI, futureI := ethash.datasets.get(epoch)
current := currentI.(*dataset)
// Wait for generation finish.
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
// If async is specified, generate everything in a background thread
if async && !current.generated() {
go func() {
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
// If we need a new future dataset, now's a good time to regenerate it.
if futureI != nil {
future := futureI.(*dataset)
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
if futureI != nil {
future := futureI.(*dataset)
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
}
}()
} else {
// Either blocking generation was requested, or already done
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
if futureI != nil {
future := futureI.(*dataset)
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
}
}
return current
}
@@ -561,14 +667,44 @@ func (ethash *Ethash) SetThreads(threads int) {
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes local hashrate, but also includes the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
return ethash.hashrate.Rate1()
// Short circuit if ethash is not running in normal or test mode.
if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
return ethash.hashrate.Rate1()
}
var res = make(chan uint64, 1)
select {
case ethash.fetchRateCh <- res:
case <-ethash.exitCh:
// Return local hashrate only if ethash is stopped.
return ethash.hashrate.Rate1()
}
// Gather total submitted hash rate of remote sealers.
return ethash.hashrate.Rate1() + float64(<-res)
}
// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
return nil
// In order to ensure backward compatibility, we expose ethash RPC APIs
// to both eth and ethash namespaces.
return []rpc.API{
{
Namespace: "eth",
Version: "1.0",
Service: &API{ethash},
Public: true,
},
{
Namespace: "ethash",
Version: "1.0",
Service: &API{ethash},
Public: true,
},
}
}
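Since both namespaces serve the same API object, either prefix works over RPC. A hedged client-side sketch (the endpoint address is an assumption and requires a node with HTTP-RPC enabled):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // assumed RPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	var work [3]string
	// ethash_getWork mirrors eth_getWork; both reach the same service.
	if err := client.Call(&work, "ethash_getWork"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("pow-hash:", work[0], "seed:", work[1], "target:", work[2])
}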
// SeedHash is the seed to use for generating a verification cache and the mining


@@ -23,22 +23,27 @@ import (
"os"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// Tests that ethash works correctly in test mode.
func TestTestMode(t *testing.T) {
head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
ethash := NewTester()
block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil)
ethash := NewTester(nil)
defer ethash.Close()
block, err := ethash.Seal(nil, types.NewBlockWithHeader(header), nil)
if err != nil {
t.Fatalf("failed to seal block: %v", err)
}
head.Nonce = types.EncodeNonce(block.Nonce())
head.MixDigest = block.MixDigest()
if err := ethash.VerifySeal(nil, head); err != nil {
header.Nonce = types.EncodeNonce(block.Nonce())
header.MixDigest = block.MixDigest()
if err := ethash.VerifySeal(nil, header); err != nil {
t.Fatalf("unexpected verification error: %v", err)
}
}
@@ -51,7 +56,8 @@ func TestCacheFileEvict(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest})
e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil)
defer e.Close()
workers := 8
epochs := 100
@@ -73,7 +79,90 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) {
if block < 0 {
block = 0
}
head := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
e.VerifySeal(nil, head)
header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
e.VerifySeal(nil, header)
}
}
func TestRemoteSealer(t *testing.T) {
ethash := NewTester(nil)
defer ethash.Close()
api := &API{ethash}
if _, err := api.GetWork(); err != errNoMiningWork {
t.Error("expect to return an error indicate there is no mining work")
}
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
// Push new work.
ethash.Seal(nil, block, nil)
var (
work [3]string
err error
)
if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() {
t.Error("expect to return a mining work has same hash")
}
if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res {
t.Error("expect to return false when submit a fake solution")
}
// Push new block with same block number to replace the original one.
header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)}
block = types.NewBlockWithHeader(header)
ethash.Seal(nil, block, nil)
if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() {
t.Error("expect to return the latest pushed work")
}
// Push block with higher block number.
newHead := &types.Header{Number: big.NewInt(2), Difficulty: big.NewInt(100)}
newBlock := types.NewBlockWithHeader(newHead)
ethash.Seal(nil, newBlock, nil)
if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res {
t.Error("expect to return false when submit a stale solution")
}
}
func TestHashRate(t *testing.T) {
var (
hashrate = []hexutil.Uint64{100, 200, 300}
expect uint64
ids = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")}
)
ethash := NewTester(nil)
defer ethash.Close()
if tot := ethash.Hashrate(); tot != 0 {
t.Error("expect the result should be zero")
}
api := &API{ethash}
for i := 0; i < len(hashrate); i++ {
if res := api.SubmitHashRate(hashrate[i], ids[i]); !res {
t.Error("remote miner submit hashrate failed")
}
expect += uint64(hashrate[i])
}
if tot := ethash.Hashrate(); tot != float64(expect) {
t.Error("expect total hashrate should be same")
}
}
func TestClosedRemoteSealer(t *testing.T) {
ethash := NewTester(nil)
time.Sleep(1 * time.Second) // ensure exit channel is listening
ethash.Close()
api := &API{ethash}
if _, err := api.GetWork(); err != errEthashStopped {
t.Error("expect to return an error to indicate ethash is stopped")
}
if res := api.SubmitHashRate(hexutil.Uint64(100), common.HexToHash("a")); res {
t.Error("expect to return false when submit hashrate to a stopped ethash")
}
}


@@ -17,12 +17,17 @@
package ethash
import (
"bytes"
crand "crypto/rand"
"encoding/json"
"errors"
"math"
"math/big"
"math/rand"
"net/http"
"runtime"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -30,6 +35,11 @@ import (
"github.com/ethereum/go-ethereum/log"
)
var (
errNoMiningWork = errors.New("no mining work available yet")
errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
)
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
@@ -45,7 +55,6 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
}
// Create a runner and the multiple search threads it directs
abort := make(chan struct{})
found := make(chan *types.Block)
ethash.lock.Lock()
threads := ethash.threads
@@ -64,12 +73,16 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
if threads < 0 {
threads = 0 // Allows disabling local mining without extra logic around local/remote
}
// Push new work to remote sealer
if ethash.workCh != nil {
ethash.workCh <- block
}
var pend sync.WaitGroup
for i := 0; i < threads; i++ {
pend.Add(1)
go func(id int, nonce uint64) {
defer pend.Done()
ethash.mine(block, id, nonce, abort, found)
ethash.mine(block, id, nonce, abort, ethash.resultCh)
}(i, uint64(ethash.rand.Int63()))
}
// Wait until sealing is terminated or a nonce is found
@@ -78,7 +91,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
case <-stop:
// Outside abort, stop all miner threads
close(abort)
case result = <-found:
case result = <-ethash.resultCh:
// One of the threads found a block, abort all others
close(abort)
case <-ethash.update:
@@ -99,9 +112,9 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
var (
header = block.Header()
hash = header.HashNoNonce().Bytes()
target = new(big.Int).Div(maxUint256, header.Difficulty)
target = new(big.Int).Div(two256, header.Difficulty)
number = header.Number.Uint64()
dataset = ethash.dataset(number)
dataset = ethash.dataset(number, false)
)
// Start generating random nonces until we abort or find a good one
var (
@@ -150,3 +163,164 @@ search:
// during sealing so it's not unmapped while being read.
runtime.KeepAlive(dataset)
}
// remote is a standalone goroutine that handles remote mining related requests.
func (ethash *Ethash) remote(notify []string) {
var (
works = make(map[common.Hash]*types.Block)
rates = make(map[common.Hash]hashrate)
currentBlock *types.Block
currentWork [3]string
notifyTransport = &http.Transport{}
notifyClient = &http.Client{
Transport: notifyTransport,
Timeout: time.Second,
}
notifyReqs = make([]*http.Request, len(notify))
)
// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
notifyWork := func() {
work := currentWork
blob, _ := json.Marshal(work)
for i, url := range notify {
// Terminate any previously pending request and create the new work
if notifyReqs[i] != nil {
notifyTransport.CancelRequest(notifyReqs[i])
}
notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
notifyReqs[i].Header.Set("Content-Type", "application/json")
// Push the new work concurrently to all the remote nodes
go func(req *http.Request, url string) {
res, err := notifyClient.Do(req)
if err != nil {
log.Warn("Failed to notify remote miner", "err", err)
} else {
log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
res.Body.Close()
}
}(notifyReqs[i], url)
}
}
// makeWork creates a work package for external miners.
//
// The work package consists of 3 strings:
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
makeWork := func(block *types.Block) {
hash := block.HashNoNonce()
currentWork[0] = hash.Hex()
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
// Track the seal work fetched by remote sealers.
currentBlock = block
works[hash] = block
}
// submitWork verifies the submitted pow solution, returning whether the
// solution was accepted (a rejection can mean either an invalid pow or any
// other error, like no pending work or a stale mining result).
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
// Make sure the work submitted is present
block := works[hash]
if block == nil {
log.Info("Work submitted but none pending", "hash", hash)
return false
}
// Verify the correctness of submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest
start := time.Now()
if err := ethash.verifySeal(nil, header, true); err != nil {
log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
return false
}
// Make sure the result channel is created.
if ethash.resultCh == nil {
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))
// Solution seems to be valid; return it to the miner and notify acceptance.
select {
case ethash.resultCh <- block.WithSeal(header):
delete(works, hash)
return true
default:
log.Info("Work submitted is stale", "hash", hash)
return false
}
}
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case block := <-ethash.workCh:
if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
// Start a new round of mining; throw out all previous work.
works = make(map[common.Hash]*types.Block)
}
// Update current work with the newly received block.
// Note the same work can be posted twice, e.g. when changing CPU threads.
makeWork(block)
// Notify all requested URLs of the new work's availability
notifyWork()
case work := <-ethash.fetchWorkCh:
// Return current mining work to remote miner.
if currentBlock == nil {
work.errc <- errNoMiningWork
} else {
work.res <- currentWork
}
case result := <-ethash.submitWorkCh:
// Verify submitted PoW solution based on maintained mining blocks.
if submitWork(result.nonce, result.mixDigest, result.hash) {
result.errc <- nil
} else {
result.errc <- errInvalidSealResult
}
case result := <-ethash.submitRateCh:
// Track the remote sealer's hash rate by the submitted value.
rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
close(result.done)
case req := <-ethash.fetchRateCh:
// Gather all hash rates submitted by remote sealers.
var total uint64
for _, rate := range rates {
// this could overflow
total += rate.rate
}
req <- total
case <-ticker.C:
// Clear stale submitted hash rate.
for id, rate := range rates {
if time.Since(rate.ping) > 10*time.Second {
delete(rates, id)
}
}
case errc := <-ethash.exitCh:
// Exit remote loop if ethash is closed and return relevant error.
errc <- nil
log.Trace("Ethash remote sealer is exiting")
return
}
}
}
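On the miner's side, a solution for a work package is acceptable when its final digest, read as a 256-bit integer, does not exceed the boundary in work[2]. A small helper sketch (meetsTarget is a hypothetical name; digest is assumed to be the 32-byte ethash final hash):

// meetsTarget reports whether a candidate digest satisfies the boundary
// condition of a work package (work[2] = 2^256/difficulty).
func meetsTarget(digest []byte, boundaryHex string) bool {
	target := new(big.Int).SetBytes(common.HexToHash(boundaryHex).Bytes())
	return new(big.Int).SetBytes(digest).Cmp(target) <= 0
}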


@@ -0,0 +1,115 @@
package ethash
import (
"encoding/json"
"io/ioutil"
"math/big"
"net"
"net/http"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// Tests whether remote HTTP servers are correctly notified of new work.
func TestRemoteNotify(t *testing.T) {
// Start a simple webserver to capture notifications
sink := make(chan [3]string)
server := &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Fatalf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}),
}
// Open a custom listener to extract its local address
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("failed to open notification server: %v", err)
}
defer listener.Close()
go server.Serve(listener)
// Create the custom ethash engine
ethash := NewTester([]string{"http://" + listener.Addr().String()})
defer ethash.Close()
// Stream a work task and ensure the notification bubbles out
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
ethash.Seal(nil, block, nil)
select {
case work := <-sink:
if want := header.HashNoNonce().Hex(); work[0] != want {
t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want)
}
if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want {
t.Errorf("work packet seed mismatch: have %s, want %s", work[1], want)
}
target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty)
if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
}
case <-time.After(time.Second):
t.Fatalf("notification timed out")
}
}
// Tests that pushing work packages fast to the miner doesn't cause any data race
// issues in the notifications.
func TestRemoteMultiNotify(t *testing.T) {
// Start a simple webserver to capture notifications
sink := make(chan [3]string, 64)
server := &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Fatalf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}),
}
// Open a custom listener to extract its local address
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("failed to open notification server: %v", err)
}
defer listener.Close()
go server.Serve(listener)
// Create the custom ethash engine
ethash := NewTester([]string{"http://" + listener.Addr().String()})
defer ethash.Close()
// Stream a lot of work tasks and ensure all the notifications bubble out
for i := 0; i < cap(sink); i++ {
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
ethash.Seal(nil, block, nil)
}
for i := 0; i < cap(sink); i++ {
select {
case <-sink:
case <-time.After(250 * time.Millisecond):
t.Fatalf("notification %d timed out", i)
}
}
}


@@ -314,7 +314,7 @@ func (c *Console) Interactive() {
input = "" // Current user input
scheduler = make(chan string) // Channel to send the next prompt on and receive the input
)
// Start a goroutine to listen for promt requests and send back inputs
// Start a goroutine to listen for prompt requests and send back inputs
go func() {
for {
// Read the next user input


@@ -201,7 +201,7 @@ func TestInteractive(t *testing.T) {
go tester.console.Interactive()
// Wait for a promt and send a statement back
// Wait for a prompt and send a statement back
select {
case <-tester.input.scheduler:
case <-time.After(time.Second):
@@ -212,7 +212,7 @@ func TestInteractive(t *testing.T) {
case <-time.After(time.Second):
t.Fatalf("input feedback timeout")
}
// Wait for the second promt and ensure first statement was evaluated
// Wait for the second prompt and ensure first statement was evaluated
select {
case <-tester.input.scheduler:
case <-time.After(time.Second):
@@ -249,7 +249,7 @@ func TestExecute(t *testing.T) {
}
// Tests that the JavaScript objects returned by statement executions are properly
// pretty printed instead of just displaing "[object]".
// pretty printed instead of just displaying "[object]".
func TestPrettyPrint(t *testing.T) {
tester := newTester(t, nil)
defer tester.Close(t)
@@ -300,7 +300,7 @@ func TestIndenting(t *testing.T) {
}{
{`var a = 1;`, 0},
{`"some string"`, 0},
{`"some string with (parentesis`, 0},
{`"some string with (parenthesis`, 0},
{`"some string with newline
("`, 0},
{`function v(a,b) {}`, 0},


@@ -27,7 +27,7 @@ import (
// Only this reader may be used for input because it keeps an internal buffer.
var Stdin = newTerminalPrompter()
// UserPrompter defines the methods needed by the console to promt the user for
// UserPrompter defines the methods needed by the console to prompt the user for
// various types of inputs.
type UserPrompter interface {
// PromptInput displays the given prompt to the user and requests some textual


@@ -46,7 +46,7 @@ func newTestBackend() *backends.SimulatedBackend {
addr0: {Balance: big.NewInt(1000000000)},
addr1: {Balance: big.NewInt(1000000000)},
addr2: {Balance: big.NewInt(1000000000)},
})
}, 10000000)
}
func deploy(prvKey *ecdsa.PrivateKey, amount *big.Int, backend *backends.SimulatedBackend) (common.Address, error) {


@@ -40,7 +40,7 @@ var (
)
func main() {
backend := backends.NewSimulatedBackend(testAlloc)
backend := backends.NewSimulatedBackend(testAlloc, uint64(100000000))
auth := bind.NewKeyedTransactor(testKey)
// Deploy the contract, get the code.


@@ -35,7 +35,7 @@ var (
)
func TestENS(t *testing.T) {
contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}})
contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000)
transactOpts := bind.NewKeyedTransactor(key)
ensAddr, ens, err := DeployENS(transactOpts, contractBackend)


@@ -899,9 +899,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
return NonStatTy, err
}
// Write other block data using a batch.
batch := bc.db.NewBatch()
rawdb.WriteBlock(batch, block)
rawdb.WriteBlock(bc.db, block)
root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
@@ -955,6 +953,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
}
}
}
// Write other block data using a batch.
batch := bc.db.NewBatch()
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
// If the total difficulty is higher than our known, add it to the canonical chain


@@ -17,6 +17,7 @@
package core
import (
"context"
"encoding/binary"
"fmt"
"sync"
@@ -37,11 +38,11 @@ import (
type ChainIndexerBackend interface {
// Reset initiates the processing of a new chain segment, potentially terminating
// any partially completed operations (in case of a reorg).
Reset(section uint64, prevHead common.Hash) error
Reset(ctx context.Context, section uint64, prevHead common.Hash) error
// Process crunches through the next header in the chain segment. The caller
// will ensure a sequential order of headers.
Process(header *types.Header)
Process(ctx context.Context, header *types.Header) error
// Commit finalizes the section metadata and stores it into the database.
Commit() error
@@ -71,9 +72,11 @@ type ChainIndexer struct {
backend ChainIndexerBackend // Background processor generating the index data content
children []*ChainIndexer // Child indexers to cascade chain updates to
active uint32 // Flag whether the event loop was started
update chan struct{} // Notification channel that headers should be processed
quit chan chan error // Quit channel to tear down running goroutines
active uint32 // Flag whether the event loop was started
update chan struct{} // Notification channel that headers should be processed
quit chan chan error // Quit channel to tear down running goroutines
ctx context.Context
ctxCancel func()
sectionSize uint64 // Number of blocks in a single chain segment to process
confirmsReq uint64 // Number of confirmations before processing a completed segment
@@ -105,6 +108,8 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
}
// Initialize database dependent fields and start the updater
c.loadValidSections()
c.ctx, c.ctxCancel = context.WithCancel(context.Background())
go c.updateLoop()
return c
@@ -138,6 +143,8 @@ func (c *ChainIndexer) Start(chain ChainIndexerChain) {
func (c *ChainIndexer) Close() error {
var errs []error
c.ctxCancel()
// Tear down the primary update loop
errc := make(chan error)
c.quit <- errc
@@ -297,6 +304,12 @@ func (c *ChainIndexer) updateLoop() {
c.lock.Unlock()
newHead, err := c.processSection(section, oldHead)
if err != nil {
select {
case <-c.ctx.Done():
<-c.quit <- nil
return
default:
}
c.log.Error("Section processing failed", "error", err)
}
c.lock.Lock()
@@ -344,7 +357,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
// Reset and partial processing
if err := c.backend.Reset(section, lastHead); err != nil {
if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
c.setValidSections(0)
return common.Hash{}, err
}
@@ -360,11 +373,12 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
} else if header.ParentHash != lastHead {
return common.Hash{}, fmt.Errorf("chain reorged during section processing")
}
c.backend.Process(header)
if err := c.backend.Process(c.ctx, header); err != nil {
return common.Hash{}, err
}
lastHead = header.Hash()
}
if err := c.backend.Commit(); err != nil {
c.log.Error("Section commit failed", "error", err)
return common.Hash{}, err
}
return lastHead, nil
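A sketch of how a backend can honor the new context; myBackend is a hypothetical implementation, not part of this change:

func (b *myBackend) Process(ctx context.Context, header *types.Header) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // the indexer is shutting down; abort the section
	default:
	}
	// ... index the header as before ...
	return nil
}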


@@ -17,6 +17,7 @@
package core
import (
"context"
"fmt"
"math/big"
"math/rand"
@@ -210,13 +211,13 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
return b.stored * b.indexer.sectionSize
}
func (b *testChainIndexBackend) Reset(section uint64, prevHead common.Hash) error {
func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
b.section = section
b.headerCnt = 0
return nil
}
func (b *testChainIndexBackend) Process(header *types.Header) {
func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error {
b.headerCnt++
if b.headerCnt > b.indexer.sectionSize {
b.t.Error("Processing too many headers")
@@ -227,6 +228,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
b.t.Fatal("Unexpected call to Process")
case b.processCh <- header.Number.Uint64():
}
return nil
}
func (b *testChainIndexBackend) Commit() error {


@@ -29,9 +29,6 @@ type PendingLogsEvent struct {
Logs []*types.Log
}
// PendingStateEvent is posted pre mining and notifies of pending state changes.
type PendingStateEvent struct{}
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }


@@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
// senderCacher is a concurrent tranaction sender recoverer anc cacher.
// senderCacher is a concurrent transaction sender recoverer and cacher.
var senderCacher = newTxSenderCacher(runtime.NumCPU())
// txSenderCacherRequest is a request for recovering transaction senders with a
@@ -45,7 +45,7 @@ type txSenderCacher struct {
}
// newTxSenderCacher creates a new transaction sender background cacher and starts
// as many procesing goroutines as allowed by the GOMAXPROCS on construction.
// as many processing goroutines as allowed by the GOMAXPROCS on construction.
func newTxSenderCacher(threads int) *txSenderCacher {
cacher := &txSenderCacher{
tasks: make(chan *txSenderCacherRequest, threads),


@@ -123,9 +123,10 @@ type blockChain interface {
// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
NoLocals bool // Whether local transaction handling should be disabled
Journal string // Journal of local transactions to survive node restarts
Rejournal time.Duration // Time interval to regenerate the local transaction journal
Locals []common.Address // Addresses that should be treated by default as local
NoLocals bool // Whether local transaction handling should be disabled
Journal string // Journal of local transactions to survive node restarts
Rejournal time.Duration // Time interval to regenerate the local transaction journal
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
@@ -231,6 +232,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
}
pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals {
log.Info("Setting new local account", "address", addr)
pool.locals.add(addr)
}
pool.priced = newTxPricedList(pool.all)
pool.reset(nil, chain.CurrentBlock().Header())
@@ -534,6 +539,14 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
return pending, nil
}
// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.locals.flatten()
}
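A sketch of pre-seeding local accounts through the new config field (the address is illustrative; chainConfig and chain stand in for the values a node normally supplies):

cfg := core.DefaultTxPoolConfig
cfg.Locals = []common.Address{
	common.HexToAddress("0x0000000000000000000000000000000000000001"), // illustrative
}
pool := core.NewTxPool(cfg, chainConfig, chain)
fmt.Println(pool.Locals()) // includes the pre-seeded address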
// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
@@ -665,7 +678,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
}
// Mark local addresses and journal local transactions
if local {
pool.locals.add(from)
if !pool.locals.contains(from) {
log.Info("Setting new local account", "address", from)
pool.locals.add(from)
}
}
pool.journalTx(from, tx)
@@ -1138,6 +1154,7 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type accountSet struct {
accounts map[common.Address]struct{}
signer types.Signer
cache *[]common.Address
}
// newAccountSet creates a new address set with an associated signer for sender
@@ -1167,6 +1184,20 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool {
// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
as.accounts[addr] = struct{}{}
as.cache = nil
}
// flatten returns the list of addresses within this set, also caching it for later
// reuse. The returned slice should not be changed!
func (as *accountSet) flatten() []common.Address {
if as.cache == nil {
accounts := make([]common.Address, 0, len(as.accounts))
for account := range as.accounts {
accounts = append(accounts, account)
}
as.cache = &accounts
}
return *as.cache
}
// txLookup is used internally by TxPool to track transactions while allowing lookup without


@@ -119,7 +119,7 @@ func isProtectedV(V *big.Int) bool {
v := V.Uint64()
return v != 27 && v != 28
}
// anything not 27 or 28 are considered unprotected
// anything not 27 or 28 is considered protected
return true
}


@@ -184,7 +184,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
precompiles = PrecompiledContractsByzantium
}
if precompiles[addr] == nil && evm.ChainConfig().IsEIP158(evm.BlockNumber) && value.Sign() == 0 {
// Calling a non existing account, don't do antything, but ping the tracer
// Calling a non existing account, don't do anything, but ping the tracer
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value)
evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil)
@@ -427,7 +427,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
// Create2 creates a new contract using code as deployment code.
//
// The different between Create2 with Create is Create2 uses sha3(msg.sender ++ salt ++ init_code)[12:]
// The difference between Create2 and Create is that Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code)


@@ -214,6 +214,7 @@ func opBenchmark(bench *testing.B, op func(pc *uint64, interpreter *EVMInterpret
)
env.interpreter = evmInterpreter
evmInterpreter.intPool = poolOfIntPools.get()
// convert args
byteArgs := make([][]byte, len(args))
for i, arg := range args {
@@ -229,6 +230,7 @@ func opBenchmark(bench *testing.B, op func(pc *uint64, interpreter *EVMInterpret
op(&pc, evmInterpreter, nil, nil, stack)
stack.pop()
}
poolOfIntPools.put(evmInterpreter.intPool)
}
func BenchmarkOpAdd64(b *testing.B) {
@@ -474,6 +476,7 @@ func BenchmarkOpMstore(bench *testing.B) {
)
env.interpreter = evmInterpreter
evmInterpreter.intPool = poolOfIntPools.get()
mem.Resize(64)
pc := uint64(0)
memStart := big.NewInt(0)
@@ -484,4 +487,5 @@ func BenchmarkOpMstore(bench *testing.B) {
stack.pushN(value, memStart)
opMstore(&pc, evmInterpreter, nil, mem, stack)
}
poolOfIntPools.put(evmInterpreter.intPool)
}

crypto/bn256/LICENSE

@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2018 Péter Szilágyi. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,18 +1,6 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Copyright 2018 Péter Szilágyi. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// +build amd64 arm64


@@ -1,18 +1,6 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Copyright 2018 Péter Szilágyi. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// +build gofuzz


@@ -1,18 +1,6 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Copyright 2018 Péter Szilágyi. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// +build !amd64,!arm64


@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -110,7 +110,7 @@ TEXT ·gfpMul(SB),0,$160-24
MOVQ b+16(FP), SI
// Jump to a slightly different implementation if MULX isn't supported.
CMPB runtime·support_bmi2(SB), $0
CMPB ·hasBMI2(SB), $0
JE nobmi2Mul
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))


@@ -5,6 +5,13 @@ package bn256
// This file contains forward declarations for the architecture-specific
// assembly implementations of these functions, provided that they exist.
import (
"golang.org/x/sys/cpu"
)
//nolint:varcheck
var hasBMI2 = cpu.X86.HasBMI2
//go:noescape
func gfpNeg(c, a *gfP)


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bn256 implements a particular bilinear group at the 128-bit security level.
// Package bn256 implements a particular bilinear group.
//
// Bilinear groups are the basis of many of the new cryptographic protocols
// that have been proposed over the past decade. They consist of a triplet of
@@ -14,6 +14,10 @@
// Barreto-Naehrig curve as described in
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
// with the implementation described in that paper.
//
// (This package previously claimed to operate at a 128-bit security level.
// However, recent improvements in attacks mean that is no longer true. See
// https://moderncrypto.org/mail-archive/curves/2016/000740.html.)
package bn256
import (
@@ -50,8 +54,8 @@ func RandomG1(r io.Reader) (*big.Int, *G1, error) {
return k, new(G1).ScalarBaseMult(k), nil
}
func (g *G1) String() string {
return "bn256.G1" + g.p.String()
func (e *G1) String() string {
return "bn256.G1" + e.p.String()
}
// CurvePoints returns p's curve points in big integer
@@ -98,15 +102,19 @@ func (e *G1) Neg(a *G1) *G1 {
}
// Marshal converts n to a byte slice.
func (n *G1) Marshal() []byte {
n.p.MakeAffine(nil)
xBytes := new(big.Int).Mod(n.p.x, P).Bytes()
yBytes := new(big.Int).Mod(n.p.y, P).Bytes()
func (e *G1) Marshal() []byte {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if e.p.IsInfinity() {
return make([]byte, numBytes*2)
}
e.p.MakeAffine(nil)
xBytes := new(big.Int).Mod(e.p.x, P).Bytes()
yBytes := new(big.Int).Mod(e.p.y, P).Bytes()
ret := make([]byte, numBytes*2)
copy(ret[1*numBytes-len(xBytes):], xBytes)
copy(ret[2*numBytes-len(yBytes):], yBytes)
@@ -175,8 +183,8 @@ func RandomG2(r io.Reader) (*big.Int, *G2, error) {
return k, new(G2).ScalarBaseMult(k), nil
}
func (g *G2) String() string {
return "bn256.G2" + g.p.String()
func (e *G2) String() string {
return "bn256.G2" + e.p.String()
}
// CurvePoints returns the curve points of p which includes the real
@@ -216,6 +224,13 @@ func (e *G2) Add(a, b *G2) *G2 {
// Marshal converts n into a byte slice.
func (n *G2) Marshal() []byte {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if n.p.IsInfinity() {
return make([]byte, numBytes*4)
}
n.p.MakeAffine(nil)
xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes()
@@ -223,9 +238,6 @@ func (n *G2) Marshal() []byte {
yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes()
yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes()
// Each value is a 256-bit number.
const numBytes = 256 / 8
ret := make([]byte, numBytes*4)
copy(ret[1*numBytes-len(xxBytes):], xxBytes)
copy(ret[2*numBytes-len(xyBytes):], xyBytes)


@@ -245,11 +245,19 @@ func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoi
return c
}
// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets
// c to 0 : 1 : 0.
func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
return c
}
if c.IsInfinity() {
c.x.SetInt64(0)
c.y.SetInt64(1)
c.z.SetInt64(0)
c.t.SetInt64(0)
return c
}
zInv := pool.Get().ModInverse(c.z, P)
t := pool.Get().Mul(c.y, zInv)
t.Mod(t, P)


@@ -225,11 +225,19 @@ func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoi
return c
}
// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets
// c to 0 : 1 : 0.
func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
if c.z.IsOne() {
return c
}
if c.IsInfinity() {
c.x.SetZero()
c.y.SetOne()
c.z.SetZero()
c.t.SetZero()
return c
}
zInv := newGFp2(pool).Invert(c.z, pool)
t := newGFp2(pool).Mul(c.y, zInv, pool)
zInv2 := newGFp2(pool).Square(zInv, pool)


@@ -78,8 +78,8 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
// CreateAddress2 creates an ethereum address given the address bytes, initial
// contract code and a salt.
func CreateAddress2(b common.Address, salt common.Hash, code []byte) common.Address {
return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt.Bytes(), code)[12:])
func CreateAddress2(b common.Address, salt [32]byte, code []byte) common.Address {
return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], Keccak256(code))[12:])
}
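A sketch of the revised derivation in use (sender, salt and init code are illustrative values):

sender := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // illustrative
var salt [32]byte              // zero salt, for illustration
initCode := []byte{0x60, 0x00} // illustrative init code
// address = Keccak256(0xff ++ sender ++ salt ++ Keccak256(initCode))[12:]
addr := crypto.CreateAddress2(sender, salt, initCode)
fmt.Println(addr.Hex())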
// ToECDSA creates a private key with the given D value.


@@ -25,6 +25,7 @@ import (
"math/big"
"os"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -34,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
@@ -70,16 +70,12 @@ func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
// PublicMinerAPI provides an API to control the miner.
// It offers only methods that operate on data that pose no security risk when it is publicly accessible.
type PublicMinerAPI struct {
e *Ethereum
agent *miner.RemoteAgent
e *Ethereum
}
// NewPublicMinerAPI create a new PublicMinerAPI instance.
func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {
agent := miner.NewRemoteAgent(e.BlockChain(), e.Engine())
e.Miner().Register(agent)
return &PublicMinerAPI{e, agent}
return &PublicMinerAPI{e}
}
// Mining returns an indication if this node is currently mining.
@@ -87,37 +83,6 @@ func (api *PublicMinerAPI) Mining() bool {
return api.e.IsMining()
}
// SubmitWork can be used by external miner to submit their POW solution. It returns an indication if the work was
// accepted. Note, this is not an indication if the provided work was valid!
func (api *PublicMinerAPI) SubmitWork(nonce types.BlockNonce, solution, digest common.Hash) bool {
return api.agent.SubmitWork(nonce, digest, solution)
}
// GetWork returns a work package for external miner. The work package consists of 3 strings
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (api *PublicMinerAPI) GetWork() ([3]string, error) {
if !api.e.IsMining() {
if err := api.e.StartMining(false); err != nil {
return [3]string{}, err
}
}
work, err := api.agent.GetWork()
if err != nil {
return work, fmt.Errorf("mining not ready: %v", err)
}
return work, nil
}
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
// hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which
// must be unique between nodes.
func (api *PublicMinerAPI) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool {
api.agent.SubmitHashrate(id, uint64(hashrate))
return true
}
// PrivateMinerAPI provides private RPC methods to control the miner.
// These methods can be abused by external users and must be considered insecure for use by untrusted users.
type PrivateMinerAPI struct {
@@ -132,7 +97,8 @@ func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI {
// Start the miner with the given number of threads. If threads is nil the number
// of workers started is equal to the number of logical CPUs that are usable by
// this process. If mining is already running, this method adjusts the number of
// threads allowed to use.
// threads allowed to use and updates the minimum price required by the transaction
// pool.
func (api *PrivateMinerAPI) Start(threads *int) error {
// Set the number of threads if the seal engine supports it
if threads == nil {
@@ -153,7 +119,6 @@ func (api *PrivateMinerAPI) Start(threads *int) error {
api.e.lock.RLock()
price := api.e.gasPrice
api.e.lock.RUnlock()
api.e.txPool.SetGasPrice(price)
return api.e.StartMining(true)
}
@@ -196,9 +161,14 @@ func (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
return true
}
// SetRecommitInterval updates the interval for miner sealing work recommitting.
func (api *PrivateMinerAPI) SetRecommitInterval(interval int) {
api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond)
}
// GetHashrate returns the current hashrate of the miner.
func (api *PrivateMinerAPI) GetHashrate() uint64 {
return uint64(api.e.miner.HashRate())
return api.e.miner.HashRate()
}
// PrivateAdminAPI is the collection of Ethereum full node-related APIs


@@ -119,6 +119,9 @@ func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.Block
if to == nil {
return nil, fmt.Errorf("end block #%d not found", end)
}
if from.Number().Cmp(to.Number()) >= 0 {
return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start)
}
return api.traceChain(ctx, from, to, config)
}
@@ -297,7 +300,9 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
database.TrieDB().Reference(root, common.Hash{})
}
// Dereference all past tries we ourselves are done working with
database.TrieDB().Dereference(proot)
if proot != (common.Hash{}) {
database.TrieDB().Dereference(proot)
}
proot = root
// TODO(karalabe): Do we need the preimages? Won't they accumulate too much?
@@ -526,7 +531,9 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
return nil, err
}
database.TrieDB().Reference(root, common.Hash{})
database.TrieDB().Dereference(proot)
if proot != (common.Hash{}) {
database.TrieDB().Dereference(proot)
}
proot = root
}
nodes, imgs := database.TrieDB().Size()


@@ -124,13 +124,13 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
engine: CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, chainDb),
shutdownChan: make(chan bool),
networkID: config.NetworkId,
gasPrice: config.GasPrice,
gasPrice: config.MinerGasPrice,
etherbase: config.Etherbase,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks),
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, bloomConfirms),
}
log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
@@ -138,7 +138,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if !config.SkipBcVersionCheck {
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
if bcVersion != core.BlockChainVersion && bcVersion != 0 {
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d). Run geth upgradedb.\n", bcVersion, core.BlockChainVersion)
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion)
}
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}
@@ -166,13 +166,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
return nil, err
}
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
eth.miner.SetExtra(makeExtraData(config.ExtraData))
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit)
eth.miner.SetExtra(makeExtraData(config.MinerExtraData))
eth.APIBackend = &EthAPIBackend{eth, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.GasPrice
gpoParams.Default = config.MinerGasPrice
}
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
@@ -209,7 +210,7 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
}
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, db ethdb.Database) consensus.Engine {
// If proof-of-authority is requested, set it up
if chainConfig.Clique != nil {
return clique.New(chainConfig.Clique, db)
@@ -221,7 +222,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
return ethash.NewFaker()
case ethash.ModeTest:
log.Warn("Ethash used in test mode")
return ethash.NewTester()
return ethash.NewTester(nil)
case ethash.ModeShared:
log.Warn("Ethash used in shared mode")
return ethash.NewShared()
@@ -233,7 +234,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
DatasetDir: config.DatasetDir,
DatasetsInMem: config.DatasetsInMem,
DatasetsOnDisk: config.DatasetsOnDisk,
})
}, notify)
engine.SetThreads(-1) // Disable CPU mining
return engine
}
@@ -411,6 +412,7 @@ func (s *Ethereum) Start(srvr *p2p.Server) error {
func (s *Ethereum) Stop() error {
s.bloomIndexer.Close()
s.blockchain.Stop()
s.engine.Close()
s.protocolManager.Stop()
if s.lesServer != nil {
s.lesServer.Stop()
@@ -421,6 +423,5 @@ func (s *Ethereum) Stop() error {
s.chainDb.Close()
close(s.shutdownChan)
return nil
}


@@ -17,6 +17,7 @@
package eth
import (
"context"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -92,30 +93,28 @@ const (
// BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index
// for the Ethereum header bloom filters, permitting blazing fast filtering.
type BloomIndexer struct {
size uint64 // section size to generate bloombits for
db ethdb.Database // database instance to write index data and metadata into
gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index
section uint64 // Section is the section number being processed currently
head common.Hash // Head is the hash of the last header processed
size uint64 // section size to generate bloombits for
db ethdb.Database // database instance to write index data and metadata into
gen *bloombits.Generator // generator to rotate the bloom bits creating the bloom index
section uint64 // Section is the section number being processed currently
head common.Hash // Head is the hash of the last header processed
}
// NewBloomIndexer returns a chain indexer that generates bloom bits data for the
// canonical chain for fast logs filtering.
func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer {
func NewBloomIndexer(db ethdb.Database, size, confReq uint64) *core.ChainIndexer {
backend := &BloomIndexer{
db: db,
size: size,
}
table := ethdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix))
return core.NewChainIndexer(db, table, backend, size, bloomConfirms, bloomThrottling, "bloombits")
return core.NewChainIndexer(db, table, backend, size, confReq, bloomThrottling, "bloombits")
}
// Reset implements core.ChainIndexerBackend, starting a new bloombits index
// section.
func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error {
func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
gen, err := bloombits.NewGenerator(uint(b.size))
b.gen, b.section, b.head = gen, section, common.Hash{}
return err
@@ -123,16 +122,16 @@ func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error
// Process implements core.ChainIndexerBackend, adding a new header's bloom into
// the index.
func (b *BloomIndexer) Process(header *types.Header) {
func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error {
b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom)
b.head = header.Hash()
return nil
}
// Commit implements core.ChainIndexerBackend, finalizing the bloom section and
// writing it out into the database.
func (b *BloomIndexer) Commit() error {
batch := b.db.NewBatch()
for i := 0; i < types.BloomBitLength; i++ {
bits, err := b.gen.Bitset(uint(i))
if err != nil {


@@ -48,7 +48,8 @@ var DefaultConfig = Config{
DatabaseCache: 768,
TrieCache: 256,
TrieTimeout: 60 * time.Minute,
GasPrice: big.NewInt(18 * params.Shannon),
MinerGasPrice: big.NewInt(18 * params.Shannon),
MinerRecommit: 3 * time.Second,
TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
@@ -95,10 +96,12 @@ type Config struct {
TrieTimeout time.Duration
// Mining-related options
Etherbase common.Address `toml:",omitempty"`
MinerThreads int `toml:",omitempty"`
ExtraData []byte `toml:",omitempty"`
GasPrice *big.Int
Etherbase common.Address `toml:",omitempty"`
MinerThreads int `toml:",omitempty"`
MinerNotify []string `toml:",omitempty"`
MinerExtraData []byte `toml:",omitempty"`
MinerGasPrice *big.Int
MinerRecommit time.Duration
// Ethash options
Ethash ethash.Config
@@ -117,5 +120,5 @@ type Config struct {
}
type configMarshaling struct {
ExtraData hexutil.Bytes
MinerExtraData hexutil.Bytes
}
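A sketch of setting the renamed miner options programmatically (values illustrative, mirroring the defaults above):

cfg := eth.DefaultConfig
cfg.MinerNotify = []string{"http://127.0.0.1:8550"} // forwarded to ethash.New
cfg.MinerGasPrice = big.NewInt(18 * params.Shannon)
cfg.MinerRecommit = 3 * time.Second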


@@ -4,6 +4,7 @@ package eth
import (
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -15,20 +16,26 @@ import (
var _ = (*configMarshaling)(nil)
// MarshalTOML marshals as TOML.
func (c Config) MarshalTOML() (interface{}, error) {
type Config struct {
Genesis *core.Genesis `toml:",omitempty"`
NetworkId uint64
SyncMode downloader.SyncMode
NoPruning bool
LightServ int `toml:",omitempty"`
LightPeers int `toml:",omitempty"`
SkipBcVersionCheck bool `toml:"-"`
DatabaseHandles int `toml:"-"`
DatabaseCache int
TrieCache int
TrieTimeout time.Duration
Etherbase common.Address `toml:",omitempty"`
MinerThreads int `toml:",omitempty"`
ExtraData hexutil.Bytes `toml:",omitempty"`
GasPrice *big.Int
MinerNotify []string `toml:",omitempty"`
MinerExtraData hexutil.Bytes `toml:",omitempty"`
MinerGasPrice *big.Int
MinerRecommit time.Duration
Ethash ethash.Config
TxPool core.TxPoolConfig
GPO gasprice.Config
@@ -39,15 +46,20 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.Genesis = c.Genesis
enc.NetworkId = c.NetworkId
enc.SyncMode = c.SyncMode
enc.NoPruning = c.NoPruning
enc.LightServ = c.LightServ
enc.LightPeers = c.LightPeers
enc.SkipBcVersionCheck = c.SkipBcVersionCheck
enc.DatabaseHandles = c.DatabaseHandles
enc.DatabaseCache = c.DatabaseCache
enc.TrieCache = c.TrieCache
enc.TrieTimeout = c.TrieTimeout
enc.Etherbase = c.Etherbase
enc.MinerThreads = c.MinerThreads
enc.ExtraData = c.ExtraData
enc.GasPrice = c.GasPrice
enc.MinerNotify = c.MinerNotify
enc.MinerExtraData = c.MinerExtraData
enc.MinerGasPrice = c.MinerGasPrice
enc.MinerRecommit = c.MinerRecommit
enc.Ethash = c.Ethash
enc.TxPool = c.TxPool
enc.GPO = c.GPO
@@ -56,20 +68,26 @@ func (c Config) MarshalTOML() (interface{}, error) {
return &enc, nil
}
// UnmarshalTOML unmarshals from TOML.
func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
type Config struct {
Genesis *core.Genesis `toml:",omitempty"`
NetworkId *uint64
SyncMode *downloader.SyncMode
NoPruning *bool
LightServ *int `toml:",omitempty"`
LightPeers *int `toml:",omitempty"`
SkipBcVersionCheck *bool `toml:"-"`
DatabaseHandles *int `toml:"-"`
DatabaseCache *int
TrieCache *int
TrieTimeout *time.Duration
Etherbase *common.Address `toml:",omitempty"`
MinerThreads *int `toml:",omitempty"`
ExtraData *hexutil.Bytes `toml:",omitempty"`
GasPrice *big.Int
MinerNotify []string `toml:",omitempty"`
MinerExtraData *hexutil.Bytes `toml:",omitempty"`
MinerGasPrice *big.Int
MinerRecommit *time.Duration
Ethash *ethash.Config
TxPool *core.TxPoolConfig
GPO *gasprice.Config
@@ -89,6 +107,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.SyncMode != nil {
c.SyncMode = *dec.SyncMode
}
if dec.NoPruning != nil {
c.NoPruning = *dec.NoPruning
}
if dec.LightServ != nil {
c.LightServ = *dec.LightServ
}
@@ -104,17 +125,29 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DatabaseCache != nil {
c.DatabaseCache = *dec.DatabaseCache
}
if dec.TrieCache != nil {
c.TrieCache = *dec.TrieCache
}
if dec.TrieTimeout != nil {
c.TrieTimeout = *dec.TrieTimeout
}
if dec.Etherbase != nil {
c.Etherbase = *dec.Etherbase
}
if dec.MinerThreads != nil {
c.MinerThreads = *dec.MinerThreads
}
if dec.ExtraData != nil {
c.ExtraData = *dec.ExtraData
if dec.MinerNotify != nil {
c.MinerNotify = dec.MinerNotify
}
if dec.GasPrice != nil {
c.GasPrice = dec.GasPrice
if dec.MinerExtraData != nil {
c.MinerExtraData = *dec.MinerExtraData
}
if dec.MinerGasPrice != nil {
c.MinerGasPrice = dec.MinerGasPrice
}
if dec.MinerRecommit != nil {
c.MinerRecommit = *dec.MinerRecommit
}
if dec.Ethash != nil {
c.Ethash = *dec.Ethash


@@ -21,6 +21,7 @@ var Modules = map[string]string{
"admin": Admin_JS,
"chequebook": Chequebook_JS,
"clique": Clique_JS,
"ethash": Ethash_JS,
"debug": Debug_JS,
"eth": Eth_JS,
"miner": Miner_JS,
@@ -109,6 +110,34 @@ web3._extend({
});
`
const Ethash_JS = `
web3._extend({
property: 'ethash',
methods: [
new web3._extend.Method({
name: 'getWork',
call: 'ethash_getWork',
params: 0
}),
new web3._extend.Method({
name: 'getHashrate',
call: 'ethash_getHashrate',
params: 0
}),
new web3._extend.Method({
name: 'submitWork',
call: 'ethash_submitWork',
params: 3,
}),
new web3._extend.Method({
name: 'submitHashRate',
call: 'ethash_submitHashRate',
params: 2,
}),
]
});
`
const Admin_JS = `
web3._extend({
property: 'admin',
@@ -123,6 +152,16 @@ web3._extend({
call: 'admin_removePeer',
params: 1
}),
new web3._extend.Method({
name: 'addTrustedPeer',
call: 'admin_addTrustedPeer',
params: 1
}),
new web3._extend.Method({
name: 'removeTrustedPeer',
call: 'admin_removeTrustedPeer',
params: 1
}),
new web3._extend.Method({
name: 'exportChain',
call: 'admin_exportChain',
@@ -480,6 +519,11 @@ web3._extend({
params: 1,
inputFormatter: [web3._extend.utils.fromDecimal]
}),
new web3._extend.Method({
name: 'setRecommitInterval',
call: 'miner_setRecommitInterval',
params: 1,
}),
new web3._extend.Method({
name: 'getHashrate',
call: 'miner_getHashrate'
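
These console bindings map one-to-one onto RPC methods, so they can also be exercised from Go. A hedged sketch using the `rpc` package against a local node (the IPC path, the millisecond unit of the recommit interval, and the result shape of `ethash_getWork` are assumptions):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("/tmp/geth.ipc") // hypothetical IPC endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// miner_setRecommitInterval takes a single parameter (params: 1 above).
	if err := client.Call(nil, "miner_setRecommitInterval", 3000); err != nil {
		fmt.Println("setRecommitInterval:", err)
	}

	// ethash_getWork takes no parameters and returns the current work package.
	var work []string
	if err := client.Call(&work, "ethash_getWork"); err != nil {
		fmt.Println("getWork:", err)
	}
	fmt.Println("work package:", work)
}
```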


@@ -34,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/light"
@@ -47,26 +46,24 @@ import (
)
type LightEthereum struct {
config *eth.Config
lesCommons
odr *LesOdr
relay *LesTxRelay
chainConfig *params.ChainConfig
// Channel for shutting down the service
shutdownChan chan bool
// Handlers
peers *peerSet
txPool *light.TxPool
blockchain *light.LightChain
protocolManager *ProtocolManager
serverPool *serverPool
reqDist *requestDistributor
retriever *retrieveManager
// DB interfaces
chainDb ethdb.Database // Block chain database
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer, chtIndexer, bloomTrieIndexer *core.ChainIndexer
// Handlers
peers *peerSet
txPool *light.TxPool
blockchain *light.LightChain
serverPool *serverPool
reqDist *requestDistributor
retriever *retrieveManager
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer
ApiBackend *LesApiBackend
@@ -95,30 +92,41 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
quitSync := make(chan struct{})
leth := &LightEthereum{
config: config,
chainConfig: chainConfig,
chainDb: chainDb,
eventMux: ctx.EventMux,
peers: peers,
reqDist: newRequestDistributor(peers, quitSync),
accountManager: ctx.AccountManager,
engine: eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
shutdownChan: make(chan bool),
networkId: config.NetworkId,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency),
chtIndexer: light.NewChtIndexer(chainDb, true),
bloomTrieIndexer: light.NewBloomTrieIndexer(chainDb, true),
lesCommons: lesCommons{
chainDb: chainDb,
config: config,
},
chainConfig: chainConfig,
eventMux: ctx.EventMux,
peers: peers,
reqDist: newRequestDistributor(peers, quitSync),
accountManager: ctx.AccountManager,
engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb),
shutdownChan: make(chan bool),
networkId: config.NetworkId,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency, light.HelperTrieConfirmations),
}
leth.relay = NewLesTxRelay(peers, leth.reqDist)
leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg)
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
leth.odr = NewLesOdr(chainDb, leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer, leth.retriever)
leth.odr = NewLesOdr(chainDb, leth.retriever)
leth.chtIndexer = light.NewChtIndexer(chainDb, true, leth.odr)
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, true, leth.odr)
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
// Note: NewLightChain adds the trusted checkpoint so it needs an ODR with
// indexers already set but not started yet
if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
return nil, err
}
// Note: AddChildIndexer starts the update process for the child
leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
leth.chtIndexer.Start(leth.blockchain)
leth.bloomIndexer.Start(leth.blockchain)
// Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
@@ -127,13 +135,13 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
}
leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, ClientProtocolVersions, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil {
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil {
return nil, err
}
leth.ApiBackend = &LesApiBackend{leth, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.GasPrice
gpoParams.Default = config.MinerGasPrice
}
leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)
return leth, nil
@@ -209,14 +217,14 @@ func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) {
func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain }
func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool }
func (s *LightEthereum) Engine() consensus.Engine { return s.engine }
func (s *LightEthereum) LesVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
func (s *LightEthereum) LesVersion() int { return int(ClientProtocolVersions[0]) }
func (s *LightEthereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux }
// Protocols implements node.Service, returning all the currently configured
// network protocols to start.
func (s *LightEthereum) Protocols() []p2p.Protocol {
return s.protocolManager.SubProtocols
return s.makeProtocols(ClientProtocolVersions)
}
// Start implements node.Service, starting all internal goroutines needed by the
@@ -236,18 +244,12 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error {
// Ethereum protocol.
func (s *LightEthereum) Stop() error {
s.odr.Stop()
if s.bloomIndexer != nil {
s.bloomIndexer.Close()
}
if s.chtIndexer != nil {
s.chtIndexer.Close()
}
if s.bloomTrieIndexer != nil {
s.bloomTrieIndexer.Close()
}
s.bloomIndexer.Close()
s.chtIndexer.Close()
s.blockchain.Stop()
s.protocolManager.Stop()
s.txPool.Stop()
s.engine.Close()
s.eventMux.Stop()

les/commons.go (new file, 118 lines)

@@ -0,0 +1,118 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
)
// lesCommons contains fields needed by both server and client.
type lesCommons struct {
config *eth.Config
chainDb ethdb.Database
protocolManager *ProtocolManager
chtIndexer, bloomTrieIndexer *core.ChainIndexer
}
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, 3=Ropsten, 4=Rinkeby)
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
CHT light.TrustedCheckpoint `json:"cht"` // Trusted CHT checkpoint for fast catchup
}
// makeProtocols creates protocol descriptors for the given LES versions.
func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
protos := make([]p2p.Protocol, len(versions))
for i, version := range versions {
version := version
protos[i] = p2p.Protocol{
Name: "les",
Version: version,
Length: ProtocolLengths[version],
NodeInfo: c.nodeInfo,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
return c.protocolManager.runPeer(version, p, rw)
},
PeerInfo: func(id discover.NodeID) interface{} {
if p := c.protocolManager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}
return nil
},
}
}
return protos
}
// nodeInfo retrieves some protocol metadata about the running host node.
func (c *lesCommons) nodeInfo() interface{} {
var cht light.TrustedCheckpoint
sections, _, _ := c.chtIndexer.Sections()
sections2, _, _ := c.bloomTrieIndexer.Sections()
if !c.protocolManager.lightSync {
// convert to client section size if running in server mode
sections /= light.CHTFrequencyClient / light.CHTFrequencyServer
}
if sections2 < sections {
sections = sections2
}
if sections > 0 {
sectionIndex := sections - 1
sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
var chtRoot common.Hash
if c.protocolManager.lightSync {
chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
} else {
chtRoot = light.GetChtV2Root(c.chainDb, sectionIndex, sectionHead)
}
cht = light.TrustedCheckpoint{
SectionIdx: sectionIndex,
SectionHead: sectionHead,
CHTRoot: chtRoot,
BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
}
}
chain := c.protocolManager.blockchain
head := chain.CurrentHeader()
hash := head.Hash()
return &NodeInfo{
Network: c.config.NetworkId,
Difficulty: chain.GetTd(hash, head.Number.Uint64()),
Genesis: chain.Genesis().Hash(),
Config: chain.Config(),
Head: chain.CurrentHeader().Hash(),
CHT: cht,
}
}
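
One subtlety above: servers index CHTs in smaller sections than light clients, so in server mode the section count is divided down before comparison. A worked sketch, assuming the v1.8.x constants `light.CHTFrequencyClient = 32768` and `light.CHTFrequencyServer = 4096`:

```go
// One client-side CHT section spans 32768/4096 = 8 server-side sections,
// so a server that has completed 20 sections advertises 20/8 = 2 full
// client-side sections. (Constants assumed from light/postprocess.go.)
const (
	chtFrequencyClient = 32768
	chtFrequencyServer = 4096
)

func clientSections(serverSections uint64) uint64 {
	return serverSections / (chtFrequencyClient / chtFrequencyServer)
}
```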


@@ -20,14 +20,10 @@ package les
import (
"container/list"
"errors"
"sync"
"time"
)
// ErrNoPeers is returned if no peers capable of serving a queued request are available
var ErrNoPeers = errors.New("no suitable peers available")
// requestDistributor implements a mechanism that distributes requests to
// suitable peers, obeying flow control rules and prioritizing them in creation
// order (even when a resend is necessary).

les/freeclient.go (new file, 278 lines)

@@ -0,0 +1,278 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package les implements the Light Ethereum Subprotocol.
package les
import (
"io"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// freeClientPool implements a client database that limits the connection time
// of each client and manages accepting/rejecting incoming connections and even
// kicking out some connected clients. The pool calculates recent usage time
// for each known client (a value that increases linearly when the client is
// connected and decreases exponentially when not connected). Clients with lower
// recent usage are preferred, unknown nodes have the highest priority. Already
// connected nodes receive a small bias in their favor in order to avoid accepting
// and instantly kicking out clients.
//
// Note: the pool can use any string for client identification. Using signature
// keys for that purpose would not make sense when being known has a negative
// value for the client. Currently the LES protocol manager uses IP addresses
// (without port address) to identify clients.
type freeClientPool struct {
db ethdb.Database
lock sync.Mutex
clock mclock.Clock
closed bool
connectedLimit, totalLimit int
addressMap map[string]*freeClientPoolEntry
connPool, disconnPool *prque.Prque
startupTime mclock.AbsTime
logOffsetAtStartup int64
}
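
The linear-growth/exponential-decay scheme in the comment above can be summarized numerically: while connected, usage accumulates second for second; once disconnected it decays with time constant `recentUsageExpTC` (one hour below), halving roughly every 42 minutes. A standalone illustration of that decay, not the pool's fixed-point implementation:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// decayedUsage returns the weighted usage some time after disconnection:
// usage * exp(-elapsed/tc), i.e. halved every tc*ln(2) ≈ 42 min for tc = 1h.
func decayedUsage(usageAtDisconnect float64, elapsed, tc time.Duration) float64 {
	return usageAtDisconnect * math.Exp(-float64(elapsed)/float64(tc))
}

func main() {
	// One hour of accumulated usage, one hour after disconnecting: 3600/e ≈ 1324.
	fmt.Println(decayedUsage(3600, time.Hour, time.Hour))
}
```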
const (
recentUsageExpTC = time.Hour // time constant of the exponential weighting window for "recent" server usage
fixedPointMultiplier = 0x1000000 // constant to convert logarithms to fixed point format
connectedBias = time.Minute // this bias is applied in favor of already connected clients in order to avoid kicking them out very soon
)
// newFreeClientPool creates a new free client pool
func newFreeClientPool(db ethdb.Database, connectedLimit, totalLimit int, clock mclock.Clock) *freeClientPool {
pool := &freeClientPool{
db: db,
clock: clock,
addressMap: make(map[string]*freeClientPoolEntry),
connPool: prque.New(poolSetIndex),
disconnPool: prque.New(poolSetIndex),
connectedLimit: connectedLimit,
totalLimit: totalLimit,
}
pool.loadFromDb()
return pool
}
func (f *freeClientPool) stop() {
f.lock.Lock()
f.closed = true
f.saveToDb()
f.lock.Unlock()
}
// connect should be called after a successful handshake. If the connection was
// rejected, there is no need to call disconnect.
//
// Note: the disconnectFn callback should not block.
func (f *freeClientPool) connect(address string, disconnectFn func()) bool {
f.lock.Lock()
defer f.lock.Unlock()
if f.closed {
return false
}
e := f.addressMap[address]
now := f.clock.Now()
var recentUsage int64
if e == nil {
e = &freeClientPoolEntry{address: address, index: -1}
f.addressMap[address] = e
} else {
if e.connected {
log.Debug("Client already connected", "address", address)
return false
}
recentUsage = int64(math.Exp(float64(e.logUsage-f.logOffset(now)) / fixedPointMultiplier))
}
e.linUsage = recentUsage - int64(now)
// check whether (linUsage+connectedBias) is smaller than the highest entry in the connected pool
if f.connPool.Size() == f.connectedLimit {
i := f.connPool.PopItem().(*freeClientPoolEntry)
if e.linUsage+int64(connectedBias)-i.linUsage < 0 {
// kick it out and accept the new client
f.connPool.Remove(i.index)
f.calcLogUsage(i, now)
i.connected = false
f.disconnPool.Push(i, -i.logUsage)
log.Debug("Client kicked out", "address", i.address)
i.disconnectFn()
} else {
// keep the old client and reject the new one
f.connPool.Push(i, i.linUsage)
log.Debug("Client rejected", "address", address)
return false
}
}
f.disconnPool.Remove(e.index)
e.connected = true
e.disconnectFn = disconnectFn
f.connPool.Push(e, e.linUsage)
if f.connPool.Size()+f.disconnPool.Size() > f.totalLimit {
f.disconnPool.Pop()
}
log.Debug("Client accepted", "address", address)
return true
}
// disconnect should be called when a connection is terminated. If the disconnection
// was initiated by the pool itself using disconnectFn then calling disconnect is
// not necessary but permitted.
func (f *freeClientPool) disconnect(address string) {
f.lock.Lock()
defer f.lock.Unlock()
if f.closed {
return
}
e := f.addressMap[address]
now := f.clock.Now()
if !e.connected {
log.Debug("Client already disconnected", "address", address)
return
}
f.connPool.Remove(e.index)
f.calcLogUsage(e, now)
e.connected = false
f.disconnPool.Push(e, -e.logUsage)
log.Debug("Client disconnected", "address", address)
}
// logOffset calculates the time-dependent offset for the logarithmic
// representation of recent usage
func (f *freeClientPool) logOffset(now mclock.AbsTime) int64 {
// Note: fixedPointMultiplier acts as a multiplier here; dividing the divisor
// (rather than multiplying the numerator) avoids int64 overflow. We assume
// that int64(recentUsageExpTC) >> fixedPointMultiplier.
logDecay := int64((time.Duration(now - f.startupTime)) / (recentUsageExpTC / fixedPointMultiplier))
return f.logOffsetAtStartup + logDecay
}
// calcLogUsage converts recent usage from linear to logarithmic representation
// when disconnecting a peer or closing the client pool
func (f *freeClientPool) calcLogUsage(e *freeClientPoolEntry, now mclock.AbsTime) {
dt := e.linUsage + int64(now)
if dt < 1 {
dt = 1
}
e.logUsage = int64(math.Log(float64(dt))*fixedPointMultiplier) + f.logOffset(now)
}
// freeClientPoolStorage is the RLP representation of the pool's database storage
type freeClientPoolStorage struct {
LogOffset uint64
List []*freeClientPoolEntry
}
// loadFromDb restores pool status from the database storage
// (automatically called at initialization)
func (f *freeClientPool) loadFromDb() {
enc, err := f.db.Get([]byte("freeClientPool"))
if err != nil {
return
}
var storage freeClientPoolStorage
err = rlp.DecodeBytes(enc, &storage)
if err != nil {
log.Error("Failed to decode client list", "err", err)
return
}
f.logOffsetAtStartup = int64(storage.LogOffset)
f.startupTime = f.clock.Now()
for _, e := range storage.List {
log.Debug("Loaded free client record", "address", e.address, "logUsage", e.logUsage)
f.addressMap[e.address] = e
f.disconnPool.Push(e, -e.logUsage)
}
}
// saveToDb saves pool status to the database storage
// (automatically called during shutdown)
func (f *freeClientPool) saveToDb() {
now := f.clock.Now()
storage := freeClientPoolStorage{
LogOffset: uint64(f.logOffset(now)),
List: make([]*freeClientPoolEntry, len(f.addressMap)),
}
i := 0
for _, e := range f.addressMap {
if e.connected {
f.calcLogUsage(e, now)
}
storage.List[i] = e
i++
}
enc, err := rlp.EncodeToBytes(storage)
if err != nil {
log.Error("Failed to encode client list", "err", err)
} else {
f.db.Put([]byte("freeClientPool"), enc)
}
}
// freeClientPoolEntry represents a client address known by the pool.
// When connected, recent usage is calculated as linUsage + int64(clock.Now())
// When disconnected, it is calculated as exp(logUsage - logOffset) where logOffset
// also grows linearly with time while the server is running.
// Conversion between linear and logarithmic representation happens when connecting
// or disconnecting the node.
//
// Note: linUsage and logUsage are values used with constantly growing offsets so
// even though they are close to each other at any time they may wrap around int64
// limits over time. Comparison should be performed accordingly.
type freeClientPoolEntry struct {
address string
connected bool
disconnectFn func()
linUsage, logUsage int64
index int
}
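
The wrap-around caveat above is why `connect` compares priorities via a subtraction against zero (`e.linUsage+int64(connectedBias)-i.linUsage < 0`) rather than comparing the raw values. A minimal sketch of the idiom:

```go
// usageLess orders two usage values that share a constantly growing
// offset: the difference is meaningful even after either operand has
// wrapped around the int64 range, while a direct a < b is not.
func usageLess(a, b int64) bool {
	return a-b < 0
}
```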
func (e *freeClientPoolEntry) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{e.address, uint64(e.logUsage)})
}
func (e *freeClientPoolEntry) DecodeRLP(s *rlp.Stream) error {
var entry struct {
Address string
LogUsage uint64
}
if err := s.Decode(&entry); err != nil {
return err
}
e.address = entry.Address
e.logUsage = int64(entry.LogUsage)
e.connected = false
e.index = -1
return nil
}
// poolSetIndex callback is used by both priority queues to set/update the index of
// the element in the queue. Index is needed to remove elements other than the top one.
func poolSetIndex(a interface{}, i int) {
a.(*freeClientPoolEntry).index = i
}

les/freeclient_test.go (new file, 139 lines)

@@ -0,0 +1,139 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package les implements the Light Ethereum Subprotocol.
package les
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
)
func TestFreeClientPoolL10C100(t *testing.T) {
testFreeClientPool(t, 10, 100)
}
func TestFreeClientPoolL40C200(t *testing.T) {
testFreeClientPool(t, 40, 200)
}
func TestFreeClientPoolL100C300(t *testing.T) {
testFreeClientPool(t, 100, 300)
}
const testFreeClientPoolTicks = 500000
func testFreeClientPool(t *testing.T, connLimit, clientCount int) {
var (
clock mclock.Simulated
db = ethdb.NewMemDatabase()
pool = newFreeClientPool(db, connLimit, 10000, &clock)
connected = make([]bool, clientCount)
connTicks = make([]int, clientCount)
disconnCh = make(chan int, clientCount)
)
peerId := func(i int) string {
return fmt.Sprintf("test peer #%d", i)
}
disconnFn := func(i int) func() {
return func() {
disconnCh <- i
}
}
// pool should accept new peers up to its connected limit
for i := 0; i < connLimit; i++ {
if pool.connect(peerId(i), disconnFn(i)) {
connected[i] = true
} else {
t.Fatalf("Test peer #%d rejected", i)
}
}
// since all accepted peers are new and should not be kicked out, the next one should be rejected
if pool.connect(peerId(connLimit), disconnFn(connLimit)) {
connected[connLimit] = true
t.Fatalf("Peer accepted over connected limit")
}
// randomly connect and disconnect peers, expect to have a similar total connection time at the end
for tickCounter := 0; tickCounter < testFreeClientPoolTicks; tickCounter++ {
clock.Run(1 * time.Second)
i := rand.Intn(clientCount)
if connected[i] {
pool.disconnect(peerId(i))
connected[i] = false
connTicks[i] += tickCounter
} else {
if pool.connect(peerId(i), disconnFn(i)) {
connected[i] = true
connTicks[i] -= tickCounter
}
}
pollDisconnects:
for {
select {
case i := <-disconnCh:
pool.disconnect(peerId(i))
if connected[i] {
connTicks[i] += tickCounter
connected[i] = false
}
default:
break pollDisconnects
}
}
}
expTicks := testFreeClientPoolTicks * connLimit / clientCount // e.g. 500000*10/100 = 50000 for the L10C100 case
expMin := expTicks - expTicks/10
expMax := expTicks + expTicks/10
// check that the total connected time of each peer is within the expected range
for i, c := range connected {
if c {
connTicks[i] += testFreeClientPoolTicks
}
if connTicks[i] < expMin || connTicks[i] > expMax {
t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], expMin, expMax)
}
}
// a previously unknown peer should be accepted now
if !pool.connect("newPeer", func() {}) {
t.Fatalf("Previously unknown peer rejected")
}
// close and restart pool
pool.stop()
pool = newFreeClientPool(db, connLimit, 10000, &clock)
// try connecting all known peers (connLimit should be filled up)
for i := 0; i < clientCount; i++ {
pool.connect(peerId(i), func() {})
}
// expect pool to remember known nodes and kick out one of them to accept a new one
if !pool.connect("newPeer2", func() {}) {
t.Errorf("Previously unknown peer rejected after restarting pool")
}
pool.stop()
}


@@ -20,7 +20,6 @@ package les
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math/big"
"net"
@@ -28,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -39,7 +39,6 @@ import (
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -64,10 +63,6 @@ const (
disableClientRemovePeer = false
)
// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements).
var errIncompatibleConfig = errors.New("incompatible configuration")
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
@@ -104,6 +99,7 @@ type ProtocolManager struct {
odr *LesOdr
server *LesServer
serverPool *serverPool
clientPool *freeClientPool
lesTopic discv5.Topic
reqDist *requestDistributor
retriever *retrieveManager
@@ -113,8 +109,6 @@ type ProtocolManager struct {
peers *peerSet
maxPeers int
SubProtocols []p2p.Protocol
eventMux *event.TypeMux
// channels for fetcher, syncer, txsyncLoop
@@ -129,7 +123,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new Ethereum sub-protocol manager. The Ethereum sub-protocol manages peers capable
// of communicating over the Ethereum network.
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protocolVersions []uint, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) {
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
lightSync: lightSync,
@@ -153,54 +147,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protoco
manager.reqDist = odr.retriever.dist
}
// Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, 0, len(protocolVersions))
for _, version := range protocolVersions {
// Compatible, initialize the sub-protocol
version := version // Closure for the run
manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
Name: "les",
Version: version,
Length: ProtocolLengths[version],
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
var entry *poolEntry
peer := manager.newPeer(int(version), networkId, p, rw)
if manager.serverPool != nil {
addr := p.RemoteAddr().(*net.TCPAddr)
entry = manager.serverPool.connect(peer, addr.IP, uint16(addr.Port))
}
peer.poolEntry = entry
select {
case manager.newPeerCh <- peer:
manager.wg.Add(1)
defer manager.wg.Done()
err := manager.handle(peer)
if entry != nil {
manager.serverPool.disconnect(entry)
}
return err
case <-manager.quitSync:
if entry != nil {
manager.serverPool.disconnect(entry)
}
return p2p.DiscQuitting
}
},
NodeInfo: func() interface{} {
return manager.NodeInfo()
},
PeerInfo: func(id discover.NodeID) interface{} {
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}
return nil
},
})
}
if len(manager.SubProtocols) == 0 {
return nil, errIncompatibleConfig
}
removePeer := manager.removePeer
if disableClientRemovePeer {
removePeer = func(id string) {}
@@ -226,6 +172,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {
if pm.lightSync {
go pm.syncer()
} else {
pm.clientPool = newFreeClientPool(pm.chainDb, maxPeers, 10000, mclock.System{})
go func() {
for range pm.newPeerCh {
}
@@ -243,6 +190,9 @@ func (pm *ProtocolManager) Stop() {
pm.noMorePeers <- struct{}{}
close(pm.quitSync) // quits syncer, fetcher
if pm.clientPool != nil {
pm.clientPool.stop()
}
// Disconnect existing sessions.
// This also closes the gate for any new registrations on the peer set.
@@ -256,6 +206,32 @@ func (pm *ProtocolManager) Stop() {
log.Info("Light Ethereum protocol stopped")
}
// runPeer is the p2p protocol run function for the given version.
func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
var entry *poolEntry
peer := pm.newPeer(int(version), pm.networkId, p, rw)
if pm.serverPool != nil {
addr := p.RemoteAddr().(*net.TCPAddr)
entry = pm.serverPool.connect(peer, addr.IP, uint16(addr.Port))
}
peer.poolEntry = entry
select {
case pm.newPeerCh <- peer:
pm.wg.Add(1)
defer pm.wg.Done()
err := pm.handle(peer)
if entry != nil {
pm.serverPool.disconnect(entry)
}
return err
case <-pm.quitSync:
if entry != nil {
pm.serverPool.disconnect(entry)
}
return p2p.DiscQuitting
}
}
func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, nv, p, newMeteredMsgWriter(rw))
}
@@ -264,7 +240,8 @@ func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgRea
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
// Ignore maxPeers if this is a trusted peer
if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
// In server mode we try to check into the client pool after handshake
if pm.lightSync && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers
}
@@ -282,6 +259,19 @@ func (pm *ProtocolManager) handle(p *peer) error {
p.Log().Debug("Light Ethereum handshake failed", "err", err)
return err
}
if !pm.lightSync && !p.Peer.Info().Network.Trusted {
addr, ok := p.RemoteAddr().(*net.TCPAddr)
// if the peer address is not a TCP address (as can happen in tests), skip the client pool
if ok {
id := addr.IP.String()
if !pm.clientPool.connect(id, func() { go pm.removePeer(p.id) }) {
return p2p.DiscTooManyPeers
}
defer pm.clientPool.disconnect(id)
}
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
@@ -1183,30 +1173,6 @@ func (pm *ProtocolManager) txStatus(hashes []common.Hash) []txStatus {
return stats
}
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
}
// NodeInfo retrieves some protocol metadata about the running host node.
func (self *ProtocolManager) NodeInfo() *NodeInfo {
head := self.blockchain.CurrentHeader()
hash := head.Hash()
return &NodeInfo{
Network: self.networkId,
Difficulty: self.blockchain.GetTd(hash, head.Number.Uint64()),
Genesis: self.blockchain.Genesis().Hash(),
Config: self.blockchain.Config(),
Head: hash,
}
}
// downloaderPeerNotify implements peerSetNotify
type downloaderPeerNotify ProtocolManager
@@ -1238,7 +1204,7 @@ func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, s
}
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return ErrNoPeers
return light.ErrNoPeers
}
return nil
}
@@ -1262,7 +1228,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip
}
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return ErrNoPeers
return light.ErrNoPeers
}
return nil
}


@@ -156,12 +156,12 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
} else {
blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
chtIndexer := light.NewChtIndexer(db, false)
chtIndexer := light.NewChtIndexer(db, false, nil)
chtIndexer.Start(blockchain)
bbtIndexer := light.NewBloomTrieIndexer(db, false)
bbtIndexer := light.NewBloomTrieIndexer(db, false, nil)
bloomIndexer := eth.NewBloomIndexer(db, params.BloomBitsBlocks)
bloomIndexer := eth.NewBloomIndexer(db, params.BloomBitsBlocks, light.HelperTrieProcessConfirmations)
bloomIndexer.AddChildIndexer(bbtIndexer)
bloomIndexer.Start(blockchain)
@@ -172,18 +172,12 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
chain = blockchain
}
var protocolVersions []uint
if lightSync {
protocolVersions = ClientProtocolVersions
} else {
protocolVersions = ServerProtocolVersions
}
pm, err := NewProtocolManager(gspec.Config, lightSync, protocolVersions, NetworkId, evmux, engine, peers, chain, nil, db, odr, nil, nil, make(chan struct{}), new(sync.WaitGroup))
pm, err := NewProtocolManager(gspec.Config, lightSync, NetworkId, evmux, engine, peers, chain, nil, db, odr, nil, nil, make(chan struct{}), new(sync.WaitGroup))
if err != nil {
return nil, err
}
if !lightSync {
srv := &LesServer{protocolManager: pm}
srv := &LesServer{lesCommons: lesCommons{protocolManager: pm}}
pm.server = srv
srv.defParams = &flowcontrol.ServerParams{


@@ -33,14 +33,11 @@ type LesOdr struct {
stop chan struct{}
}
func NewLesOdr(db ethdb.Database, chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr {
func NewLesOdr(db ethdb.Database, retriever *retrieveManager) *LesOdr {
return &LesOdr{
db: db,
chtIndexer: chtIndexer,
bloomTrieIndexer: bloomTrieIndexer,
bloomIndexer: bloomIndexer,
retriever: retriever,
stop: make(chan struct{}),
db: db,
retriever: retriever,
stop: make(chan struct{}),
}
}
@@ -54,6 +51,13 @@ func (odr *LesOdr) Database() ethdb.Database {
return odr.db
}
// SetIndexers adds the necessary chain indexers to the ODR backend
func (odr *LesOdr) SetIndexers(chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer) {
odr.chtIndexer = chtIndexer
odr.bloomTrieIndexer = bloomTrieIndexer
odr.bloomIndexer = bloomIndexer
}
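
`SetIndexers` exists to break a construction cycle: the helper-trie indexers need the ODR to retrieve missing data, while the ODR needs the indexers to serve CHT and bloom-trie lookups. A hypothetical helper mirroring the two-phase wiring shown in les/client.go earlier (it would live in package les; signatures are taken from the hunks in this diff):

```go
func wireOdr(db ethdb.Database, retriever *retrieveManager, bloomIndexer *core.ChainIndexer) *LesOdr {
	odr := NewLesOdr(db, retriever)                      // phase 1: ODR without indexers
	chtIndexer := light.NewChtIndexer(db, true, odr)     // indexers are built against the ODR...
	bloomTrieIndexer := light.NewBloomTrieIndexer(db, true, odr)
	odr.SetIndexers(chtIndexer, bloomTrieIndexer, bloomIndexer) // ...then attached back onto it
	return odr
}
```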
// ChtIndexer returns the CHT chain indexer
func (odr *LesOdr) ChtIndexer() *core.ChainIndexer {
return odr.chtIndexer


@@ -167,7 +167,8 @@ func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
rm := newRetrieveManager(peers, dist, nil)
db := ethdb.NewMemDatabase()
ldb := ethdb.NewMemDatabase()
odr := NewLesOdr(ldb, light.NewChtIndexer(db, true), light.NewBloomTrieIndexer(db, true), eth.NewBloomIndexer(db, light.BloomTrieFrequency), rm)
odr := NewLesOdr(ldb, rm)
odr.SetIndexers(light.NewChtIndexer(db, true, nil), light.NewBloomTrieIndexer(db, true, nil), eth.NewBloomIndexer(db, light.BloomTrieFrequency, light.HelperTrieConfirmations))
pm := newTestProtocolManagerMust(t, false, 4, testChainGen, nil, nil, db)
lpm := newTestProtocolManagerMust(t, true, 0, nil, peers, odr, ldb)
_, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm)

Some files were not shown because too many files have changed in this diff.