Compare commits
199 Commits
316fc7ecfc
af85d8e2b3
6a8b47c880
e0d0e64ce2
b2c644ffb5
522cfc68ff
a063fe9b2d
1dcad8b2de
86acdf1a5b
9f036647e4
355fc47d39
76301ca051
c582667c9b
dcd97c41fa
85c6a1c526
ecca49e078
106d196ec4
a6d45a5d00
7d38d53ae4
1de9ada401
c929030e28
87f294aa0b
68f0a414ea
55d050ccd8
a8aa89accb
c4078fc805
d3488c1aff
0fd02fe9cf
251c868008
99e1a5e0fb
22cd3f70a6
2695fa2213
f44046a1c6
2a06791461
60390878a5
9bf6bb8f63
1d439b5e10
62f5137a72
54216811a0
5952d962dc
3e21adc648
b24fb76a3a
2cdf6ee7e0
e8752f4e9f
d8541a9f99
040aa2bb10
e598ae5c01
2a17fe2561
212bba47ff
b52bb31b76
b2ddb1fcbf
a1783d1697
e0e0e53401
97887d98da
8a040de60b
e07e507d1a
d8328a96b4
fb368723ac
6d1e292eef
3ec5dda4d2
f0998415ba
45eaef2431
3bcb501c8f
d3e4c2dcb0
7b5c375825
beade042d1
11bbc66082
834057592f
abbb219933
8051a0768a
a1eb9c7d13
00e6da9704
9df16f3468
8461fea44b
4bb2dc3d09
cf05ef9106
de9b0660ac
042191338d
93fe16b0a5
64a4e89504
eef65b20fc
c4df67461f
941018b570
a72ba5a55b
6711f098d5
c376a5263f
2901b8b2d2
35fcd2f423
faf0e06ed8
51db5975cc
70176cda0e
d56fa8a659
e4cb158d01
0ab54de1a5
adc2944b4c
16eaf2b158
83e2761c3a
353a82385b
46d4721519
454382e81a
225171a4bf
702b8a7aec
5d7e18539e
c4a1d4fecf
fb9f7261ec
d927cbb638
2fbc454355
d6efa69187
3ea8ac6a9a
8a9c31a307
6380c06c65
54d1111965
f00d0daf33
2cffd4ff3c
7b1aa64220
8f4c4fea20
d42ce0f2c1
273c7a9dc4
a5d5609e38
93c0f1715d
d9575e92fc
11a402f747
021d6fbbbb
feed8069a6
514022bde6
ff22ec31b6
0598707129
a511f6b515
8b1e14b7f8
6c412e313c
6b232ce325
7abedf9bbb
27a278e6e3
a0bcb16875
bc0a43191e
1064b9e691
10780e8a00
2433349c80
58243b4d3e
cab1cff11c
2909f6d7a2
d96ba77113
62467e4405
d0082bb7ec
21c059b67e
9e24491c65
49f63deb24
b536460f8e
afd8b84706
f6206efe5b
ae674a3660
894022a3d5
68da9aa716
14bdcdeab4
fe6a9473dc
427316a707
0647c4de7b
7ddc2c9e95
dcaaa3c804
f5b128a5b3
fd982d3f3b
526abe2736
8997efe31f
4ea2d707f9
ee8877509a
763e64cad8
040dd5bd5d
dcdd57df62
323428865f
65c91ad5e7
5d30be412b
eb7f901289
db5e403afe
96116758d2
65cebb7730
2e0391ea84
d483da766f
7c9314f231
e1f1d3085c
96339daf40
f7d3678c28
facf1bc9d6
e8824f6e74
a9835c1816
2eedbe799f
b3711af051
30bdf817a0
fbeb4f20f9
0b20b1a050
4dbefc1f25
dbae1dc7b3
3b07451564
6209545083
193a402cc0
dcca66bce8
399aa710d5
699794d88d
773857a524
2a75fe3308
@@ -30,8 +30,6 @@ matrix:
       go: 1.10.x
       script:
         - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
-        - brew update
-        - brew cask install osxfuse
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES
1  AUTHORS
@@ -171,3 +171,4 @@ xiekeyang <xiekeyang@users.noreply.github.com>
 yoza <yoza.is12s@gmail.com>
 ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
 Максим Чусовлянов <mchusovlianov@gmail.com>
+Ralph Caraveo <deckarep@gmail.com>
1  Makefile
@@ -41,6 +41,7 @@ lint: ## Run linters.
 	build/env.sh go run build/ci.go lint

 clean:
+	./build/clean_go_build_cache.sh
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*

 # The devtools target installs tools required for 'go generate'.
@@ -34,13 +34,13 @@ The go-ethereum project comes with several wrappers/executables found in the `cmd` directory.

 | Command | Description |
 |:----------:|-------------|
-| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
+| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
 | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
 | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
 | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
 | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
 | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
-| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
+| `swarm` | Swarm daemon and tools. This is the entrypoint for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/ethereum/go-ethereum/tree/master/swarm) for more information. |
 | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

 ## Running geth

@@ -69,7 +69,7 @@ This command will:
 * Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
   (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
   as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
-  This too is optional and if you leave it out you can always attach to an already running Geth instance
+  This tool is optional and if you leave it out you can always attach to an already running Geth instance
   with `geth attach`.

 ### Full node on the Ethereum test network
@@ -65,9 +65,9 @@ type SimulatedBackend struct {

 // NewSimulatedBackend creates a new binding backend using a simulated blockchain
 // for testing purposes.
-func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
+func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
 	database := ethdb.NewMemDatabase()
-	genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
+	genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
 	genesis.MustCommit(database)
 	blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
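NewSimulatedBackend now takes the genesis block gas limit as a second argument. A minimal sketch of the new call shape, mirroring the updated tests further down (the balance and gas-limit figures are illustrative):

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Fund one freshly generated account in the simulated genesis block.
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)

	// The second argument is new: it pins the genesis (and thus per-block) gas limit.
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}},
		10000000,
	)
	_ = sim
}
```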
@@ -324,18 +324,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 //
 // TODO(karalabe): Deprecate when the subscription one can return past data too.
 func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
-	// Initialize unset filter boundaried to run from genesis to chain head
-	from := int64(0)
-	if query.FromBlock != nil {
-		from = query.FromBlock.Int64()
-	}
-	to := int64(-1)
-	if query.ToBlock != nil {
-		to = query.ToBlock.Int64()
-	}
-	// Construct and execute the filter
-	filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
+	var filter *filters.Filter
+	if query.BlockHash != nil {
+		// Block filter requested, construct a single-shot filter
+		filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
+	} else {
+		// Initialize unset filter boundaried to run from genesis to chain head
+		from := int64(0)
+		if query.FromBlock != nil {
+			from = query.FromBlock.Int64()
+		}
+		to := int64(-1)
+		if query.ToBlock != nil {
+			to = query.ToBlock.Int64()
+		}
+		// Construct the range filter
+		filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
+	}

 	// Run the filter and return all the logs
 	logs, err := filter.Logs(ctx)
 	if err != nil {
 		return nil, err
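The restructured FilterLogs distinguishes two query shapes. A hedged sketch of both call forms against the simulated backend (address and block values are illustrative):

```go
import (
	"context"
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// queryLogs sketches the two query shapes the rewritten FilterLogs handles:
// a single-shot block-hash filter versus a from/to range filter.
func queryLogs(sim *backends.SimulatedBackend, hash *common.Hash, addr common.Address) ([]types.Log, error) {
	if hash != nil {
		// BlockHash set: return logs from exactly that one block.
		return sim.FilterLogs(context.Background(), ethereum.FilterQuery{
			BlockHash: hash,
			Addresses: []common.Address{addr},
		})
	}
	// Range filter: FromBlock 0 up to the chain head (ToBlock left nil).
	return sim.FilterLogs(context.Background(), ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		Addresses: []common.Address{addr},
	})
}
```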
@@ -430,6 +436,10 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
 	return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
 }

+func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+	return fb.bc.GetHeaderByHash(hash), nil
+}
+
 func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
 	number := rawdb.ReadHeaderNumber(fb.db, hash)
 	if number == nil {
@@ -229,7 +229,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy an interaction tester contract and call a transaction on it
 		_, _, interactor, err := DeployInteractor(auth, sim, "Deploy string")
@@ -270,7 +270,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a tuple tester contract and execute a structured call on it
 		_, _, getter, err := DeployGetter(auth, sim)
@@ -302,7 +302,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a tuple tester contract and execute a structured call on it
 		_, _, tupler, err := DeployTupler(auth, sim)
@@ -344,7 +344,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a slice tester contract and execute a n array call on it
 		_, _, slicer, err := DeploySlicer(auth, sim)
@@ -378,7 +378,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a default method invoker contract and execute its default method
 		_, _, defaulter, err := DeployDefaulter(auth, sim)
@@ -411,7 +411,7 @@ var bindTests = []struct {
 		`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`,
 		`
 		// Create a simulator and wrap a non-deployed contract
-		sim := backends.NewSimulatedBackend(nil)
+		sim := backends.NewSimulatedBackend(nil, uint64(10000000000))

 		nonexistent, err := NewNonExistent(common.Address{}, sim)
 		if err != nil {
@@ -447,7 +447,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a funky gas pattern contract
 		_, _, limiter, err := DeployFunkyGasPattern(auth, sim)
@@ -482,7 +482,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a sender tester contract and execute a structured call on it
 		_, _, callfrom, err := DeployCallFrom(auth, sim)
@@ -542,7 +542,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy a underscorer tester contract and execute a structured call on it
 		_, _, underscorer, err := DeployUnderscorer(auth, sim)
@@ -612,7 +612,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		// Deploy an eventer contract
 		_, _, eventer, err := DeployEventer(auth, sim)
@@ -761,7 +761,7 @@ var bindTests = []struct {
 		// Generate a new random account and a funded simulator
 		key, _ := crypto.GenerateKey()
 		auth := bind.NewKeyedTransactor(key)
-		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
+		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)

 		//deploy the test contract
 		_, _, testContract, err := DeployDeeplyNestedArray(auth, sim)
@@ -820,7 +820,7 @@ func TestBindings(t *testing.T) {
 		t.Skip("go sdk not found for testing")
 	}
 	// Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845)
-	linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}")
+	linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil,uint64(10000000000)))\n}")
 	linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil)
 	if err != nil {
 		t.Fatalf("failed check for goimports symlink bug: %v", err)
@@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct {

 func TestWaitDeployed(t *testing.T) {
 	for name, test := range waitDeployedTests {
-		backend := backends.NewSimulatedBackend(core.GenesisAlloc{
-			crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
-		})
+		backend := backends.NewSimulatedBackend(
+			core.GenesisAlloc{
+				crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
+			}, 10000000,
+		)

 		// Create the transaction.
 		tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))
@@ -47,10 +47,8 @@ type Method struct {
 // Please note that "int" is substitute for its canonical representation "int256"
 func (method Method) Sig() string {
 	types := make([]string, len(method.Inputs))
-	i := 0
-	for _, input := range method.Inputs {
+	for i, input := range method.Inputs {
 		types[i] = input.Type.String()
-		i++
 	}
 	return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
 }
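The simplified Sig() still produces the canonical signature string, e.g. "transfer(address,uint256)", and hashing that string yields the 4-byte call selector. A small illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Canonical signature in the form Method.Sig() emits: name(type1,type2).
	sig := "transfer(address,uint256)"
	// The first four bytes of the Keccak256 hash form the function selector.
	selector := crypto.Keccak256([]byte(sig))[:4]
	fmt.Printf("%x\n", selector) // a9059cbb
}
```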
@@ -106,7 +106,7 @@ type Wallet interface {
 	// or optionally with the aid of any location metadata from the embedded URL field.
 	//
 	// If the wallet requires additional authentication to sign the request (e.g.
-	// a password to decrypt the account, or a PIN code o verify the transaction),
+	// a password to decrypt the account, or a PIN code to verify the transaction),
 	// an AuthNeededError instance will be returned, containing infos for the user
 	// about which fields or actions are needed. The user may retry by providing
 	// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock
@@ -27,10 +27,10 @@ import (
 	"sync"
 	"time"

+	mapset "github.com/deckarep/golang-set"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
-	"gopkg.in/fatih/set.v0"
 )

 // Minimum amount of time between cache reloads. This limit applies if the platform does
@@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
 		keydir: keydir,
 		byAddr: make(map[common.Address][]accounts.Account),
 		notify: make(chan struct{}, 1),
-		fileC:  fileCache{all: set.NewNonTS()},
+		fileC:  fileCache{all: mapset.NewThreadUnsafeSet()},
 	}
 	ac.watcher = newWatcher(ac)
 	return ac, ac.notify
@@ -237,7 +237,7 @@ func (ac *accountCache) scanAccounts() error {
 		log.Debug("Failed to reload keystore contents", "err", err)
 		return err
 	}
-	if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
+	if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
 		return nil
 	}
 	// Create a helper method to scan the contents of the key files
@@ -272,15 +272,15 @@ func (ac *accountCache) scanAccounts() error {
 	// Process all the file diffs
 	start := time.Now()

-	for _, p := range creates.List() {
+	for _, p := range creates.ToSlice() {
 		if a := readAccount(p.(string)); a != nil {
 			ac.add(*a)
 		}
 	}
-	for _, p := range deletes.List() {
+	for _, p := range deletes.ToSlice() {
 		ac.deleteByFile(p.(string))
 	}
-	for _, p := range updates.List() {
+	for _, p := range updates.ToSlice() {
 		path := p.(string)
 		ac.deleteByFile(path)
 		if a := readAccount(path); a != nil {
@@ -24,20 +24,20 @@ import (
 	"sync"
 	"time"

+	mapset "github.com/deckarep/golang-set"
 	"github.com/ethereum/go-ethereum/log"
-	set "gopkg.in/fatih/set.v0"
 )

 // fileCache is a cache of files seen during scan of keystore.
 type fileCache struct {
-	all     *set.SetNonTS // Set of all files from the keystore folder
-	lastMod time.Time     // Last time instance when a file was modified
+	all     mapset.Set // Set of all files from the keystore folder
+	lastMod time.Time  // Last time instance when a file was modified
 	mu      sync.RWMutex
 }

 // scan performs a new scan on the given directory, compares against the already
 // cached filenames, and returns file sets: creates, deletes, updates.
-func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
+func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
 	t0 := time.Now()

 	// List all the failes from the keystore folder
@@ -51,14 +51,14 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
 	defer fc.mu.Unlock()

 	// Iterate all the files and gather their metadata
-	all := set.NewNonTS()
-	mods := set.NewNonTS()
+	all := mapset.NewThreadUnsafeSet()
+	mods := mapset.NewThreadUnsafeSet()

 	var newLastMod time.Time
 	for _, fi := range files {
-		// Skip any non-key files from the folder
 		path := filepath.Join(keyDir, fi.Name())
-		if skipKeyFile(fi) {
+		// Skip any non-key files from the folder
+		if nonKeyFile(fi) {
 			log.Trace("Ignoring file on account scan", "path", path)
 			continue
 		}
@@ -76,9 +76,9 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
 	t2 := time.Now()

 	// Update the tracked files and return the three sets
-	deletes := set.Difference(fc.all, all)   // Deletes = previous - current
-	creates := set.Difference(all, fc.all)   // Creates = current - previous
-	updates := set.Difference(mods, creates) // Updates = modified - creates
+	deletes := fc.all.Difference(all)   // Deletes = previous - current
+	creates := all.Difference(fc.all)   // Creates = current - previous
+	updates := mods.Difference(creates) // Updates = modified - creates

 	fc.all, fc.lastMod = all, newLastMod
 	t3 := time.Now()
@@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
 	return creates, deletes, updates, nil
 }

-// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
-func skipKeyFile(fi os.FileInfo) bool {
+// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
+func nonKeyFile(fi os.FileInfo) bool {
 	// Skip editor backups and UNIX-style hidden files.
 	if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
 		return true
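The fatih/set to deckarep/golang-set migration above maps List to ToSlice, Size to Cardinality, and the package-level Difference to a method on the set. A minimal sketch of the replacement API (the keys are illustrative):

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	previous := mapset.NewThreadUnsafeSet()
	previous.Add("old.key")
	previous.Add("kept.key")

	current := mapset.NewThreadUnsafeSet()
	current.Add("kept.key")
	current.Add("new.key")

	deletes := previous.Difference(current) // previous - current
	creates := current.Difference(previous) // current - previous

	fmt.Println(deletes.Cardinality(), creates.ToSlice()) // 1 [new.key]
}
```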
@@ -28,18 +28,18 @@ package keystore
 import (
 	"bytes"
 	"crypto/aes"
-	crand "crypto/rand"
+	"crypto/rand"
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"path/filepath"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/crypto/randentropy"
 	"github.com/pborman/uuid"
 	"golang.org/x/crypto/pbkdf2"
 	"golang.org/x/crypto/scrypt"
@@ -93,7 +93,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {

 // StoreKey generates a key, encrypts with 'auth' and stores in the given directory
 func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
-	_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
+	_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth)
 	return a.Address, err
 }

@@ -116,7 +116,11 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
 // blob that can be decrypted later on.
 func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
 	authArray := []byte(auth)
-	salt := randentropy.GetEntropyCSPRNG(32)
+
+	salt := make([]byte, 32)
+	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
+		panic("reading from crypto/rand failed: " + err.Error())
+	}
 	derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
 	if err != nil {
 		return nil, err
@@ -124,7 +128,10 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
 	encryptKey := derivedKey[:16]
 	keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)

-	iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
+	iv := make([]byte, aes.BlockSize) // 16
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		panic("reading from crypto/rand failed: " + err.Error())
+	}
 	cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
 	if err != nil {
 		return nil, err
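The randentropy helper is replaced by direct reads from crypto/rand, as above. A self-contained sketch of the pattern:

```go
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"
	"io"
)

func main() {
	// 32-byte scrypt salt, read straight from the OS CSPRNG.
	salt := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}
	// 16-byte AES-CTR initialization vector.
	iv := make([]byte, aes.BlockSize)
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}
	fmt.Printf("salt: %x\niv:   %x\n", salt, iv)
}
```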
@@ -76,12 +76,12 @@ func (u URL) MarshalJSON() ([]byte, error) {

 // UnmarshalJSON parses url.
 func (u *URL) UnmarshalJSON(input []byte) error {
-	var textUrl string
-	err := json.Unmarshal(input, &textUrl)
+	var textURL string
+	err := json.Unmarshal(input, &textURL)
 	if err != nil {
 		return err
 	}
-	url, err := parseURL(textUrl)
+	url, err := parseURL(textURL)
 	if err != nil {
 		return err
 	}
96  accounts/url_test.go  (new file)
@@ -0,0 +1,96 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package accounts

import (
	"testing"
)

func TestURLParsing(t *testing.T) {
	url, err := parseURL("https://ethereum.org")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if url.Scheme != "https" {
		t.Errorf("expected: %v, got: %v", "https", url.Scheme)
	}
	if url.Path != "ethereum.org" {
		t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
	}

	_, err = parseURL("ethereum.org")
	if err == nil {
		t.Error("expected err, got: nil")
	}
}

func TestURLString(t *testing.T) {
	url := URL{Scheme: "https", Path: "ethereum.org"}
	if url.String() != "https://ethereum.org" {
		t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
	}

	url = URL{Scheme: "", Path: "ethereum.org"}
	if url.String() != "ethereum.org" {
		t.Errorf("expected: %v, got: %v", "ethereum.org", url.String())
	}
}

func TestURLMarshalJSON(t *testing.T) {
	url := URL{Scheme: "https", Path: "ethereum.org"}
	json, err := url.MarshalJSON()
	if err != nil {
		t.Errorf("unexpcted error: %v", err)
	}
	if string(json) != "\"https://ethereum.org\"" {
		t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
	}
}

func TestURLUnmarshalJSON(t *testing.T) {
	url := &URL{}
	err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
	if err != nil {
		t.Errorf("unexpcted error: %v", err)
	}
	if url.Scheme != "https" {
		t.Errorf("expected: %v, got: %v", "https", url.Scheme)
	}
	if url.Path != "ethereum.org" {
		t.Errorf("expected: %v, got: %v", "https", url.Path)
	}
}

func TestURLComparison(t *testing.T) {
	tests := []struct {
		urlA   URL
		urlB   URL
		expect int
	}{
		{URL{"https", "ethereum.org"}, URL{"https", "ethereum.org"}, 0},
		{URL{"http", "ethereum.org"}, URL{"https", "ethereum.org"}, -1},
		{URL{"https", "ethereum.org/a"}, URL{"https", "ethereum.org"}, 1},
		{URL{"https", "abc.org"}, URL{"https", "ethereum.org"}, -1},
	}

	for i, tt := range tests {
		result := tt.urlA.Cmp(tt.urlB)
		if result != tt.expect {
			t.Errorf("test %d: cmp mismatch: expected: %d, got: %d", i, tt.expect, result)
		}
	}
}
204  build/ci.go
@@ -26,7 +26,7 @@ Available commands are:
    install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
    test [ -coverage ] [ packages... ] -- runs the tests
    lint -- runs certain pre-selected linters
-   archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
+   archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts
    importkeys -- imports signing keys from env
    debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
    nsis -- creates a Windows NSIS installer
@@ -59,6 +59,8 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/internal/build"
+	"github.com/ethereum/go-ethereum/params"
+	sv "github.com/ethereum/go-ethereum/swarm/version"
 )

 var (
@@ -77,52 +79,84 @@ var (
 		executablePath("geth"),
 		executablePath("puppeth"),
 		executablePath("rlpdump"),
 		executablePath("swarm"),
 		executablePath("wnode"),
 	}

+	// Files that end up in the swarm*.zip archive.
+	swarmArchiveFiles = []string{
+		"COPYING",
+		executablePath("swarm"),
+	}
+
 	// A debian package is created for all executables listed here.
 	debExecutables = []debExecutable{
 		{
-			Name:        "abigen",
+			BinaryName:  "abigen",
 			Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
 		},
 		{
-			Name:        "bootnode",
+			BinaryName:  "bootnode",
 			Description: "Ethereum bootnode.",
 		},
 		{
-			Name:        "evm",
+			BinaryName:  "evm",
 			Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
 		},
 		{
-			Name:        "geth",
+			BinaryName:  "geth",
 			Description: "Ethereum CLI client.",
 		},
 		{
-			Name:        "puppeth",
+			BinaryName:  "puppeth",
 			Description: "Ethereum private network manager.",
 		},
 		{
-			Name:        "rlpdump",
+			BinaryName:  "rlpdump",
 			Description: "Developer utility tool that prints RLP structures.",
 		},
-		{
-			Name:        "swarm",
-			Description: "Ethereum Swarm daemon and tools",
-		},
 		{
-			Name:        "wnode",
+			BinaryName:  "wnode",
 			Description: "Ethereum Whisper diagnostic tool",
 		},
 	}

+	// A debian package is created for all executables listed here.
+	debSwarmExecutables = []debExecutable{
+		{
+			BinaryName:  "swarm",
+			PackageName: "ethereum-swarm",
+			Description: "Ethereum Swarm daemon and tools",
+		},
+	}
+
+	debEthereum = debPackage{
+		Name:        "ethereum",
+		Version:     params.Version,
+		Executables: debExecutables,
+	}
+
+	debSwarm = debPackage{
+		Name:        "ethereum-swarm",
+		Version:     sv.Version,
+		Executables: debSwarmExecutables,
+	}
+
+	// Debian meta packages to build and push to Ubuntu PPA
+	debPackages = []debPackage{
+		debSwarm,
+		debEthereum,
+	}
+
+	// Packages to be cross-compiled by the xgo command
+	allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...)
+
 	// Distros for which packages are created.
 	// Note: vivid is unsupported because there is no golang-1.6 package for it.
 	// Note: wily is unsupported because it was officially deprecated on lanchpad.
 	// Note: yakkety is unsupported because it was officially deprecated on lanchpad.
 	// Note: zesty is unsupported because it was officially deprecated on lanchpad.
-	debDistros = []string{"trusty", "xenial", "artful", "bionic"}
+	// Note: artful is unsupported because it was officially deprecated on lanchpad.
+	debDistros = []string{"trusty", "xenial", "bionic", "cosmic"}
 )
@@ -350,7 +384,6 @@ func doLint(cmdline []string) {
 }

 // Release Packaging
-
 func doArchive(cmdline []string) {
 	var (
 		arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
@@ -370,10 +403,14 @@ func doArchive(cmdline []string) {
 	}
 	var (
-		env      = build.Env()
-		base     = archiveBasename(*arch, env)
-		geth     = "geth-" + base + ext
-		alltools = "geth-alltools-" + base + ext
+		env = build.Env()
+
+		basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
+		geth     = "geth-" + basegeth + ext
+		alltools = "geth-alltools-" + basegeth + ext
+
+		baseswarm = archiveBasename(*arch, sv.ArchiveVersion(env.Commit))
+		swarm     = "swarm-" + baseswarm + ext
 	)
 	maybeSkipArchive(env)
 	if err := build.WriteArchive(geth, gethArchiveFiles); err != nil {
@@ -382,14 +419,17 @@ func doArchive(cmdline []string) {
 	if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil {
 		log.Fatal(err)
 	}
-	for _, archive := range []string{geth, alltools} {
+	if err := build.WriteArchive(swarm, swarmArchiveFiles); err != nil {
+		log.Fatal(err)
+	}
+	for _, archive := range []string{geth, alltools, swarm} {
 		if err := archiveUpload(archive, *upload, *signer); err != nil {
 			log.Fatal(err)
 		}
 	}
 }

-func archiveBasename(arch string, env build.Environment) string {
+func archiveBasename(arch string, archiveVersion string) string {
 	platform := runtime.GOOS + "-" + arch
 	if arch == "arm" {
 		platform += os.Getenv("GOARM")
@@ -400,18 +440,7 @@ func archiveBasename(arch string, archiveVersion string) string {
 	if arch == "ios" {
 		platform = "ios-all"
 	}
-	return platform + "-" + archiveVersion(env)
-}
-
-func archiveVersion(env build.Environment) string {
-	version := build.VERSION()
-	if isUnstableBuild(env) {
-		version += "-unstable"
-	}
-	if env.Commit != "" {
-		version += "-" + env.Commit[:8]
-	}
-	return version
+	return platform + "-" + archiveVersion
 }

 func archiveUpload(archive string, blobstore string, signer string) error {
@@ -461,7 +490,6 @@ func maybeSkipArchive(env build.Environment) {
 }

 // Debian Packaging
-
 func doDebianSource(cmdline []string) {
 	var (
 		signer = flag.String("signer", "", `Signing key name, also used as package author`)
@@ -485,21 +513,23 @@ func doDebianSource(cmdline []string) {
 		build.MustRun(gpg)
 	}

-	// Create the packages.
-	for _, distro := range debDistros {
-		meta := newDebMetadata(distro, *signer, env, now)
-		pkgdir := stageDebianSource(*workdir, meta)
-		debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
-		debuild.Dir = pkgdir
-		build.MustRun(debuild)
+	// Create Debian packages and upload them
+	for _, pkg := range debPackages {
+		for _, distro := range debDistros {
+			meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
+			pkgdir := stageDebianSource(*workdir, meta)
+			debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
+			debuild.Dir = pkgdir
+			build.MustRun(debuild)

-		changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
-		changes = filepath.Join(*workdir, changes)
-		if *signer != "" {
-			build.MustRunCommand("debsign", changes)
-		}
-		if *upload != "" {
-			build.MustRunCommand("dput", *upload, changes)
+			changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
+			changes = filepath.Join(*workdir, changes)
+			if *signer != "" {
+				build.MustRunCommand("debsign", changes)
+			}
+			if *upload != "" {
+				build.MustRunCommand("dput", *upload, changes)
+			}
 		}
 	}
 }
@@ -524,9 +554,17 @@ func isUnstableBuild(env build.Environment) bool {
 	return true
 }

+type debPackage struct {
+	Name        string          // the name of the Debian package to produce, e.g. "ethereum", or "ethereum-swarm"
+	Version     string          // the clean version of the debPackage, e.g. 1.8.12 or 0.3.0, without any metadata
+	Executables []debExecutable // executables to be included in the package
+}
+
 type debMetadata struct {
 	Env build.Environment

+	PackageName string
+
 	// go-ethereum version being built. Note that this
 	// is not the debian package version. The package version
 	// is constructed by VersionString.
@@ -538,21 +576,33 @@ type debMetadata struct {
 }

 type debExecutable struct {
-	Name, Description string
+	PackageName string
+	BinaryName  string
+	Description string
 }

-func newDebMetadata(distro, author string, env build.Environment, t time.Time) debMetadata {
+// Package returns the name of the package if present, or
+// fallbacks to BinaryName
+func (d debExecutable) Package() string {
+	if d.PackageName != "" {
+		return d.PackageName
+	}
+	return d.BinaryName
+}
+
+func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
 	if author == "" {
 		// No signing key, use default author.
 		author = "Ethereum Builds <fjl@ethereum.org>"
 	}
 	return debMetadata{
+		PackageName: name,
 		Env:         env,
 		Author:      author,
 		Distro:      distro,
-		Version:     build.VERSION(),
+		Version:     version,
 		Time:        t.Format(time.RFC1123Z),
-		Executables: debExecutables,
+		Executables: exes,
 	}
 }
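The new debPackage/debExecutable split keys the Debian metadata per package, and the added Package() method falls back from PackageName to BinaryName. A hedged sketch of how that fallback resolves (the two values mirror entries from the diff; the program itself is illustrative):

```go
package main

import "fmt"

type debExecutable struct {
	PackageName string
	BinaryName  string
	Description string
}

// Package mirrors the method added above: prefer the explicit package
// name, fall back to the binary name.
func (d debExecutable) Package() string {
	if d.PackageName != "" {
		return d.PackageName
	}
	return d.BinaryName
}

func main() {
	swarm := debExecutable{BinaryName: "swarm", PackageName: "ethereum-swarm"}
	geth := debExecutable{BinaryName: "geth"}
	fmt.Println(swarm.Package(), geth.Package()) // ethereum-swarm geth
}
```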
@@ -560,9 +610,9 @@ func newDebMetadata(distro, author string, env build.Environment, t time.Time) debMetadata {
 // on all executable packages.
 func (meta debMetadata) Name() string {
 	if isUnstableBuild(meta.Env) {
-		return "ethereum-unstable"
+		return meta.PackageName + "-unstable"
 	}
-	return "ethereum"
+	return meta.PackageName
 }

 // VersionString returns the debian version of the packages.
@@ -589,9 +639,9 @@ func (meta debMetadata) ExeList() string {
 // ExeName returns the package name of an executable package.
 func (meta debMetadata) ExeName(exe debExecutable) string {
 	if isUnstableBuild(meta.Env) {
-		return exe.Name + "-unstable"
+		return exe.Package() + "-unstable"
 	}
-	return exe.Name
+	return exe.Package()
 }

 // ExeConflicts returns the content of the Conflicts field
@@ -606,7 +656,7 @@ func (meta debMetadata) ExeConflicts(exe debExecutable) string {
 		// be preferred and the conflicting files should be handled via
 		// alternates. We might do this eventually but using a conflict is
 		// easier now.
-		return "ethereum, " + exe.Name
+		return "ethereum, " + exe.Package()
 	}
 	return ""
 }
@@ -623,24 +673,23 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {

 	// Put the debian build files in place.
 	debian := filepath.Join(pkgdir, "debian")
-	build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
-	build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
-	build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta)
-	build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
+	build.Render("build/deb/"+meta.PackageName+"/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
+	build.Render("build/deb/"+meta.PackageName+"/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
+	build.Render("build/deb/"+meta.PackageName+"/deb.control", filepath.Join(debian, "control"), 0644, meta)
+	build.Render("build/deb/"+meta.PackageName+"/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
 	build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
 	build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
 	for _, exe := range meta.Executables {
 		install := filepath.Join(debian, meta.ExeName(exe)+".install")
 		docs := filepath.Join(debian, meta.ExeName(exe)+".docs")
-		build.Render("build/deb.install", install, 0644, exe)
-		build.Render("build/deb.docs", docs, 0644, exe)
+		build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
+		build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
 	}

 	return pkgdir
 }

 // Windows installer
-
 func doWindowsInstaller(cmdline []string) {
 	// Parse the flags and make skip installer generation on PRs
 	var (
@@ -690,11 +739,11 @@ func doWindowsInstaller(cmdline []string) {
 	// Build the installer. This assumes that all the needed files have been previously
 	// built (don't mix building and packaging to keep cross compilation complexity to a
 	// minimum).
-	version := strings.Split(build.VERSION(), ".")
+	version := strings.Split(params.Version, ".")
 	if env.Commit != "" {
 		version[2] += "-" + env.Commit[:8]
 	}
-	installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, env) + ".exe")
+	installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
 	build.MustRunCommand("makensis.exe",
 		"/DOUTPUTFILE="+installer,
 		"/DMAJORVERSION="+version[0],
@@ -746,7 +795,7 @@ func doAndroidArchive(cmdline []string) {
 	maybeSkipArchive(env)

 	// Sign and upload the archive to Azure
-	archive := "geth-" + archiveBasename("android", env) + ".aar"
+	archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
 	os.Rename("geth.aar", archive)

 	if err := archiveUpload(archive, *upload, *signer); err != nil {
@@ -831,7 +880,7 @@ func newMavenMetadata(env build.Environment) mavenMetadata {
 		}
 	}
 	// Render the version and package strings
-	version := build.VERSION()
+	version := params.Version
 	if isUnstableBuild(env) {
 		version += "-SNAPSHOT"
 	}
@@ -866,7 +915,7 @@ func doXCodeFramework(cmdline []string) {
 		build.MustRun(bind)
 		return
 	}
-	archive := "geth-" + archiveBasename("ios", env)
+	archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
 	if err := os.Mkdir(archive, os.ModePerm); err != nil {
 		log.Fatal(err)
 	}
@@ -922,7 +971,7 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
 			}
 		}
 	}
-	version := build.VERSION()
+	version := params.Version
 	if isUnstableBuild(env) {
 		version += "-unstable." + env.Buildnum
 	}
@@ -952,7 +1001,7 @@ func doXgo(cmdline []string) {

 	if *alltools {
 		args = append(args, []string{"--dest", GOBIN}...)
-		for _, res := range allToolsArchiveFiles {
+		for _, res := range allCrossCompiledArchiveFiles {
 			if strings.HasPrefix(res, GOBIN) {
 				// Binary tool found, cross build it explicitly
 				args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
@@ -1018,23 +1067,14 @@ func doPurge(cmdline []string) {
 	}
 	for i := 0; i < len(blobs); i++ {
 		for j := i + 1; j < len(blobs); j++ {
-			iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified)
-			if err != nil {
-				log.Fatal(err)
-			}
-			jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified)
-			if err != nil {
-				log.Fatal(err)
-			}
-			if iTime.After(jTime) {
+			if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
 				blobs[i], blobs[j] = blobs[j], blobs[i]
 			}
 		}
 	}
 	// Filter out all archives more recent that the given threshold
 	for i, blob := range blobs {
-		timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified)
-		if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour {
+		if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
 			blobs = blobs[:i]
 			break
 		}
19  build/clean_go_build_cache.sh  (new executable file)
@@ -0,0 +1,19 @@
#!/bin/sh

# Cleaning the Go cache only makes sense if we actually have Go installed... or
# if Go is actually callable. This does not hold true during deb packaging, so
# we need an explicit check to avoid build failures.
if ! command -v go > /dev/null; then
	exit
fi

version_gt() {
	test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
}

golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')

# Clean go build cache when go version is greater than or equal to 1.10
if !(version_gt 1.10 $golang_version); then
	go clean -cache
fi
@@ -1 +0,0 @@
-build/bin/{{.Name}} usr/bin
19  build/deb/ethereum-swarm/deb.control  (new file)
@@ -0,0 +1,19 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.10
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum

{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
 {{.Description}}
{{end}}
@@ -1,4 +1,4 @@
-Copyright 2016 The go-ethereum Authors
+Copyright 2018 The go-ethereum Authors

 go-ethereum is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
1  build/deb/ethereum-swarm/deb.install  (new file)
@@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin
5  build/deb/ethereum/deb.changelog  (new file)
@@ -0,0 +1,5 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low

  * git build of {{.Env.Commit}}

 -- {{.Author}} {{.Time}}
@@ -11,8 +11,8 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum
 Package: {{.Name}}
 Architecture: any
 Depends: ${misc:Depends}, {{.ExeList}}
-Description: Meta-package to install geth and other tools
- Meta-package to install geth and other tools
+Description: Meta-package to install geth, swarm, and other tools
+ Meta-package to install geth, swarm and other tools

{{range .Executables}}
Package: {{$.ExeName .}}
14  build/deb/ethereum/deb.copyright  (new file)
@@ -0,0 +1,14 @@
Copyright 2018 The go-ethereum Authors

go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
1  build/deb/ethereum/deb.docs  (new file)
@@ -0,0 +1 @@
AUTHORS
1  build/deb/ethereum/deb.install  (new file)
@@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin
13  build/deb/ethereum/deb.rules  (new file)
@@ -0,0 +1,13 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1

override_dh_auto_build:
	build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}

override_dh_auto_test:

%:
	dh $@
@@ -415,7 +415,7 @@ func signer(c *cli.Context) error {

 	// start http server
 	httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
-	listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts)
+	listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
 	if err != nil {
 		utils.Fatalf("Could not start RPC api: %v", err)
 	}
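rpc.StartHTTPEndpoint now takes an explicit timeout configuration; the diff passes rpc.DefaultHTTPTimeouts. A hedged sketch of the updated call shape (the endpoint address and module list are illustrative):

```go
import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/rpc"
)

// startAccountRPC sketches the new six-argument form of StartHTTPEndpoint,
// with rpc.DefaultHTTPTimeouts supplying the HTTP read/write/idle timeouts.
func startAccountRPC(rpcAPI []rpc.API, cors, vhosts []string) (net.Listener, error) {
	endpoint := fmt.Sprintf("%s:%d", "127.0.0.1", 8550) // illustrative address
	listener, _, err := rpc.StartHTTPEndpoint(
		endpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts,
	)
	if err != nil {
		return nil, err
	}
	return listener, nil // caller is responsible for closing the listener
}
```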
@@ -213,7 +213,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64,
 	// Assemble the raw devp2p protocol stack
 	stack, err := node.New(&node.Config{
 		Name:    "geth",
-		Version: params.Version,
+		Version: params.VersionWithMeta,
 		DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
 		P2P: p2p.Config{
 			NAT: nat.Any(),
@@ -51,7 +51,7 @@ func reportBug(ctx *cli.Context) error {

 	fmt.Fprintln(&buff, "#### System information")
 	fmt.Fprintln(&buff)
-	fmt.Fprintln(&buff, "Version:", params.Version)
+	fmt.Fprintln(&buff, "Version:", params.VersionWithMeta)
 	fmt.Fprintln(&buff, "Go Version:", runtime.Version())
 	fmt.Fprintln(&buff, "OS:", runtime.GOOS)
 	printOSDetails(&buff)
@@ -48,7 +48,6 @@ var (
 	ArgsUsage: "<genesisPath>",
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
-		utils.LightModeFlag,
 	},
 	Category: "BLOCKCHAIN COMMANDS",
 	Description: `
@@ -66,7 +65,7 @@ It expects the genesis file as argument.`,
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
 		utils.CacheFlag,
-		utils.LightModeFlag,
+		utils.SyncModeFlag,
 		utils.GCModeFlag,
 		utils.CacheDatabaseFlag,
 		utils.CacheGCFlag,
@@ -87,14 +86,15 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
 		utils.CacheFlag,
-		utils.LightModeFlag,
+		utils.SyncModeFlag,
 	},
 	Category: "BLOCKCHAIN COMMANDS",
 	Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
-if already existing.`,
+if already existing. If the file ends with .gz, the output will
+be gzipped.`,
 }
 importPreimagesCommand = cli.Command{
 	Action: utils.MigrateFlags(importPreimages),
@@ -104,7 +104,7 @@ if already existing.`,
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
 		utils.CacheFlag,
-		utils.LightModeFlag,
+		utils.SyncModeFlag,
 	},
 	Category: "BLOCKCHAIN COMMANDS",
 	Description: `
@@ -118,7 +118,7 @@ if already existing.`,
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
 		utils.CacheFlag,
-		utils.LightModeFlag,
+		utils.SyncModeFlag,
 	},
 	Category: "BLOCKCHAIN COMMANDS",
 	Description: `
@@ -148,7 +148,6 @@ The first argument must be the directory containing the blockchain to download from
 	ArgsUsage: " ",
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
-		utils.LightModeFlag,
 	},
 	Category: "BLOCKCHAIN COMMANDS",
 	Description: `
@@ -162,7 +161,7 @@ Remove blockchain and state databases`,
 	Flags: []cli.Flag{
 		utils.DataDirFlag,
 		utils.CacheFlag,
-		utils.LightModeFlag,
+		utils.SyncModeFlag,
 	},
 	Category: "BLOCKCHAIN COMMANDS",
 	Description: `
@@ -31,7 +31,7 @@ import (
 )

 const (
-	ipcAPIs  = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
+	ipcAPIs  = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0"
 	httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
 )

@@ -50,7 +50,7 @@ func TestConsoleWelcome(t *testing.T) {
 	geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
 	geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
 	geth.SetTemplateFunc("gover", runtime.Version)
-	geth.SetTemplateFunc("gethver", func() string { return params.Version })
+	geth.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
 	geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
 	geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
@@ -133,7 +133,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
 	attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
 	attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
 	attach.SetTemplateFunc("gover", runtime.Version)
-	attach.SetTemplateFunc("gethver", func() string { return params.Version })
+	attach.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
 	attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
 	attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
 	attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
@@ -72,6 +72,7 @@ var (
 		utils.EthashDatasetDirFlag,
 		utils.EthashDatasetsInMemoryFlag,
 		utils.EthashDatasetsOnDiskFlag,
+		utils.TxPoolLocalsFlag,
 		utils.TxPoolNoLocalsFlag,
 		utils.TxPoolJournalFlag,
 		utils.TxPoolRejournalFlag,
@@ -82,8 +83,6 @@ var (
 		utils.TxPoolAccountQueueFlag,
 		utils.TxPoolGlobalQueueFlag,
 		utils.TxPoolLifetimeFlag,
-		utils.FastSyncFlag,
-		utils.LightModeFlag,
 		utils.SyncModeFlag,
 		utils.GCModeFlag,
 		utils.LightServFlag,
@@ -96,11 +95,19 @@ var (
 		utils.ListenPortFlag,
 		utils.MaxPeersFlag,
 		utils.MaxPendingPeersFlag,
-		utils.EtherbaseFlag,
-		utils.GasPriceFlag,
-		utils.MinerThreadsFlag,
 		utils.MiningEnabledFlag,
-		utils.TargetGasLimitFlag,
+		utils.MinerThreadsFlag,
+		utils.MinerLegacyThreadsFlag,
+		utils.MinerNotifyFlag,
+		utils.MinerGasTargetFlag,
+		utils.MinerLegacyGasTargetFlag,
+		utils.MinerGasPriceFlag,
+		utils.MinerLegacyGasPriceFlag,
+		utils.MinerEtherbaseFlag,
+		utils.MinerLegacyEtherbaseFlag,
+		utils.MinerExtraDataFlag,
+		utils.MinerLegacyExtraDataFlag,
+		utils.MinerRecommitIntervalFlag,
 		utils.NATFlag,
 		utils.NoDiscoverFlag,
 		utils.DiscoveryV5Flag,
@@ -121,7 +128,6 @@ var (
 		utils.NoCompactionFlag,
 		utils.GpoBlocksFlag,
 		utils.GpoPercentileFlag,
-		utils.ExtraDataFlag,
 		configFileFlag,
 	}
@@ -199,10 +205,15 @@ func init() {

 	app.Before = func(ctx *cli.Context) error {
 		runtime.GOMAXPROCS(runtime.NumCPU())
-		if err := debug.Setup(ctx); err != nil {
+
+		logdir := ""
+		if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
+			logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
+		}
+		if err := debug.Setup(ctx, logdir); err != nil {
 			return err
 		}
-		// Cap the cache allowance and tune the garbage colelctor
+		// Cap the cache allowance and tune the garbage collector
 		var mem gosigar.Mem
 		if err := mem.Get(); err == nil {
 			allowance := int(mem.Total / 1024 / 1024 / 3)
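For reference, the allowance line above computes one third of total system memory, in MiB. A standalone sketch under the assumption that the gosigar API matches the hunk (the import path here is the upstream one; the repo may use a vendored copy):

package main

import (
	"fmt"

	"github.com/elastic/gosigar"
)

// cacheAllowanceMiB returns a third of total RAM in MiB, mirroring the
// computation in the hunk above.
func cacheAllowanceMiB() (int, error) {
	var mem gosigar.Mem
	if err := mem.Get(); err != nil {
		return 0, err
	}
	// mem.Total is in bytes; divide twice by 1024 for MiB, then take a third.
	return int(mem.Total / 1024 / 1024 / 3), nil
}

func main() {
	if mib, err := cacheAllowanceMiB(); err == nil {
		fmt.Printf("cache allowance: %d MiB\n", mib)
	}
}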
@@ -246,6 +257,9 @@ func main() {
 // It creates a default node based on the command line arguments and runs it in
 // blocking mode, waiting for it to be shut down.
 func geth(ctx *cli.Context) error {
+	if args := ctx.Args(); len(args) > 0 {
+		return fmt.Errorf("invalid command: %q", args[0])
+	}
 	node := makeFullNode(ctx)
 	startNode(ctx, node)
 	node.Wait()
@@ -300,11 +314,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 				status, _ := event.Wallet.Status()
 				log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)

+				derivationPath := accounts.DefaultBaseDerivationPath
 				if event.Wallet.URL().Scheme == "ledger" {
-					event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
-				} else {
-					event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
+					derivationPath = accounts.DefaultLedgerBaseDerivationPath
 				}
+				event.Wallet.SelfDerive(derivationPath, stateReader)

 			case accounts.WalletDropped:
 				log.Info("Old wallet dropped", "url", event.Wallet.URL())
@@ -315,7 +329,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 	// Start auxiliary services if enabled
 	if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
 		// Mining only makes sense if a full Ethereum node is running
-		if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
+		if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
 			utils.Fatalf("Light clients do not support mining")
 		}
 		var ethereum *eth.Ethereum
@@ -323,7 +337,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 			utils.Fatalf("Ethereum service not running: %v", err)
 		}
 		// Use a reduced number of threads if requested
-		if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
+		threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
+		if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
+			threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
+		}
+		if threads > 0 {
 			type threaded interface {
 				SetThreads(threads int)
 			}
@@ -332,7 +350,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 			}
 		}
 		// Set the gas price to the limits from the CLI and start mining
-		ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
+		gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
+		if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
+			gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
+		}
+		ethereum.TxPool().SetGasPrice(gasprice)
 		if err := ethereum.StartMining(true); err != nil {
 			utils.Fatalf("Failed to start mining: %v", err)
 		}
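Both hunks apply the same precedence rule: read the deprecated flag first, then let the new flag override it when explicitly set. A minimal self-contained sketch of that rule using the standard flag package (flag names here are illustrative, not the geth ones):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// The deprecated spelling keeps working; the new spelling wins when set.
	legacy := flag.Int("minerthreads", 0, "deprecated: use --miner.threads")
	current := flag.Int("miner.threads", 0, "number of mining threads")
	flag.Parse()

	threads := *legacy
	seen := false
	flag.Visit(func(f *flag.Flag) { // Visit only reports flags that were actually set
		if f.Name == "miner.threads" {
			seen = true
		}
	})
	if seen {
		threads = *current
	}
	fmt.Println("threads:", threads)
}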
@@ -108,7 +108,7 @@ func makedag(ctx *cli.Context) error {

 func version(ctx *cli.Context) error {
 	fmt.Println(strings.Title(clientIdentifier))
-	fmt.Println("Version:", params.Version)
+	fmt.Println("Version:", params.VersionWithMeta)
 	if gitCommit != "" {
 		fmt.Println("Git Commit:", gitCommit)
 	}
@@ -185,12 +185,12 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
 	parts := strings.SplitN(pattern, "/", 2)
 	if len(parts) > 1 {
 		for _, variation := range strings.Split(parts[0], ",") {
-			if submetrics, ok := metrics[variation].(map[string]interface{}); !ok {
+			submetrics, ok := metrics[variation].(map[string]interface{})
+			if !ok {
 				utils.Fatalf("Failed to retrieve system metrics: %s", path+variation)
 				return nil
-			} else {
-				results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
 			}
+			results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
 		}
 		return results
 	}
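The refactor flattens an if/else into an early bail-out. A standalone sketch of the same recursive descent over nested metric maps, with Fatalf replaced by a plain skip for illustration:

package main

import (
	"fmt"
	"strings"
)

// resolve walks a nested map following a "/"-separated pattern; each path
// segment may list comma-separated alternatives, all of which are explored.
func resolve(metrics map[string]interface{}, pattern, path string) []float64 {
	var results []float64
	parts := strings.SplitN(pattern, "/", 2)
	if len(parts) > 1 {
		for _, variation := range strings.Split(parts[0], ",") {
			sub, ok := metrics[variation].(map[string]interface{})
			if !ok {
				continue // assumption: skip missing branches instead of aborting
			}
			results = append(results, resolve(sub, parts[1], path+variation+"/")...)
		}
		return results
	}
	if v, ok := metrics[parts[0]].(float64); ok {
		results = append(results, v)
	}
	return results
}

func main() {
	m := map[string]interface{}{
		"system": map[string]interface{}{"cpu": 0.42},
	}
	fmt.Println(resolve(m, "system/cpu", "")) // [0.42]
}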
@@ -83,7 +83,8 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.LightKDFFlag,
 		},
 	},
-	{Name: "DEVELOPER CHAIN",
+	{
+		Name: "DEVELOPER CHAIN",
 		Flags: []cli.Flag{
 			utils.DeveloperFlag,
 			utils.DeveloperPeriodFlag,
@@ -113,6 +114,7 @@ var AppHelpFlagGroups = []flagGroup{
 	{
 		Name: "TRANSACTION POOL",
 		Flags: []cli.Flag{
+			utils.TxPoolLocalsFlag,
 			utils.TxPoolNoLocalsFlag,
 			utils.TxPoolJournalFlag,
 			utils.TxPoolRejournalFlag,
@@ -184,10 +186,12 @@ var AppHelpFlagGroups = []flagGroup{
 		Flags: []cli.Flag{
 			utils.MiningEnabledFlag,
 			utils.MinerThreadsFlag,
-			utils.EtherbaseFlag,
-			utils.TargetGasLimitFlag,
-			utils.GasPriceFlag,
-			utils.ExtraDataFlag,
+			utils.MinerNotifyFlag,
+			utils.MinerGasPriceFlag,
+			utils.MinerGasTargetFlag,
+			utils.MinerEtherbaseFlag,
+			utils.MinerExtraDataFlag,
+			utils.MinerRecommitIntervalFlag,
 		},
 	},
 	{
@@ -229,8 +233,11 @@ var AppHelpFlagGroups = []flagGroup{
 	{
 		Name: "DEPRECATED",
 		Flags: []cli.Flag{
 			utils.FastSyncFlag,
 			utils.LightModeFlag,
+			utils.MinerLegacyThreadsFlag,
+			utils.MinerLegacyGasTargetFlag,
+			utils.MinerLegacyGasPriceFlag,
+			utils.MinerLegacyEtherbaseFlag,
+			utils.MinerLegacyExtraDataFlag,
 		},
 	},
 	{
@@ -678,9 +678,9 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da

 	// Build and deploy the dashboard service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // dashboardInfos is returned from a dashboard status check to allow reporting
@@ -100,9 +100,9 @@ func deployEthstats(client *sshClient, network string, port int, secret string,

 	// Build and deploy the ethstats service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // ethstatsInfos is returned from an ethstats status check to allow reporting
@@ -122,7 +122,7 @@ func (info *ethstatsInfos) Report() map[string]string {
 		"Website address":       info.host,
 		"Website listener port": strconv.Itoa(info.port),
 		"Login secret":          info.secret,
-		"Banned addresses":      fmt.Sprintf("%v", info.banned),
+		"Banned addresses":      strings.Join(info.banned, "\n"),
 	}
 }
@@ -38,7 +38,7 @@ ADD chain.json /chain.json
 RUN \
   echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
   echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
-  echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
+  echo 'exec /parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh

 ENTRYPOINT ["/bin/sh", "explorer.sh"]
`
@@ -140,9 +140,9 @@ func deployExplorer(client *sshClient, network string, chainspec []byte, config

 	// Build and deploy the boot or seal node service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // explorerInfos is returned from a block explorer status check to allow reporting
@@ -133,9 +133,9 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config

 	// Build and deploy the faucet service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // faucetInfos is returned from a faucet status check to allow reporting various
@@ -81,9 +81,9 @@ func deployNginx(client *sshClient, network string, port int, nocache bool) ([]b

 	// Build and deploy the reverse-proxy service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // nginxInfos is returned from an nginx reverse-proxy status check to allow
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
 RUN \
   echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
   echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
-  echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
+  echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gasprice {{.GasPrice}}' >> geth.sh

 ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@@ -139,9 +139,9 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n

 	// Build and deploy the boot or seal node service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // nodeInfos is returned from a boot or seal node status check to allow reporting
@@ -221,7 +221,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)

 	// Container available, retrieve its node ID and its genesis json
 	var out []byte
-	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
+	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil {
 		return nil, ErrServiceUnreachable
 	}
 	id := bytes.Trim(bytes.TrimSpace(out), "\"")
@@ -37,7 +37,7 @@ ADD genesis.json /genesis.json
 RUN \
   echo 'node server.js &' > wallet.sh && \
   echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
-  echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
+  echo $'exec geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh

 RUN \
   sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \
@@ -120,9 +120,9 @@ func deployWallet(client *sshClient, network string, bootnodes []string, config

 	// Build and deploy the boot or seal node service
 	if nocache {
-		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
+		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
 	}
-	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
+	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
 }

 // walletInfos is returned from a web wallet status check to allow reporting
@@ -45,33 +45,44 @@ type sshClient struct {

 // dial establishes an SSH connection to a remote node using the current user and
 // the user's configured private RSA key. If that fails, password authentication
-// is fallen back to. The caller may override the login user via user@server:port.
+// is fallen back to. server can be a string like user:identity@server:port.
 func dial(server string, pubkey []byte) (*sshClient, error) {
-	// Figure out a label for the server and a logger
-	label := server
-	if strings.Contains(label, ":") {
-		label = label[:strings.Index(label, ":")]
-	}
-	login := ""
+	// Figure out username, identity, hostname and port
+	hostname := ""
+	hostport := server
+	username := ""
+	identity := "id_rsa" // default

 	if strings.Contains(server, "@") {
-		login = label[:strings.Index(label, "@")]
-		label = label[strings.Index(label, "@")+1:]
-		server = server[strings.Index(server, "@")+1:]
+		prefix := server[:strings.Index(server, "@")]
+		if strings.Contains(prefix, ":") {
+			username = prefix[:strings.Index(prefix, ":")]
+			identity = prefix[strings.Index(prefix, ":")+1:]
+		} else {
+			username = prefix
+		}
+		hostport = server[strings.Index(server, "@")+1:]
 	}
-	logger := log.New("server", label)
+	if strings.Contains(hostport, ":") {
+		hostname = hostport[:strings.Index(hostport, ":")]
+	} else {
+		hostname = hostport
+		hostport += ":22"
+	}
+	logger := log.New("server", server)
 	logger.Debug("Attempting to establish SSH connection")

 	user, err := user.Current()
 	if err != nil {
 		return nil, err
 	}
-	if login == "" {
-		login = user.Username
+	if username == "" {
+		username = user.Username
 	}
 	// Configure the supported authentication methods (private key and password)
 	var auths []ssh.AuthMethod

-	path := filepath.Join(user.HomeDir, ".ssh", "id_rsa")
+	path := filepath.Join(user.HomeDir, ".ssh", identity)
 	if buf, err := ioutil.ReadFile(path); err != nil {
 		log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
 	} else {
@@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 		}
 	}
 	auths = append(auths, ssh.PasswordCallback(func() (string, error) {
-		fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
+		fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
 		blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))

 		fmt.Println()
 		return string(blob), err
 	}))
 	// Resolve the IP address of the remote server
-	addr, err := net.LookupHost(label)
+	addr, err := net.LookupHost(hostname)
 	if err != nil {
 		return nil, err
 	}
@@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 		return nil, errors.New("no IPs associated with domain")
 	}
 	// Try to dial in to the remote server
-	logger.Trace("Dialing remote SSH server", "user", login)
-	if !strings.Contains(server, ":") {
-		server += ":22"
-	}
+	logger.Trace("Dialing remote SSH server", "user", username)
 	keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
 		// If no public key is known for SSH, ask the user to confirm
 		if pubkey == nil {
@@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 		// We have a mismatch, forbid connecting
 		return errors.New("ssh key mismatch, readd the machine to update")
 	}
-	client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
+	client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
 	if err != nil {
 		return nil, err
 	}
 	// Connection established, return our utility wrapper
 	c := &sshClient{
-		server:  label,
+		server:  hostname,
 		address: addr[0],
 		pubkey:  pubkey,
 		client:  client,
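The refactor above splits a server spec of the form [username[:identity]@]hostname[:port] into its parts. A standalone sketch of that parsing with the same defaulting rules (identity defaults to id_rsa, port to 22):

package main

import (
	"fmt"
	"strings"
)

// parseServer mirrors the dial() parsing above: an optional user[:identity]
// prefix before '@', and an optional :port suffix on the host.
func parseServer(server string) (username, identity, hostname, hostport string) {
	identity = "id_rsa" // default key file under ~/.ssh
	hostport = server
	if i := strings.Index(server, "@"); i >= 0 {
		prefix := server[:i]
		if j := strings.Index(prefix, ":"); j >= 0 {
			username, identity = prefix[:j], prefix[j+1:]
		} else {
			username = prefix
		}
		hostport = server[i+1:]
	}
	if i := strings.Index(hostport, ":"); i >= 0 {
		hostname = hostport[:i]
	} else {
		hostname = hostport
		hostport += ":22" // default SSH port
	}
	return
}

func main() {
	u, k, h, hp := parseServer("alice:deploy_key@example.org:2222")
	fmt.Println(u, k, h, hp) // alice deploy_key example.org example.org:2222
}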
@@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
 	logger.Info("Starting remote server health-check")

 	stat := &serverStat{
-		address:  client.address,
 		services: make(map[string]map[string]string),
 	}
 	if client == nil {
@@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
 		}
 		client = conn
 	}
+	stat.address = client.address
+
 	// Client connected one way or another, run health-checks
 	logger.Debug("Checking for nginx availability")
 	if infos, err := checkNginx(client, w.network); err != nil {
@@ -203,7 +204,7 @@ func (stats serverStats) render() {

 	table.SetHeader([]string{"Server", "Address", "Service", "Config", "Value"})
 	table.SetAlignment(tablewriter.ALIGN_LEFT)
-	table.SetColWidth(100)
+	table.SetColWidth(40)

 	// Find the longest lines for all columns for the hacked separator
 	separator := make([]string, 5)
@@ -214,6 +215,9 @@ func (stats serverStats) render() {
 		if len(stat.address) > len(separator[1]) {
 			separator[1] = strings.Repeat("-", len(stat.address))
 		}
+		if len(stat.failure) > len(separator[1]) {
+			separator[1] = strings.Repeat("-", len(stat.failure))
+		}
 		for service, configs := range stat.services {
 			if len(service) > len(separator[2]) {
 				separator[2] = strings.Repeat("-", len(service))
@@ -222,8 +226,10 @@ func (stats serverStats) render() {
 			if len(config) > len(separator[3]) {
 				separator[3] = strings.Repeat("-", len(config))
 			}
-			if len(value) > len(separator[4]) {
-				separator[4] = strings.Repeat("-", len(value))
+			for _, val := range strings.Split(value, "\n") {
+				if len(val) > len(separator[4]) {
+					separator[4] = strings.Repeat("-", len(val))
+				}
 			}
 		}
 	}
@@ -248,7 +254,11 @@ func (stats serverStats) render() {
 		sort.Strings(services)

 		if len(services) == 0 {
-			table.Append([]string{server, stats[server].address, "", "", ""})
+			if stats[server].failure != "" {
+				table.Append([]string{server, stats[server].failure, "", "", ""})
+			} else {
+				table.Append([]string{server, stats[server].address, "", "", ""})
+			}
 		}
 		for j, service := range services {
 			// Add an empty line between all services
@@ -263,13 +273,17 @@ func (stats serverStats) render() {
 			sort.Strings(configs)

 			for k, config := range configs {
-				switch {
-				case j == 0 && k == 0:
-					table.Append([]string{server, stats[server].address, service, config, stats[server].services[service][config]})
-				case k == 0:
-					table.Append([]string{"", "", service, config, stats[server].services[service][config]})
-				default:
-					table.Append([]string{"", "", "", config, stats[server].services[service][config]})
+				for l, value := range strings.Split(stats[server].services[service][config], "\n") {
+					switch {
+					case j == 0 && k == 0 && l == 0:
+						table.Append([]string{server, stats[server].address, service, config, value})
+					case k == 0 && l == 0:
+						table.Append([]string{"", "", service, config, value})
+					case l == 0:
+						table.Append([]string{"", "", "", config, value})
+					default:
+						table.Append([]string{"", "", "", "", value})
+					}
 				}
 			}
 		}
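The change above renders multi-line values (such as the newline-joined banned-address list) as one table row per line, blanking the repeated columns. A dependency-free sketch of that row-expansion idea:

package main

import (
	"fmt"
	"strings"
)

// appendRows expands a cell that may contain newlines into one row per line;
// only the first row repeats the leading columns, the rest leave them blank.
func appendRows(rows [][]string, server, config, value string) [][]string {
	for l, v := range strings.Split(value, "\n") {
		if l == 0 {
			rows = append(rows, []string{server, config, v})
		} else {
			rows = append(rows, []string{"", "", v})
		}
	}
	return rows
}

func main() {
	rows := appendRows(nil, "node-1", "Banned addresses", "10.0.0.1\n10.0.0.2")
	for _, r := range rows {
		fmt.Println(strings.Join(r, " | "))
	}
}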
@@ -62,14 +62,14 @@ func (w *wizard) manageServers() {
 	}
 }

-// makeServer reads a single line from stdin and interprets it as a hostname to
-// connect to. It tries to establish a new SSH session and also executing some
-// baseline validations.
+// makeServer reads a single line from stdin and interprets it as
+// username:identity@hostname to connect to. It tries to establish a
+// new SSH session and also executing some baseline validations.
 //
 // If connection succeeds, the server is added to the wizards configs!
 func (w *wizard) makeServer() string {
 	fmt.Println()
-	fmt.Println("Please enter remote server's address:")
+	fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?")

 	// Read and dial the server to ensure docker is present
 	input := w.readString()
cmd/swarm/access.go (new file, 219 lines)
@@ -0,0 +1,219 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"crypto/rand"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/api/client"
	"gopkg.in/urfave/cli.v1"
)

var salt = make([]byte, 32)

func init() {
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}
}

func accessNewPass(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("Expected 1 argument - the ref")
	}

	var (
		ae        *api.AccessEntry
		accessKey []byte
		err       error
		ref       = args[0]
		password  = getPassPhrase("", 0, makePasswordList(ctx))
		dryRun    = ctx.Bool(SwarmDryRunFlag.Name)
	)
	accessKey, ae, err = api.DoPasswordNew(ctx, password, salt)
	if err != nil {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
	if dryRun {
		err = printManifests(m, nil)
		if err != nil {
			utils.Fatalf("had an error printing the manifests: %v", err)
		}
	} else {
		err = uploadManifests(ctx, m, nil)
		if err != nil {
			utils.Fatalf("had an error uploading the manifests: %v", err)
		}
	}
}

func accessNewPK(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("Expected 1 argument - the ref")
	}

	var (
		ae               *api.AccessEntry
		sessionKey       []byte
		err              error
		ref              = args[0]
		privateKey       = getPrivKey(ctx)
		granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name)
		dryRun           = ctx.Bool(SwarmDryRunFlag.Name)
	)
	sessionKey, ae, err = api.DoPKNew(ctx, privateKey, granteePublicKey, salt)
	if err != nil {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
	if dryRun {
		err = printManifests(m, nil)
		if err != nil {
			utils.Fatalf("had an error printing the manifests: %v", err)
		}
	} else {
		err = uploadManifests(ctx, m, nil)
		if err != nil {
			utils.Fatalf("had an error uploading the manifests: %v", err)
		}
	}
}

func accessNewACT(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("Expected 1 argument - the ref")
	}

	var (
		ae          *api.AccessEntry
		actManifest *api.Manifest
		accessKey   []byte
		err         error
		ref         = args[0]
		grantees    = []string{}
		actFilename = ctx.String(SwarmAccessGrantKeysFlag.Name)
		privateKey  = getPrivKey(ctx)
		dryRun      = ctx.Bool(SwarmDryRunFlag.Name)
	)

	bytes, err := ioutil.ReadFile(actFilename)
	if err != nil {
		utils.Fatalf("had an error reading the grantee public key list")
	}
	grantees = strings.Split(string(bytes), "\n")
	accessKey, ae, actManifest, err = api.DoACTNew(ctx, privateKey, salt, grantees)
	if err != nil {
		utils.Fatalf("error generating ACT manifest: %v", err)
	}

	if err != nil {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
	if err != nil {
		utils.Fatalf("error generating root access manifest: %v", err)
	}

	if dryRun {
		err = printManifests(m, actManifest)
		if err != nil {
			utils.Fatalf("had an error printing the manifests: %v", err)
		}
	} else {
		err = uploadManifests(ctx, m, actManifest)
		if err != nil {
			utils.Fatalf("had an error uploading the manifests: %v", err)
		}
	}
}

func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
	js, err := json.Marshal(rootAccessManifest)
	if err != nil {
		return err
	}
	fmt.Println(string(js))

	if actManifest != nil {
		js, err := json.Marshal(actManifest)
		if err != nil {
			return err
		}
		fmt.Println(string(js))
	}
	return nil
}

func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error {
	bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
	client := client.NewClient(bzzapi)

	var (
		key string
		err error
	)
	if actManifest != nil {
		key, err = client.UploadManifest(actManifest, false)
		if err != nil {
			return err
		}

		rootAccessManifest.Entries[0].Access.Act = key
	}
	key, err = client.UploadManifest(rootAccessManifest, false)
	if err != nil {
		return err
	}
	fmt.Println(key)
	return nil
}

// makePasswordList reads password lines from the file specified by the global --password flag
// and also by the same subcommand --password flag.
// This function is a fork of utils.MakePasswordList to look up the cli context for the subcommand.
// Function ctx.SetGlobal is not setting the global flag value that can be accessed
// by ctx.GlobalString using the current version of the cli package.
func makePasswordList(ctx *cli.Context) []string {
	path := ctx.GlobalString(utils.PasswordFileFlag.Name)
	if path == "" {
		path = ctx.String(utils.PasswordFileFlag.Name)
		if path == "" {
			return nil
		}
	}
	text, err := ioutil.ReadFile(path)
	if err != nil {
		utils.Fatalf("Failed to read password file: %v", err)
	}
	lines := strings.Split(string(text), "\n")
	// Sanitise DOS line endings.
	for i := range lines {
		lines[i] = strings.TrimRight(lines[i], "\r")
	}
	return lines
}
cmd/swarm/access_test.go (new file, 581 lines)
@@ -0,0 +1,581 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"io"
	"io/ioutil"
	gorand "math/rand"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
// The parties participating: a node (the publisher) uploads to a second node, then disappears. The content that
// was uploaded is then fetched through the second node. Since the tested code is not key-aware, we can just
// fetch from the second node using HTTP BasicAuth.
func TestAccessPassword(t *testing.T) {
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()
	proxyNode := cluster.Nodes[0]

	// create a tmp file
	tmp, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	// write data to file
	data := "notsorandomdata"
	dataFilename := filepath.Join(tmp, "data.txt")

	err = ioutil.WriteFile(dataFilename, []byte(data), 0666)
	if err != nil {
		t.Fatal(err)
	}

	hashRegexp := `[a-f\d]{128}`

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t,
		"--bzzapi",
		proxyNode.URL, // it doesn't matter through which node we upload content
		"up",
		"--encrypt",
		dataFilename)
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()

	if len(matches) < 1 {
		t.Fatal("no matches found")
	}

	ref := matches[0]

	password := "smth"
	passwordFilename := filepath.Join(tmp, "password.txt")

	err = ioutil.WriteFile(passwordFilename, []byte(password), 0666)
	if err != nil {
		t.Fatal(err)
	}

	up = runSwarm(t,
		"access",
		"new",
		"pass",
		"--dry-run",
		"--password",
		passwordFilename,
		ref,
	)

	_, matches = up.ExpectRegexp(".+")
	up.ExpectExit()

	if len(matches) == 0 {
		t.Fatalf("stdout not matched")
	}

	var m api.Manifest

	err = json.Unmarshal([]byte(matches[0]), &m)
	if err != nil {
		t.Fatalf("unmarshal manifest: %v", err)
	}

	if len(m.Entries) != 1 {
		t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
	}

	e := m.Entries[0]

	ct := "application/bzz-manifest+json"
	if e.ContentType != ct {
		t.Errorf("expected %q content type, got %q", ct, e.ContentType)
	}

	if e.Access == nil {
		t.Fatal("manifest access is nil")
	}

	a := e.Access

	if a.Type != "pass" {
		t.Errorf(`got access type %q, expected "pass"`, a.Type)
	}
	if len(a.Salt) < 32 {
		t.Errorf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
	}
	if a.KdfParams == nil {
		t.Fatal("manifest access kdf params is nil")
	}

	client := swarm.NewClient(cluster.Nodes[0].URL)

	hash, err := client.UploadManifest(&m, false)
	if err != nil {
		t.Fatal(err)
	}

	httpClient := &http.Client{}

	url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
	response, err := httpClient.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	if response.StatusCode != http.StatusUnauthorized {
		t.Fatal("should be a 401")
	}
	authHeader := response.Header.Get("WWW-Authenticate")
	if authHeader == "" {
		t.Fatal("should be something here")
	}

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		t.Fatal(err)
	}
	req.SetBasicAuth("", password)

	response, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		t.Errorf("expected status %v, got %v", http.StatusOK, response.StatusCode)
	}
	d, err := ioutil.ReadAll(response.Body)
	if err != nil {
		t.Fatal(err)
	}
	if string(d) != data {
		t.Errorf("expected decrypted data %q, got %q", data, string(d))
	}

	wrongPasswordFilename := filepath.Join(tmp, "password-wrong.txt")

	err = ioutil.WriteFile(wrongPasswordFilename, []byte("just wr0ng"), 0666)
	if err != nil {
		t.Fatal(err)
	}

	// download the file with 'swarm down' using the wrong password
	up = runSwarm(t,
		"--bzzapi",
		proxyNode.URL,
		"down",
		"bzz:/"+hash,
		tmp,
		"--password",
		wrongPasswordFilename)

	_, matches = up.ExpectRegexp("unauthorized")
	if len(matches) != 1 || matches[0] != "unauthorized" {
		t.Fatal(`"unauthorized" not found in output`)
	}
	up.ExpectExit()
}
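TestAccessPassword exercises the password flow end to end: an unauthenticated GET must return 401 with a WWW-Authenticate header, and retrying with HTTP BasicAuth (empty user, the password as secret) must return the plaintext. A compact sketch of just that client-side retry, against a hypothetical gateway URL:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func fetchProtected(url, password string) ([]byte, error) {
	// First attempt without credentials: the gateway is expected to say 401.
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusUnauthorized {
		return nil, fmt.Errorf("expected 401, got %d", resp.StatusCode)
	}
	// Retry with BasicAuth: empty username, the access password as secret.
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth("", password)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}

func main() {
	// Hypothetical local gateway and manifest hash, for illustration only.
	data, err := fetchProtected("http://localhost:8500/bzz:/<hash>", "smth")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data)
}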
// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
// The parties participating: a node (the publisher) uploads to a second node (which is also the grantee), then
// disappears. The content that was uploaded is then fetched through the grantee's http proxy. Since the tested
// code is private-key aware, the test will fail if the proxy's given private key is not granted on the ACT.
func TestAccessPK(t *testing.T) {
	// Setup Swarm and upload a test file to it
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()

	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer tmp.Close()
	defer os.Remove(tmp.Name())

	// write data to file
	data := "notsorandomdata"
	_, err = io.WriteString(tmp, data)
	if err != nil {
		t.Fatal(err)
	}

	hashRegexp := `[a-f\d]{128}`

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		"up",
		"--encrypt",
		tmp.Name())
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()

	if len(matches) < 1 {
		t.Fatal("no matches found")
	}

	ref := matches[0]

	pk := cluster.Nodes[0].PrivateKey
	granteePubKey := crypto.CompressPubkey(&pk.PublicKey)

	publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
	if err != nil {
		t.Fatal(err)
	}

	passFile, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer passFile.Close()
	defer os.Remove(passFile.Name())
	_, err = io.WriteString(passFile, testPassphrase)
	if err != nil {
		t.Fatal(err)
	}
	_, publisherAccount := getTestAccount(t, publisherDir)
	up = runSwarm(t,
		"--bzzaccount",
		publisherAccount.Address.String(),
		"--password",
		passFile.Name(),
		"--datadir",
		publisherDir,
		"--bzzapi",
		cluster.Nodes[0].URL,
		"access",
		"new",
		"pk",
		"--dry-run",
		"--grant-key",
		hex.EncodeToString(granteePubKey),
		ref,
	)

	_, matches = up.ExpectRegexp(".+")
	up.ExpectExit()

	if len(matches) == 0 {
		t.Fatalf("stdout not matched")
	}

	var m api.Manifest

	err = json.Unmarshal([]byte(matches[0]), &m)
	if err != nil {
		t.Fatalf("unmarshal manifest: %v", err)
	}

	if len(m.Entries) != 1 {
		t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
	}

	e := m.Entries[0]

	ct := "application/bzz-manifest+json"
	if e.ContentType != ct {
		t.Errorf("expected %q content type, got %q", ct, e.ContentType)
	}

	if e.Access == nil {
		t.Fatal("manifest access is nil")
	}

	a := e.Access

	if a.Type != "pk" {
		t.Errorf(`got access type %q, expected "pk"`, a.Type)
	}
	if len(a.Salt) < 32 {
		t.Errorf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
	}
	if a.KdfParams != nil {
		t.Fatal("manifest access kdf params should be nil")
	}

	client := swarm.NewClient(cluster.Nodes[0].URL)

	hash, err := client.UploadManifest(&m, false)
	if err != nil {
		t.Fatal(err)
	}

	httpClient := &http.Client{}

	url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
	response, err := httpClient.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	if response.StatusCode != http.StatusOK {
		t.Fatal("should be a 200")
	}
	d, err := ioutil.ReadAll(response.Body)
	if err != nil {
		t.Fatal(err)
	}
	if string(d) != data {
		t.Errorf("expected decrypted data %q, got %q", data, string(d))
	}
}

// TestAccessACT tests the e2e creation, uploading and downloading of an ACT type access control.
// The test fires up a 3-node cluster, then randomly picks 2 nodes that will act as grantees to the data
// set. The third node should fail to decode the reference, as it will not be granted access. The publisher
// uploads through one of the nodes, then disappears.
func TestAccessACT(t *testing.T) {
	// Setup Swarm and upload a test file to it
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	var uploadThroughNode = cluster.Nodes[0]
	client := swarm.NewClient(uploadThroughNode.URL)

	r1 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
	nodeToSkip := r1.Intn(3) // a number between 0 and 2 (node indices in `cluster`)
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer tmp.Close()
	defer os.Remove(tmp.Name())

	// write data to file
	data := "notsorandomdata"
	_, err = io.WriteString(tmp, data)
	if err != nil {
		t.Fatal(err)
	}

	hashRegexp := `[a-f\d]{128}`

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		"up",
		"--encrypt",
		tmp.Name())
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()

	if len(matches) < 1 {
		t.Fatal("no matches found")
	}

	ref := matches[0]
	grantees := []string{}
	for i, v := range cluster.Nodes {
		if i == nodeToSkip {
			continue
		}
		pk := v.PrivateKey
		granteePubKey := crypto.CompressPubkey(&pk.PublicKey)
		grantees = append(grantees, hex.EncodeToString(granteePubKey))
	}

	granteesPubkeyListFile, err := ioutil.TempFile("", "grantees-pubkey-list.csv")
	if err != nil {
		t.Fatal(err)
	}

	_, err = granteesPubkeyListFile.WriteString(strings.Join(grantees, "\n"))
	if err != nil {
		t.Fatal(err)
	}

	defer granteesPubkeyListFile.Close()
	defer os.Remove(granteesPubkeyListFile.Name())

	publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
	if err != nil {
		t.Fatal(err)
	}

	passFile, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer passFile.Close()
	defer os.Remove(passFile.Name())
	_, err = io.WriteString(passFile, testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	_, publisherAccount := getTestAccount(t, publisherDir)
	up = runSwarm(t,
		"--bzzaccount",
		publisherAccount.Address.String(),
		"--password",
		passFile.Name(),
		"--datadir",
		publisherDir,
		"--bzzapi",
		cluster.Nodes[0].URL,
		"access",
		"new",
		"act",
		"--grant-keys",
		granteesPubkeyListFile.Name(),
		ref,
	)

	_, matches = up.ExpectRegexp(`[a-f\d]{64}`)
	up.ExpectExit()

	if len(matches) == 0 {
		t.Fatalf("stdout not matched")
	}
	hash := matches[0]
	m, _, err := client.DownloadManifest(hash)
	if err != nil {
		t.Fatalf("unmarshal manifest: %v", err)
	}

	if len(m.Entries) != 1 {
		t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
	}

	e := m.Entries[0]

	ct := "application/bzz-manifest+json"
	if e.ContentType != ct {
		t.Errorf("expected %q content type, got %q", ct, e.ContentType)
	}

	if e.Access == nil {
		t.Fatal("manifest access is nil")
	}

	a := e.Access

	if a.Type != "act" {
		t.Fatalf(`got access type %q, expected "act"`, a.Type)
	}
	if len(a.Salt) < 32 {
		t.Fatalf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
	}
	if a.KdfParams != nil {
		t.Fatal("manifest access kdf params should be nil")
	}

	httpClient := &http.Client{}

	// all nodes except the skipped node should be able to decrypt the content
	for i, node := range cluster.Nodes {
		log.Debug("trying to fetch from node", "node index", i)

		url := node.URL + "/" + "bzz:/" + hash
		response, err := httpClient.Get(url)
		if err != nil {
			t.Fatal(err)
		}
		log.Debug("got response from node", "response code", response.StatusCode)

		if i == nodeToSkip {
			log.Debug("reached node to skip", "status code", response.StatusCode)

			if response.StatusCode != http.StatusUnauthorized {
				t.Fatalf("should be a 401")
			}

			continue
		}

		if response.StatusCode != http.StatusOK {
			t.Fatal("should be a 200")
		}
		d, err := ioutil.ReadAll(response.Body)
		if err != nil {
			t.Fatal(err)
		}
		if string(d) != data {
			t.Errorf("expected decrypted data %q, got %q", data, string(d))
		}
	}
}
// TestKeypairSanity is a sanity test for the crypto scheme for ACT. It asserts the correct shared secret according to
// the specs at https://github.com/ethersphere/swarm-docs/blob/eb857afda906c6e7bb90d37f3f334ccce5eef230/act.md
func TestKeypairSanity(t *testing.T) {
	salt := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
		t.Fatalf("reading from crypto/rand failed: %v", err.Error())
	}
	sharedSecret := "a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d"

	for i, v := range []struct {
		publisherPriv string
		granteePub    string
	}{
		{
			publisherPriv: "ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459",
			granteePub:    "0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a",
		},
		{
			publisherPriv: "70c7a73011aa56584a0009ab874794ee7e5652fd0c6911cd02f8b6267dd82d2d",
			granteePub:    "02e6f8d5e28faaa899744972bb847b6eb805a160494690c9ee7197ae9f619181db",
		},
	} {
		b, _ := hex.DecodeString(v.granteePub)
		granteePub, _ := crypto.DecompressPubkey(b)
		publisherPrivate, _ := crypto.HexToECDSA(v.publisherPriv)

		ssKey, err := api.NewSessionKeyPK(publisherPrivate, granteePub, salt)
		if err != nil {
			t.Fatal(err)
		}

		hasher := sha3.NewKeccak256()
		hasher.Write(salt)
		shared, err := hex.DecodeString(sharedSecret)
		if err != nil {
			t.Fatal(err)
		}
		hasher.Write(shared)
		sum := hasher.Sum(nil)

		if !bytes.Equal(ssKey, sum) {
			t.Fatalf("%d: got a session key mismatch", i)
		}
	}
}
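TestKeypairSanity pins the scheme down to: the PK session key equals Keccak256(salt || sharedSecret), with the shared secret fixed by the spec vectors. A minimal sketch of just the final hashing step the test asserts, using crypto.Keccak256; the zero salt and the spec's shared-secret constant are demo inputs only:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	salt := make([]byte, 32) // all-zero salt, just for the arithmetic demo
	shared, _ := hex.DecodeString(
		"a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d")

	// The session key asserted by TestKeypairSanity: Keccak256(salt || shared).
	sessionKey := crypto.Keccak256(salt, shared)
	fmt.Println(hex.EncodeToString(sessionKey))

	// Equivalent to hashing the explicit concatenation:
	alt := crypto.Keccak256(append(append([]byte{}, salt...), shared...))
	fmt.Println(bytes.Equal(sessionKey, alt)) // true
}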
cmd/swarm/bootnodes.go (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

var SwarmBootnodes = []string{
	// Foundation Swarm Gateway Cluster
	"enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399",
	"enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400",
	"enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401",
	"enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402",
	"enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403",
	"enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404",
	"enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405",
	"enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406",
	"enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407",
	"enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408",
	"enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409",
	"enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410",
	"enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411",
	"enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412",
	"enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413",
	"enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414",
	"enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415",
	"enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416",
	"enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417",
	"enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418",
	"enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419",
	"enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420",
	"enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421",
	"enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422",
	"enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423",
	"enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424",
	"enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425",
	"enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426",
	"enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427",
	"enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428",
	"enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429",
	"enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430",
	"enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431",
	"enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432",
	"enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433",
	"enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434",
	"enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435",
	"enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436",
	"enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437",
|
||||
"enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438",
|
||||
"enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439",
|
||||
"enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440",
|
||||
"enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441",
|
||||
"enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442",
|
||||
"enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443",
|
||||
"enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444",
|
||||
"enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445",
|
||||
"enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446",
|
||||
|
||||
// Mainframe
|
||||
"enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399",
|
||||
"enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399",
|
||||
"enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399",
|
||||
"enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399",
|
||||
"enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399",
|
||||
"enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399",
|
||||
}
|
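Each entry above is an enode URL: a secp256k1 node ID in hex, followed by the node's IP address and TCP port. A minimal, self-contained sketch (not part of this changeset) of validating one of these entries with the same discover package the diff uses later for setSwarmBootstrapNodes; the chosen URL is simply the first list entry:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/p2p/discover"
    )

    func main() {
        // First entry of SwarmBootnodes, copied verbatim.
        url := "enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399"
        n, err := discover.ParseNode(url)
        if err != nil {
            fmt.Println("invalid enode:", err)
            return
        }
        fmt.Println("bootnode", n.IP, "port", n.TCP)
    }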
@@ -38,8 +38,6 @@ import (
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
)

const SWARM_VERSION = "0.3"

var (
//flag definition for the dumpconfig command
DumpConfigCommand = cli.Command{
@@ -70,6 +68,7 @@ const (
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
SWARM_ENV_ENS_API = "SWARM_ENS_API"
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
@@ -79,6 +78,7 @@ const (
SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
GETH_ENV_DATADIR = "GETH_DATADIR"
)

@@ -133,7 +133,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
log.Debug(printConfig(config))
}

//override the current config with whatever is in the config file, if a config file has been provided
//configFileOverride overrides the current config with the config file, if a config file has been provided
func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) {
var err error

@@ -143,7 +143,8 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config
if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" {
utils.Fatalf("Config file flag provided with invalid file path")
}
f, err := os.Open(filepath)
var f *os.File
f, err = os.Open(filepath)
if err != nil {
return nil, err
}
@@ -206,6 +207,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.SyncUpdateDelay = d
}

if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
currentConfig.LightNodeEnabled = true
}

if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
currentConfig.DeliverySkipCheck = true
}
@@ -228,10 +233,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.Cors = cors
}

if ctx.GlobalIsSet(utils.BootnodesFlag.Name) {
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
}

if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
currentConfig.LocalStoreParams.ChunkDbPath = storePath
}
@@ -303,6 +304,12 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
}
}

if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
if lightnode, err := strconv.ParseBool(lne); err == nil {
currentConfig.LightNodeEnabled = lightnode
}
}

if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
currentConfig.SwapAPI = swapapi
}
@@ -323,10 +330,6 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
currentConfig.Cors = cors
}

if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" {
currentConfig.BootNodes = bootnodes
}

return currentConfig
}
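The light-node override above applies the parsed value only when ParseBool succeeds. A standalone sketch of that env-var override pattern, using only the standard library (the variable name mirrors the constant above; the Setenv call is just for demonstration):

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func main() {
        os.Setenv("SWARM_LIGHT_NODE_ENABLE", "true") // demonstration only

        enabled := false
        if v := os.Getenv("SWARM_LIGHT_NODE_ENABLE"); v != "" {
            // Apply the parsed value only on a clean parse; ignore garbage input.
            if b, err := strconv.ParseBool(v); err == nil {
                enabled = b
            }
        }
        fmt.Println("light node enabled:", enabled)
    }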
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"testing"
@@ -559,3 +560,16 @@ func TestValidateConfig(t *testing.T) {
}
}
}

func assignTCPPort() (string, error) {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return "", err
}
l.Close()
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return "", err
}
return port, nil
}
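An illustrative companion test (not part of this changeset) showing how assignTCPPort is meant to be consumed. Because the helper closes its listener before returning, another process can grab the port in the interim; that race is acceptable for tests but worth keeping in mind:

    package main

    import (
        "net"
        "testing"
    )

    // TestAssignTCPPortSketch exercises assignTCPPort and re-binds the
    // returned port, the way a test server would.
    func TestAssignTCPPortSketch(t *testing.T) {
        port, err := assignTCPPort()
        if err != nil {
            t.Fatal(err)
        }
        l, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", port))
        if err != nil {
            t.Fatalf("port %s was reused before we could bind it: %v", port, err)
        }
        l.Close()
    }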
@@ -68,18 +68,36 @@ func download(ctx *cli.Context) {
utils.Fatalf("could not parse uri argument: %v", err)
}

// assume behaviour according to --recursive switch
if isRecursive {
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
utils.Fatalf("encoutered an error while downloading directory: %v", err)
}
} else {
// we are downloading a file
log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
dl := func(credentials string) error {
// assume behaviour according to --recursive switch
if isRecursive {
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil {
if err == swarm.ErrUnauthorized {
return err
}
return fmt.Errorf("directory %s: %v", uri.Path, err)
}
} else {
// we are downloading a file
log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path)

err := client.DownloadFile(uri.Addr, uri.Path, dest)
if err != nil {
utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials)
if err != nil {
if err == swarm.ErrUnauthorized {
return err
}
return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err)
}
}
return nil
}
if passwords := makePasswordList(ctx); passwords != nil {
password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords)
err = dl(password)
} else {
err = dl("")
}
if err != nil {
utils.Fatalf("download: %v", err)
}
}
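The refactored download wraps the whole fetch in a credentials-taking closure and surfaces a sentinel "unauthorized" error so the caller can prompt for a password and retry. A generic, self-contained sketch of that pattern; ErrUnauthorized and fetch are stand-ins for swarm.ErrUnauthorized and the client calls above:

    package main

    import (
        "errors"
        "fmt"
    )

    var ErrUnauthorized = errors.New("unauthorized")

    // fetch pretends the resource is access controlled unless credentials are given.
    func fetch(credentials string) error {
        if credentials == "" {
            return ErrUnauthorized
        }
        return nil
    }

    func main() {
        dl := func(credentials string) error {
            if err := fetch(credentials); err != nil {
                if err == ErrUnauthorized {
                    return err // let the caller prompt and retry
                }
                return fmt.Errorf("download: %v", err)
            }
            return nil
        }
        if err := dl(""); err == ErrUnauthorized {
            // In the real command the password comes from getPassPhrase.
            fmt.Println("retry with password:", dl("secret-from-prompt"))
        }
    }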
@@ -92,7 +92,7 @@ func listMounts(cliContext *cli.Context) {
mf := []fuse.MountInfo{}
err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
if err != nil {
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
}
if len(mf) == 0 {
fmt.Print("Could not find any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
@@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// +build linux freebsd

package main

import (
@@ -41,6 +43,11 @@ type testFile struct {
}

// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
// and without any log messages in the log:
// /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse.
// This is the reason for this file not being built on darwin architecture.
func TestCLISwarmFs(t *testing.T) {
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
@@ -18,6 +18,7 @@
package main

import (
"context"
"fmt"
"os"

@@ -39,7 +40,7 @@ func hash(ctx *cli.Context) {

stat, _ := f.Stat()
fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
addr, _, err := fileStore.Store(f, stat.Size(), false)
addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
if err != nil {
utils.Fatalf("%v\n", err)
} else {
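FileStore.Store now takes a context as its first argument; context.TODO() is a placeholder for callers that have nothing better to pass. A hedged sketch of a caller that does want cancellation, built from the same constructors the hunk above uses (the exact return values follow the call shape shown in the diff; the second return, a wait function, is discarded here just as the command does):

    package main

    import (
        "context"
        "fmt"
        "strings"
        "time"

        "github.com/ethereum/go-ethereum/swarm/storage"
    )

    func main() {
        fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())

        // Bound the chunking work instead of using context.TODO().
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        payload := "hello swarm"
        addr, _, err := fileStore.Store(ctx, strings.NewReader(payload), int64(len(payload)), false)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("swarm hash: %x\n", addr)
    }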
@@ -44,7 +44,7 @@ func list(ctx *cli.Context) {

bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)
list, err := client.List(manifest, prefix)
list, err := client.List(manifest, prefix, "")
if err != nil {
utils.Fatalf("Failed to generate file and directory list: %s", err)
}
@@ -37,12 +37,12 @@ import (
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/swarm"
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
"github.com/ethereum/go-ethereum/swarm/tracing"
sv "github.com/ethereum/go-ethereum/swarm/version"

"gopkg.in/urfave/cli.v1"
)
@@ -66,14 +66,7 @@ OPTIONS:
`

var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
testbetBootNodes = []string{
"enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429",
"enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430",
"enode://fe29b82319b734ce1ec68b84657d57145fee237387e63273989d354486731e59f78858e452ef800a020559da22dcca759536e6aa5517c53930d29ce0b1029286@13.74.157.139:30431",
"enode://1d7187e7bde45cf0bee489ce9852dd6d1a0d9aa67a33a6b8e6db8a4fbc6fcfa6f0f1a5419343671521b863b187d1c73bad3603bae66421d157ffef357669ddb8@13.74.157.139:30432",
"enode://0e4cba800f7b1ee73673afa6a4acead4018f0149d2e3216be3f133318fd165b324cd71b81fbe1e80deac8dbf56e57a49db7be67f8b9bc81bd2b7ee496434fb5d@13.74.157.139:30433",
}
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
)

var (
@@ -122,6 +115,11 @@ var (
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
}
SwarmLightNodeEnabled = cli.BoolFlag{
Name: "lightnode",
Usage: "Enable Swarm LightNode (default false)",
EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
}
SwarmDeliverySkipCheckFlag = cli.BoolFlag{
Name: "delivery-skip-check",
Usage: "Skip chunk delivery check (default false)",
@@ -143,24 +141,41 @@ var (
}
SwarmWantManifestFlag = cli.BoolTFlag{
Name: "manifest",
Usage: "Automatic manifest upload",
Usage: "Automatic manifest upload (default true)",
}
SwarmUploadDefaultPath = cli.StringFlag{
Name: "defaultpath",
Usage: "path to file served for empty url path (none)",
}
SwarmAccessGrantKeyFlag = cli.StringFlag{
Name: "grant-key",
Usage: "grants a given public key access to an ACT",
}
SwarmAccessGrantKeysFlag = cli.StringFlag{
Name: "grant-keys",
Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
}
SwarmUpFromStdinFlag = cli.BoolFlag{
Name: "stdin",
Usage: "reads data to be uploaded from stdin",
}
SwarmUploadMimeType = cli.StringFlag{
Name: "mime",
Usage: "force mime type",
Usage: "Manually specify MIME type",
}
SwarmEncryptedFlag = cli.BoolFlag{
Name: "encrypt",
Usage: "use encrypted upload",
}
SwarmAccessPasswordFlag = cli.StringFlag{
Name: "password",
Usage: "Password",
EnvVar: SWARM_ACCESS_PASSWORD,
}
SwarmDryRunFlag = cli.BoolFlag{
Name: "dry-run",
Usage: "dry-run",
}
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
@@ -181,6 +196,18 @@ var (
Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
SwarmResourceMultihashFlag = cli.BoolFlag{
Name: "multihash",
Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
}
SwarmResourceNameFlag = cli.StringFlag{
Name: "name",
Usage: "User-defined name for the new resource",
}
SwarmResourceDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
}
)
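SwarmWantManifestFlag is a cli.BoolTFlag rather than a plain BoolFlag: a BoolTFlag defaults to true, and manifest.go reads it with ctx.GlobalBoolT, so manifests are uploaded unless --manifest=false is passed. A small self-contained sketch of the distinction against gopkg.in/urfave/cli.v1 (the flag names here just mirror the ones above):

    package main

    import (
        "fmt"
        "os"

        cli "gopkg.in/urfave/cli.v1"
    )

    func main() {
        app := cli.NewApp()
        app.Flags = []cli.Flag{
            cli.BoolTFlag{Name: "manifest", Usage: "Automatic manifest upload (default true)"},
            cli.BoolFlag{Name: "encrypt", Usage: "use encrypted upload"},
        }
        app.Action = func(ctx *cli.Context) error {
            // With no arguments this prints: manifest: true, encrypt: false.
            fmt.Println("manifest:", ctx.GlobalBoolT("manifest"), "encrypt:", ctx.GlobalBool("encrypt"))
            return nil
        }
        app.Run(os.Args)
    }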
//declare a few constant error messages, useful for later error check comparisons in test
@@ -189,12 +216,21 @@ var (
SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set"
)

// this help command gets added to any subcommand that does not define it explicitly
var defaultSubcommandHelp = cli.Command{
Action: func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) },
CustomHelpTemplate: helpTemplate,
Name: "help",
Usage: "shows this help",
Hidden: true,
}

var defaultNodeConfig = node.DefaultConfig

// This init function sets defaults so cmd/swarm can run alongside geth.
func init() {
defaultNodeConfig.Name = clientIdentifier
defaultNodeConfig.Version = params.VersionWithCommit(gitCommit)
defaultNodeConfig.Version = sv.VersionWithCommit(gitCommit)
defaultNodeConfig.P2P.ListenAddr = ":30399"
defaultNodeConfig.IPCPath = "bzzd.ipc"
// Set flag defaults for --help display.
@@ -225,6 +261,96 @@ func init() {
Flags: []cli.Flag{SwarmEncryptedFlag},
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
CustomHelpTemplate: helpTemplate,
Name: "access",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root manifest",
Subcommands: []cli.Command{
{
CustomHelpTemplate: helpTemplate,
Name: "new",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
Subcommands: []cli.Command{
{
Action: accessNewPass,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
},
Name: "pass",
Usage: "encrypts a reference with a password and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewPK,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
SwarmAccessGrantKeyFlag,
},
Name: "pk",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewACT,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
SwarmAccessGrantKeysFlag,
SwarmDryRunFlag,
},
Name: "act",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
},
},
},
},
{
CustomHelpTemplate: helpTemplate,
Name: "resource",
Usage: "(Advanced) Create and update Mutable Resources",
ArgsUsage: "<create|update|info>",
Description: "Works with Mutable Resource Updates",
Subcommands: []cli.Command{
{
Action: resourceCreate,
CustomHelpTemplate: helpTemplate,
Name: "create",
Usage: "creates a new Mutable Resource",
ArgsUsage: "<frequency>",
Description: "creates a new Mutable Resource",
Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
},
{
Action: resourceUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "updates the content of an existing Mutable Resource",
ArgsUsage: "<Manifest Address or ENS domain> <0x Hex data>",
Description: "updates the content of an existing Mutable Resource",
Flags: []cli.Flag{SwarmResourceMultihashFlag},
},
{
Action: resourceInfo,
CustomHelpTemplate: helpTemplate,
Name: "info",
Usage: "obtains information about an existing Mutable Resource",
ArgsUsage: "<Manifest Address or ENS domain>",
Description: "obtains information about an existing Mutable Resource",
},
},
},
{
Action: list,
CustomHelpTemplate: helpTemplate,
@@ -242,16 +368,13 @@ func init() {
Description: "Prints the swarm hash of file or directory",
},
{
Action: download,
Name: "down",
Flags: []cli.Flag{SwarmRecursiveFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: `
Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
`,
Action: download,
Name: "down",
Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`,
},

{
Name: "manifest",
CustomHelpTemplate: helpTemplate,
@@ -260,23 +383,23 @@ Downloads a swarm bzz uri to the given dir. When no dir is provided, working dir
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{
{
Action: add,
Action: manifestAdd,
CustomHelpTemplate: helpTemplate,
Name: "add",
Usage: "add a new path to the manifest",
ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
ArgsUsage: "<MANIFEST> <path> <hash>",
Description: "Adds a new path to the manifest",
},
{
Action: update,
Action: manifestUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "update the hash for an already existing path in the manifest",
ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
ArgsUsage: "<MANIFEST> <path> <newhash>",
Description: "Update the hash for an already existing path in the manifest",
},
{
Action: remove,
Action: manifestRemove,
CustomHelpTemplate: helpTemplate,
Name: "remove",
Usage: "removes a path from the manifest",
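For reference, hypothetical invocations matching the ArgsUsage strings above (the gateway URL is an assumption; substitute your node's --bzzapi value):

    swarm --bzzapi http://127.0.0.1:8500 manifest add <MHASH> <path> <HASH>
    swarm --bzzapi http://127.0.0.1:8500 manifest update <MHASH> <path> <newhash>
    swarm --bzzapi http://127.0.0.1:8500 manifest remove <MHASH> <path>

As the manifest.go changes below explain, the <hash> argument must now refer to a manifest with exactly one entry, whose metadata is copied into the target manifest; the optional content-type argument is gone for that reason.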
@@ -351,16 +474,14 @@ pv(1) tool to get a progress bar:
Name: "import",
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).

    swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:

    pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
`,
    pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
},
{
Action: dbClean,
@@ -376,6 +497,11 @@ pv(1) tool to get a progress bar:
// See config.go
DumpConfigCommand,
}

// append a hidden help subcommand to all commands that have subcommands
// if a help command was already defined above, that one will take precedence.
addDefaultHelpSubcommands(app.Commands)

sort.Sort(cli.CommandsByName(app.Commands))

app.Flags = []cli.Flag{
@@ -402,6 +528,7 @@ pv(1) tool to get a progress bar:
SwarmSwapAPIFlag,
SwarmSyncDisabledFlag,
SwarmSyncUpdateDelay,
SwarmLightNodeEnabled,
SwarmDeliverySkipCheckFlag,
SwarmListenAddrFlag,
SwarmPortFlag,
@@ -430,12 +557,14 @@ pv(1) tool to get a progress bar:
app.Flags = append(app.Flags, rpcFlags...)
app.Flags = append(app.Flags, debug.Flags...)
app.Flags = append(app.Flags, swarmmetrics.Flags...)
app.Flags = append(app.Flags, tracing.Flags...)
app.Before = func(ctx *cli.Context) error {
runtime.GOMAXPROCS(runtime.NumCPU())
if err := debug.Setup(ctx); err != nil {
if err := debug.Setup(ctx, ""); err != nil {
return err
}
swarmmetrics.Setup(ctx)
tracing.Setup(ctx)
return nil
}
app.After = func(ctx *cli.Context) error {
@@ -452,7 +581,8 @@ func main() {
}

func version(ctx *cli.Context) error {
fmt.Println("Version:", SWARM_VERSION)
fmt.Println(strings.Title(clientIdentifier))
fmt.Println("Version:", sv.VersionWithMeta)
if gitCommit != "" {
fmt.Println("Git Commit:", gitCommit)
}
@@ -464,6 +594,7 @@ func version(ctx *cli.Context) error {
func bzzd(ctx *cli.Context) error {
//build a valid bzzapi.Config from all available sources:
//default config, file config, command line and env vars

bzzconfig, err := buildConfig(ctx)
if err != nil {
utils.Fatalf("unable to configure swarm: %v", err)
@@ -480,12 +611,16 @@ func bzzd(ctx *cli.Context) error {
if _, err := os.Stat(bzzconfig.Path); err == nil {
cfg.DataDir = bzzconfig.Path
}

//optionally set the bootnodes before configuring the node
setSwarmBootstrapNodes(ctx, &cfg)
//setup the ethereum node
utils.SetNodeConfig(ctx, &cfg)
stack, err := node.New(&cfg)
if err != nil {
utils.Fatalf("can't create node: %v", err)
}

//a few steps need to be done after the config phase is completed,
//due to overriding behavior
initSwarmNode(bzzconfig, stack, ctx)
@@ -503,16 +638,6 @@ func bzzd(ctx *cli.Context) error {
stack.Stop()
}()

// Add bootnodes as initial peers.
if bzzconfig.BootNodes != "" {
bootnodes := strings.Split(bzzconfig.BootNodes, ",")
injectBootnodes(stack.Server(), bootnodes)
} else {
if bzzconfig.NetworkID == 3 {
injectBootnodes(stack.Server(), testbetBootNodes)
}
}

stack.Wait()
return nil
}
@@ -546,6 +671,26 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx))
}

// getPrivKey returns the private key of the specified bzzaccount
// Used only by client commands, such as `resource`
func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
// booting up the swarm node just as we do in bzzd action
bzzconfig, err := buildConfig(ctx)
if err != nil {
utils.Fatalf("unable to configure swarm: %v", err)
}
cfg := defaultNodeConfig
if _, err := os.Stat(bzzconfig.Path); err == nil {
cfg.DataDir = bzzconfig.Path
}
utils.SetNodeConfig(ctx, &cfg)
stack, err := node.New(&cfg)
if err != nil {
utils.Fatalf("can't create node: %v", err)
}
return getAccount(bzzconfig.BzzAccount, ctx, stack)
}

func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey {
var a accounts.Account
var err error
@@ -600,13 +745,32 @@ func getPassPhrase(prompt string, i int, passwords []string) string {
return password
}

func injectBootnodes(srv *p2p.Server, nodes []string) {
for _, url := range nodes {
n, err := discover.ParseNode(url)
if err != nil {
log.Error("Invalid swarm bootnode", "err", err)
continue
// addDefaultHelpSubcommand scans through defined CLI commands and adds
// a basic help subcommand to each
// if a help command is already defined, it will take precedence over the default.
func addDefaultHelpSubcommands(commands []cli.Command) {
for i := range commands {
cmd := &commands[i]
if cmd.Subcommands != nil {
cmd.Subcommands = append(cmd.Subcommands, defaultSubcommandHelp)
addDefaultHelpSubcommands(cmd.Subcommands)
}
srv.AddPeer(n)
}
}

func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) || ctx.GlobalIsSet(utils.BootnodesV4Flag.Name) {
return
}

cfg.P2P.BootstrapNodes = []*discover.Node{}

for _, url := range SwarmBootnodes {
node, err := discover.ParseNode(url)
if err != nil {
log.Error("Bootstrap URL invalid", "enode", url, "err", err)
continue // skip unparsable entries instead of appending a nil node
}
cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node)
}
log.Debug("added default swarm bootnodes", "length", len(cfg.P2P.BootstrapNodes))
}
@@ -18,10 +18,8 @@
package main

import (
"encoding/json"
"fmt"
"mime"
"path/filepath"
"os"
"strings"

"github.com/ethereum/go-ethereum/cmd/utils"
@@ -30,127 +28,118 @@ import (
"gopkg.in/urfave/cli.v1"
)

const bzzManifestJSON = "application/bzz-manifest+json"

func add(ctx *cli.Context) {
// manifestAdd adds a new entry to the manifest at the given path.
// New entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.
// On success, this function will print the new (updated) manifest's hash.
func manifestAdd(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 3 {
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH> [<content-type>]")
if len(args) != 3 {
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
}

var (
mhash = args[0]
path = args[1]
hash = args[2]

ctype string
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot api.Manifest
)

if len(args) > 3 {
ctype = args[3]
} else {
ctype = mime.TypeByExtension(filepath.Ext(path))
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)

m, _, err := client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Error downloading manifest to add: %v", err)
}
l := len(m.Entries)
if l == 0 {
utils.Fatalf("No entries in manifest %s", hash)
} else if l > 1 {
utils.Fatalf("Too many entries in manifest %s", hash)
}

newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
newManifest := addEntryToManifest(client, mhash, path, m.Entries[0])
fmt.Println(newManifest)

if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
fmt.Println(string(mrootJSON))
return
}
}
func update(ctx *cli.Context) {

// manifestUpdate replaces an existing entry of the manifest at the given path.
// New entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.
// On success, this function will print the hash of the updated manifest.
func manifestUpdate(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 3 {
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH>")
if len(args) != 3 {
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
}

var (
mhash = args[0]
path = args[1]
hash = args[2]

ctype string
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot api.Manifest
)
if len(args) > 3 {
ctype = args[3]
} else {
ctype = mime.TypeByExtension(filepath.Ext(path))

bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)

m, _, err := client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Error downloading manifest to update: %v", err)
}
l := len(m.Entries)
if l == 0 {
utils.Fatalf("No entries in manifest %s", hash)
} else if l > 1 {
utils.Fatalf("Too many entries in manifest %s", hash)
}

newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
newManifest, _, defaultEntryUpdated := updateEntryInManifest(client, mhash, path, m.Entries[0], true)
if defaultEntryUpdated {
// Print informational message to stderr
// allowing the user to get the new manifest hash from stdout
// without the need to parse the complete output.
fmt.Fprintln(os.Stderr, "Manifest default entry is updated, too")
}
fmt.Println(newManifest)

if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
fmt.Println(string(mrootJSON))
return
}
}
func remove(ctx *cli.Context) {
// manifestRemove removes an existing entry of the manifest at the given path.
// On success, this function will print the hash of the manifest which does not
// contain the path.
func manifestRemove(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 2 {
utils.Fatalf("Need at least two arguments <MHASH> <path>")
if len(args) != 2 {
utils.Fatalf("Need exactly two arguments <MHASH> <path>")
}

var (
mhash = args[0]
path = args[1]

wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot api.Manifest
)

newManifest := removeEntryFromManifest(ctx, mhash, path)
fmt.Println(newManifest)
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := swarm.NewClient(bzzapi)

if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
fmt.Println(string(mrootJSON))
return
}
newManifest := removeEntryFromManifest(client, mhash, path)
fmt.Println(newManifest)
}

func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {

var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
longestPathEntry = api.ManifestEntry{}
)
func addEntryToManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry) string {
var longestPathEntry = api.ManifestEntry{}

mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}

//TODO: check if the "hash" to add is valid and present in swarm
_, _, err = client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Hash to add is not present: %v", err)
}

// See if the path is in this Manifest or if we have to dig deeper
for _, entry := range mroot.Entries {
if path == entry.Path {
for _, e := range mroot.Entries {
if path == e.Path {
utils.Fatalf("Path %s already present, not adding anything", path)
} else {
if entry.ContentType == bzzManifestJSON {
prfxlen := strings.HasPrefix(path, entry.Path)
if e.ContentType == api.ManifestType {
prfxlen := strings.HasPrefix(path, e.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) {
longestPathEntry = entry
longestPathEntry = e
}
}
}
@@ -159,25 +148,21 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
if longestPathEntry.Path != "" {
// Load the child Manifest and add the entry there
newPath := path[len(longestPathEntry.Path):]
newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
newHash := addEntryToManifest(client, longestPathEntry.Hash, newPath, entry)

// Replace the hash for parent Manifests
newMRoot := &api.Manifest{}
for _, entry := range mroot.Entries {
if longestPathEntry.Path == entry.Path {
entry.Hash = newHash
for _, e := range mroot.Entries {
if longestPathEntry.Path == e.Path {
e.Hash = newHash
}
newMRoot.Entries = append(newMRoot.Entries, entry)
newMRoot.Entries = append(newMRoot.Entries, e)
}
mroot = newMRoot
} else {
// Add the entry in the leaf Manifest
newEntry := api.ManifestEntry{
Hash: hash,
Path: path,
ContentType: ctype,
}
mroot.Entries = append(mroot.Entries, newEntry)
entry.Path = path
mroot.Entries = append(mroot.Entries, entry)
}

newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
@@ -185,14 +170,16 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
utils.Fatalf("Manifest upload failed: %v", err)
}
return newManifestHash

}
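The descent logic above picks the nested-manifest entry whose path is the longest prefix of the target path and recurses with the remainder. A toy, standard-library-only illustration of that step, using the "robots." nesting that the new test file below exercises:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        entryPath := "robots." // path of a nested-manifest entry in the root manifest
        path := "robots.json"  // path being added
        if strings.HasPrefix(path, entryPath) {
            // Recurse into the child manifest with the remainder of the path.
            fmt.Println("descend with remainder:", path[len(entryPath):]) // prints "json"
        }
    }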
func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {

// updateEntryInManifest updates an existing entry of path with a new one in the manifest with the provided mhash,
// finding the path recursively through all nested manifests. Argument isRoot is used for default
// entry update detection. If the updated entry has the same hash as the default entry, then the
// default entry in root manifest will be updated too.
// Returned values are the new manifest hash, hash of the entry that was replaced by the new entry and
// a bool that is true if default entry is updated.
func updateEntryInManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry, isRoot bool) (newManifestHash, oldHash string, defaultEntryUpdated bool) {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
newEntry = api.ManifestEntry{}
longestPathEntry = api.ManifestEntry{}
)
@@ -202,17 +189,18 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
utils.Fatalf("Manifest download failed: %v", err)
}

//TODO: check if the "hash" with which to update is valid and present in swarm

// See if the path is in this Manifest or if we have to dig deeper
for _, entry := range mroot.Entries {
if path == entry.Path {
newEntry = entry
for _, e := range mroot.Entries {
if path == e.Path {
newEntry = e
// keep the reference of the hash of the entry that should be replaced
// for default entry detection
oldHash = e.Hash
} else {
if entry.ContentType == bzzManifestJSON {
prfxlen := strings.HasPrefix(path, entry.Path)
if e.ContentType == api.ManifestType {
prfxlen := strings.HasPrefix(path, e.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) {
longestPathEntry = entry
longestPathEntry = e
}
}
}
@@ -225,50 +213,50 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
if longestPathEntry.Path != "" {
// Load the child Manifest and update the entry there
newPath := path[len(longestPathEntry.Path):]
newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
var newHash string
newHash, oldHash, _ = updateEntryInManifest(client, longestPathEntry.Hash, newPath, entry, false)

// Replace the hash for parent Manifests
newMRoot := &api.Manifest{}
for _, entry := range mroot.Entries {
if longestPathEntry.Path == entry.Path {
entry.Hash = newHash
for _, e := range mroot.Entries {
if longestPathEntry.Path == e.Path {
e.Hash = newHash
}
newMRoot.Entries = append(newMRoot.Entries, entry)
newMRoot.Entries = append(newMRoot.Entries, e)

}
mroot = newMRoot
}

if newEntry.Path != "" {
// update the manifest if the new entry is found and
// check if default entry should be updated
if newEntry.Path != "" || isRoot {
// Replace the hash for leaf Manifest
newMRoot := &api.Manifest{}
for _, entry := range mroot.Entries {
if newEntry.Path == entry.Path {
myEntry := api.ManifestEntry{
Hash: hash,
Path: entry.Path,
ContentType: ctype,
}
newMRoot.Entries = append(newMRoot.Entries, myEntry)
} else {
for _, e := range mroot.Entries {
if newEntry.Path == e.Path {
entry.Path = e.Path
newMRoot.Entries = append(newMRoot.Entries, entry)
} else if isRoot && e.Path == "" && e.Hash == oldHash {
entry.Path = e.Path
newMRoot.Entries = append(newMRoot.Entries, entry)
defaultEntryUpdated = true
} else {
newMRoot.Entries = append(newMRoot.Entries, e)
}
}
mroot = newMRoot
}

newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
newManifestHash, err = client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
return newManifestHash
return newManifestHash, oldHash, defaultEntryUpdated
}
func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {

func removeEntryFromManifest(client *swarm.Client, mhash, path string) string {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
entryToRemove = api.ManifestEntry{}
longestPathEntry = api.ManifestEntry{}
)
@@ -283,7 +271,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
if path == entry.Path {
entryToRemove = entry
} else {
if entry.ContentType == bzzManifestJSON {
if entry.ContentType == api.ManifestType {
prfxlen := strings.HasPrefix(path, entry.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) {
longestPathEntry = entry
@@ -299,7 +287,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
if longestPathEntry.Path != "" {
// Load the child Manifest and remove the entry there
newPath := path[len(longestPathEntry.Path):]
newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
newHash := removeEntryFromManifest(client, longestPathEntry.Hash, newPath)

// Replace the hash for parent Manifests
newMRoot := &api.Manifest{}
cmd/swarm/manifest_test.go (new file, 579 lines)
@@ -0,0 +1,579 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"

"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)
// TestManifestChange tests manifest add, update and remove
// cli commands without encryption.
func TestManifestChange(t *testing.T) {
testManifestChange(t, false)
}

// TestManifestChangeEncrypted tests manifest add, update and remove
// cli commands with encryption enabled.
func TestManifestChangeEncrypted(t *testing.T) {
testManifestChange(t, true)
}

// testManifestChange performs cli commands:
// - manifest add
// - manifest update
// - manifest remove
// on a manifest, testing the functionality of these
// commands on paths that are in the root manifest or a nested one.
// Argument encrypt controls whether to use encryption or not.
func testManifestChange(t *testing.T, encrypt bool) {
t.Parallel()
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()

tmp, err := ioutil.TempDir("", "swarm-manifest-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)

origDir := filepath.Join(tmp, "orig")
if err := os.Mkdir(origDir, 0777); err != nil {
t.Fatal(err)
}

indexDataFilename := filepath.Join(origDir, "index.html")
err = ioutil.WriteFile(indexDataFilename, []byte("<h1>Test</h1>"), 0666)
if err != nil {
t.Fatal(err)
}
// File paths robots.txt and robots.html share the same prefix "robots."
// which will result in a manifest with a nested manifest under path "robots.".
// This will allow testing manifest changes on both root and nested manifest.
err = ioutil.WriteFile(filepath.Join(origDir, "robots.txt"), []byte("Disallow: /"), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(origDir, "robots.html"), []byte("<strong>No Robots Allowed</strong>"), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(origDir, "mutants.txt"), []byte("Frank\nMarcus"), 0666)
if err != nil {
t.Fatal(err)
}

args := []string{
"--bzzapi",
cluster.Nodes[0].URL,
"--recursive",
"--defaultpath",
indexDataFilename,
"up",
origDir,
}
if encrypt {
args = append(args, "--encrypt")
}

origManifestHash := runSwarmExpectHash(t, args...)

checkHashLength(t, origManifestHash, encrypt)

client := swarm.NewClient(cluster.Nodes[0].URL)
// upload a new file and use its manifest to add it to the original manifest.
t.Run("add", func(t *testing.T) {
humansData := []byte("Ann\nBob")
humansDataFilename := filepath.Join(tmp, "humans.txt")
err = ioutil.WriteFile(humansDataFilename, humansData, 0666)
if err != nil {
t.Fatal(err)
}

humansManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
humansDataFilename,
)

newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"add",
origManifestHash,
"humans.txt",
humansManifestHash,
)

checkHashLength(t, newManifestHash, encrypt)

newManifest := downloadManifest(t, client, newManifestHash, encrypt)

var found bool
for _, e := range newManifest.Entries {
if e.Path == "humans.txt" {
found = true
if e.Size != int64(len(humansData)) {
t.Errorf("expected humans.txt size %v, got %v", len(humansData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for humans.txt")
}
ct := "text/plain; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break
}
}
if !found {
t.Fatal("no humans.txt in new manifest")
}

checkFile(t, client, newManifestHash, "humans.txt", humansData)
})
// upload a new file and use its manifest to add it to the original manifest,
// but ensure that the file will be in the nested manifest of the original one.
t.Run("add nested", func(t *testing.T) {
robotsData := []byte(`{"disallow": "/"}`)
robotsDataFilename := filepath.Join(tmp, "robots.json")
err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666)
if err != nil {
t.Fatal(err)
}

robotsManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
robotsDataFilename,
)

newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"add",
origManifestHash,
"robots.json",
robotsManifestHash,
)

checkHashLength(t, newManifestHash, encrypt)

newManifest := downloadManifest(t, client, newManifestHash, encrypt)

var found bool
loop:
for _, e := range newManifest.Entries {
if e.Path == "robots." {
nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
for _, e := range nestedManifest.Entries {
if e.Path == "json" {
found = true
if e.Size != int64(len(robotsData)) {
t.Errorf("expected robots.json size %v, got %v", len(robotsData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for robots.json")
}
ct := "application/json"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break loop
}
}
}
}
if !found {
t.Fatal("no robots.json in new manifest")
}

checkFile(t, client, newManifestHash, "robots.json", robotsData)
})
// upload a new file and use its manifest to change the file in the original manifest.
t.Run("update", func(t *testing.T) {
indexData := []byte("<h1>Ethereum Swarm</h1>")
indexDataFilename := filepath.Join(tmp, "index.html")
err = ioutil.WriteFile(indexDataFilename, indexData, 0666)
if err != nil {
t.Fatal(err)
}

indexManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
indexDataFilename,
)

newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"update",
origManifestHash,
"index.html",
indexManifestHash,
)

checkHashLength(t, newManifestHash, encrypt)

newManifest := downloadManifest(t, client, newManifestHash, encrypt)

var found bool
for _, e := range newManifest.Entries {
if e.Path == "index.html" {
found = true
if e.Size != int64(len(indexData)) {
t.Errorf("expected index.html size %v, got %v", len(indexData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for index.html")
}
ct := "text/html; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break
}
}
if !found {
t.Fatal("no index.html in new manifest")
}

checkFile(t, client, newManifestHash, "index.html", indexData)

// check default entry change
checkFile(t, client, newManifestHash, "", indexData)
})
// upload a new file and use its manifest to change the file in the original manifest,
// but ensure that the file is in the nested manifest of the original one.
t.Run("update nested", func(t *testing.T) {
robotsData := []byte(`<strong>Only humans allowed!!!</strong>`)
robotsDataFilename := filepath.Join(tmp, "robots.html")
err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666)
if err != nil {
t.Fatal(err)
}

humansManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"up",
robotsDataFilename,
)

newManifestHash := runSwarmExpectHash(t,
"--bzzapi",
cluster.Nodes[0].URL,
"manifest",
"update",
origManifestHash,
"robots.html",
humansManifestHash,
)

checkHashLength(t, newManifestHash, encrypt)

newManifest := downloadManifest(t, client, newManifestHash, encrypt)

var found bool
loop:
for _, e := range newManifest.Entries {
if e.Path == "robots." {
nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
for _, e := range nestedManifest.Entries {
if e.Path == "html" {
found = true
if e.Size != int64(len(robotsData)) {
t.Errorf("expected robots.html size %v, got %v", len(robotsData), e.Size)
}
if e.ModTime.IsZero() {
t.Errorf("got zero mod time for robots.html")
}
ct := "text/html; charset=utf-8"
if e.ContentType != ct {
t.Errorf("expected content type %q, got %q", ct, e.ContentType)
}
break loop
}
}
}
}
if !found {
t.Fatal("no robots.html in new manifest")
}

checkFile(t, client, newManifestHash, "robots.html", robotsData)
})
// remove a file from the manifest.
|
||||
t.Run("remove", func(t *testing.T) {
|
||||
newManifestHash := runSwarmExpectHash(t,
|
||||
"--bzzapi",
|
||||
cluster.Nodes[0].URL,
|
||||
"manifest",
|
||||
"remove",
|
||||
origManifestHash,
|
||||
"mutants.txt",
|
||||
)
|
||||
|
||||
checkHashLength(t, newManifestHash, encrypt)
|
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
|
||||
|
||||
var found bool
|
||||
for _, e := range newManifest.Entries {
|
||||
if e.Path == "mutants.txt" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
t.Fatal("mutants.txt is not removed")
|
||||
}
|
||||
})
|
||||
|
||||
// remove a file from the manifest, but ensure that the file is in
|
||||
// the nested manifest of the original one.
|
||||
t.Run("remove nested", func(t *testing.T) {
|
||||
newManifestHash := runSwarmExpectHash(t,
|
||||
"--bzzapi",
|
||||
cluster.Nodes[0].URL,
|
||||
"manifest",
|
||||
"remove",
|
||||
origManifestHash,
|
||||
"robots.html",
|
||||
)
|
||||
|
||||
checkHashLength(t, newManifestHash, encrypt)
|
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt)
|
||||
|
||||
var found bool
|
||||
loop:
|
||||
for _, e := range newManifest.Entries {
|
||||
if e.Path == "robots." {
|
||||
nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
|
||||
for _, e := range nestedManifest.Entries {
|
||||
if e.Path == "html" {
|
||||
found = true
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if found {
|
||||
t.Fatal("robots.html in not removed")
        }
    })
}

// TestNestedDefaultEntryUpdate tests if the default entry is updated
// if the file in nested manifest used for it is also updated.
func TestNestedDefaultEntryUpdate(t *testing.T) {
    testNestedDefaultEntryUpdate(t, false)
}

// TestNestedDefaultEntryUpdateEncrypted tests if the default entry
// of encrypted upload is updated if the file in nested manifest
// used for it is also updated.
func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) {
    testNestedDefaultEntryUpdate(t, true)
}

func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {
    t.Parallel()
    cluster := newTestCluster(t, 1)
    defer cluster.Shutdown()

    tmp, err := ioutil.TempDir("", "swarm-manifest-test")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)

    origDir := filepath.Join(tmp, "orig")
    if err := os.Mkdir(origDir, 0777); err != nil {
        t.Fatal(err)
    }

    indexData := []byte("<h1>Test</h1>")
    indexDataFilename := filepath.Join(origDir, "index.html")
    err = ioutil.WriteFile(indexDataFilename, indexData, 0666)
    if err != nil {
        t.Fatal(err)
    }
    // Add another file with common prefix as the default entry to test updates of
    // default entry with nested manifests.
    err = ioutil.WriteFile(filepath.Join(origDir, "index.txt"), []byte("Test"), 0666)
    if err != nil {
        t.Fatal(err)
    }

    args := []string{
        "--bzzapi",
        cluster.Nodes[0].URL,
        "--recursive",
        "--defaultpath",
        indexDataFilename,
        "up",
        origDir,
    }
    if encrypt {
        args = append(args, "--encrypt")
    }

    origManifestHash := runSwarmExpectHash(t, args...)

    checkHashLength(t, origManifestHash, encrypt)

    client := swarm.NewClient(cluster.Nodes[0].URL)

    newIndexData := []byte("<h1>Ethereum Swarm</h1>")
    newIndexDataFilename := filepath.Join(tmp, "index.html")
    err = ioutil.WriteFile(newIndexDataFilename, newIndexData, 0666)
    if err != nil {
        t.Fatal(err)
    }

    newIndexManifestHash := runSwarmExpectHash(t,
        "--bzzapi",
        cluster.Nodes[0].URL,
        "up",
        newIndexDataFilename,
    )

    newManifestHash := runSwarmExpectHash(t,
        "--bzzapi",
        cluster.Nodes[0].URL,
        "manifest",
        "update",
        origManifestHash,
        "index.html",
        newIndexManifestHash,
    )

    checkHashLength(t, newManifestHash, encrypt)

    newManifest := downloadManifest(t, client, newManifestHash, encrypt)

    var found bool
    for _, e := range newManifest.Entries {
        if e.Path == "index." {
            found = true
            newManifest = downloadManifest(t, client, e.Hash, encrypt)
            break
        }
    }
    if !found {
        t.Fatal("no index. path in new manifest")
    }

    found = false
    for _, e := range newManifest.Entries {
        if e.Path == "html" {
            found = true
            if e.Size != int64(len(newIndexData)) {
                t.Errorf("expected index.html size %v, got %v", len(newIndexData), e.Size)
            }
            if e.ModTime.IsZero() {
                t.Errorf("got zero mod time for index.html")
            }
            ct := "text/html; charset=utf-8"
            if e.ContentType != ct {
                t.Errorf("expected content type %q, got %q", ct, e.ContentType)
            }
            break
        }
    }
    if !found {
        t.Fatal("no html in new manifest")
    }

    checkFile(t, client, newManifestHash, "index.html", newIndexData)

    // check default entry change
    checkFile(t, client, newManifestHash, "", newIndexData)
}

func runSwarmExpectHash(t *testing.T, args ...string) (hash string) {
    t.Helper()
    hashRegexp := `[a-f\d]{64,128}`
    up := runSwarm(t, args...)
    _, matches := up.ExpectRegexp(hashRegexp)
    up.ExpectExit()

    if len(matches) < 1 {
        t.Fatal("no matches found")
    }
    return matches[0]
}

func checkHashLength(t *testing.T, hash string, encrypted bool) {
    t.Helper()
    l := len(hash)
    if encrypted && l != 128 {
        t.Errorf("expected hash length 128, got %v", l)
    }
    if !encrypted && l != 64 {
        t.Errorf("expected hash length 64, got %v", l)
    }
}

func downloadManifest(t *testing.T, client *swarm.Client, hash string, encrypted bool) (manifest *api.Manifest) {
    t.Helper()
    m, isEncrypted, err := client.DownloadManifest(hash)
    if err != nil {
        t.Fatal(err)
    }

    if encrypted != isEncrypted {
        t.Error("new manifest encryption flag is not correct")
    }
    return m
}

func checkFile(t *testing.T, client *swarm.Client, hash, path string, expected []byte) {
    t.Helper()
    f, err := client.Download(hash, path)
    if err != nil {
        t.Fatal(err)
    }

    got, err := ioutil.ReadAll(f)
    if err != nil {
        t.Fatal(err)
    }
    if !bytes.Equal(got, expected) {
        t.Errorf("expected file content %q, got %q", expected, got)
    }
}
cmd/swarm/mru.go (Normal file, 169 lines)
@@ -0,0 +1,169 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Command resource allows the user to create and update signed mutable resource updates
package main

import (
    "fmt"
    "strconv"
    "strings"

    "github.com/ethereum/go-ethereum/common/hexutil"

    "github.com/ethereum/go-ethereum/cmd/utils"
    swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    "github.com/ethereum/go-ethereum/swarm/storage/mru"
    "gopkg.in/urfave/cli.v1"
)

func NewGenericSigner(ctx *cli.Context) mru.Signer {
    return mru.NewGenericSigner(getPrivKey(ctx))
}

// swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
// swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
// swarm resource info <Manifest Address or ENS domain>
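//
// Illustrative invocations (hypothetical frequency, name and payload values,
// not taken from this changeset; <manifest> stands for the address printed
// by the create command):
//
//   swarm resource create 300 --name mysite --data 0x68656c6c6f
//   swarm resource update <manifest> 0x776f726c64
//   swarm resource info <manifest>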

func resourceCreate(ctx *cli.Context) {
    args := ctx.Args()

    var (
        bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
        client      = swarm.NewClient(bzzapi)
        multihash   = ctx.Bool(SwarmResourceMultihashFlag.Name)
        initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
        name        = ctx.String(SwarmResourceNameFlag.Name)
    )

    if len(args) < 1 {
        fmt.Println("Incorrect number of arguments")
        cli.ShowCommandHelpAndExit(ctx, "create", 1)
        return
    }
    signer := NewGenericSigner(ctx)
    frequency, err := strconv.ParseUint(args[0], 10, 64)
    if err != nil {
        fmt.Printf("Frequency formatting error: %s\n", err.Error())
        cli.ShowCommandHelpAndExit(ctx, "create", 1)
        return
    }

    metadata := mru.ResourceMetadata{
        Name:      name,
        Frequency: frequency,
        Owner:     signer.Address(),
    }

    var newResourceRequest *mru.Request
    if initialData != "" {
        initialDataBytes, err := hexutil.Decode(initialData)
        if err != nil {
            fmt.Printf("Error parsing data: %s\n", err.Error())
            cli.ShowCommandHelpAndExit(ctx, "create", 1)
            return
        }
        newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
        if err != nil {
            utils.Fatalf("Error creating new resource request: %s", err)
        }
        newResourceRequest.SetData(initialDataBytes, multihash)
        if err = newResourceRequest.Sign(signer); err != nil {
            utils.Fatalf("Error signing resource update: %s", err.Error())
        }
    } else {
        newResourceRequest, err = mru.NewCreateRequest(&metadata)
        if err != nil {
            utils.Fatalf("Error creating new resource request: %s", err)
        }
    }

    manifestAddress, err := client.CreateResource(newResourceRequest)
    if err != nil {
        utils.Fatalf("Error creating resource: %s", err.Error())
        return
    }
    fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)

}

func resourceUpdate(ctx *cli.Context) {
    args := ctx.Args()

    var (
        bzzapi    = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
        client    = swarm.NewClient(bzzapi)
        multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
    )

    if len(args) < 2 {
        fmt.Println("Incorrect number of arguments")
        cli.ShowCommandHelpAndExit(ctx, "update", 1)
        return
    }
    signer := NewGenericSigner(ctx)
    manifestAddressOrDomain := args[0]
    data, err := hexutil.Decode(args[1])
    if err != nil {
        utils.Fatalf("Error parsing data: %s", err.Error())
        return
    }

    // Retrieve resource status and metadata out of the manifest
    updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
    if err != nil {
        utils.Fatalf("Error retrieving resource status: %s", err.Error())
    }

    // set the new data
    updateRequest.SetData(data, multihash)

    // sign update
    if err = updateRequest.Sign(signer); err != nil {
        utils.Fatalf("Error signing resource update: %s", err.Error())
    }

    // post update
    err = client.UpdateResource(updateRequest)
    if err != nil {
        utils.Fatalf("Error updating resource: %s", err.Error())
        return
    }
}

func resourceInfo(ctx *cli.Context) {
    var (
        bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
        client = swarm.NewClient(bzzapi)
    )
    args := ctx.Args()
    if len(args) < 1 {
        fmt.Println("Incorrect number of arguments.")
        cli.ShowCommandHelpAndExit(ctx, "info", 1)
        return
    }
    manifestAddressOrDomain := args[0]
    metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
    if err != nil {
        utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
        return
    }
    encodedMetadata, err := metadata.MarshalJSON()
    if err != nil {
        utils.Fatalf("Error encoding metadata to JSON for display: %s", err)
    }
    fmt.Println(string(encodedMetadata))
}

@@ -17,12 +17,17 @@
package main

import (
    "context"
    "crypto/ecdsa"
    "fmt"
    "io/ioutil"
    "net"
    "os"
    "path"
    "path/filepath"
    "runtime"
    "sync"
    "syscall"
    "testing"
    "time"

@@ -172,14 +177,15 @@ func (c *testCluster) Cleanup() {
}

type testNode struct {
    Name    string
    Addr    string
    URL     string
    Enode   string
    Dir     string
    IpcPath string
    Client  *rpc.Client
    Cmd     *cmdtest.TestCmd
    Name       string
    Addr       string
    URL        string
    Enode      string
    Dir        string
    IpcPath    string
    PrivateKey *ecdsa.PrivateKey
    Client     *rpc.Client
    Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

@@ -218,14 +224,12 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
}

    // assign ports
    httpPort, err := assignTCPPort()
    if err != nil {
        t.Fatal(err)
    }
    p2pPort, err := assignTCPPort()
    ports, err := getAvailableTCPPorts(2)
    if err != nil {
        t.Fatal(err)
    }
    p2pPort := ports[0]
    httpPort := ports[1]

    // start the node
    node.Cmd = runSwarm(t,
@@ -246,6 +250,17 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
        }
    }()

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // ensure that all ports have active listeners
    // so that the next node will not get the same
    // ports when calling getAvailableTCPPorts
    err = waitTCPPorts(ctx, ports...)
    if err != nil {
        t.Fatal(err)
    }

    // wait for the node to start
    for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
        node.Client, err = rpc.Dial(conf.IPCEndpoint())
@@ -277,17 +292,19 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
func newTestNode(t *testing.T, dir string) *testNode {

    conf, account := getTestAccount(t, dir)
    node := &testNode{Dir: dir}
    ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)

    pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

    node := &testNode{Dir: dir, PrivateKey: pk}

    // assign ports
    httpPort, err := assignTCPPort()
    if err != nil {
        t.Fatal(err)
    }
    p2pPort, err := assignTCPPort()
    ports, err := getAvailableTCPPorts(2)
    if err != nil {
        t.Fatal(err)
    }
    p2pPort := ports[0]
    httpPort := ports[1]

    // start the node
    node.Cmd = runSwarm(t,
@@ -308,6 +325,17 @@ func newTestNode(t *testing.T, dir string) *testNode {
        }
    }()

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // ensure that all ports have active listeners
    // so that the next node will not get the same
    // ports when calling getAvailableTCPPorts
    err = waitTCPPorts(ctx, ports...)
    if err != nil {
        t.Fatal(err)
    }

    // wait for the node to start
    for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
        node.Client, err = rpc.Dial(conf.IPCEndpoint())
@@ -343,15 +371,92 @@ func (n *testNode) Shutdown() {
    }
}

func assignTCPPort() (string, error) {
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        return "", err
// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Function assignTCPPort cannot be called in sequence
// and guarantee that the same port will not be returned in
// different calls, as the listener is closed within the function,
// not after all listeners are started and unique available
// ports are selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
    for i := 0; i < count; i++ {
        l, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            return nil, err
        }
        // defer close in the loop to be sure the same port will not
        // be selected in the next iteration
        defer l.Close()

        _, port, err := net.SplitHostPort(l.Addr().String())
        if err != nil {
            return nil, err
        }
        ports = append(ports, port)
    }
    return ports, nil
}

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs dialers
// for all ports in parallel, and returns the first
// encountered error.
// See also waitTCPPort.
func waitTCPPorts(ctx context.Context, ports ...string) error {
    var err error
    // mu guards the err variable that is assigned
    // from other goroutines
    var mu sync.Mutex

    // cancel stops all goroutines
    // when the first error is encountered,
    // to prevent unnecessary waiting
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    var wg sync.WaitGroup
    for _, port := range ports {
        wg.Add(1)
        go func(port string) {
            defer wg.Done()

            e := waitTCPPort(ctx, port)

            mu.Lock()
            defer mu.Unlock()
            if e != nil && err == nil {
                err = e
                cancel()
            }
        }(port)
    }
    wg.Wait()

    return err
}

// waitTCPPort blocks until a tcp connection can be established
// on a provided port. It has a maximum timeout of 3 minutes
// to prevent long waiting, which can be shortened with
// a provided context instance. The dialer has a 10 second timeout
// in every iteration, and a connection refused error is
// retried at 100 millisecond intervals.
func waitTCPPort(ctx context.Context, port string) error {
    ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
    defer cancel()

    for {
        c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
        if err != nil {
            if operr, ok := err.(*net.OpError); ok {
                if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
                    time.Sleep(100 * time.Millisecond)
                    continue
                }
            }
            return err
        }
        return c.Close()
    }
    l.Close()
    _, port, err := net.SplitHostPort(l.Addr().String())
    if err != nil {
        return "", err
    }
    return port, nil
}
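
Taken together, the two helpers above are meant to be used in the pattern the node-startup code in this diff follows: reserve the ports up front, start the listeners, then block until they all accept connections. A minimal sketch (assuming a *testing.T in scope, mirroring the call sites earlier in this changeset):

    ports, err := getAvailableTCPPorts(2)
    if err != nil {
        t.Fatal(err)
    }
    // ... start servers bound to ports[0] and ports[1] here ...
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    if err := waitTCPPorts(ctx, ports...); err != nil {
        t.Fatal(err)
    }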

@@ -37,8 +37,14 @@ import (
)

func generateEndpoints(scheme string, cluster string, from int, to int) {
    if cluster == "prod" {
        cluster = ""
    } else {
        cluster = cluster + "."
    }
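    // Illustrative result (hypothetical port 8501, not from this changeset):
    // cluster "prod" yields http://8501.swarm-gateways.net, while cluster
    // "test" yields http://8501.test.swarm-gateways.net via the loop below.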

    for port := from; port <= to; port++ {
        endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
        endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster))
    }

    if includeLocalhost {

@@ -71,8 +77,9 @@ func cliUploadAndSync(c *cli.Context) error {
    log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))

    if filesize < 10 {
        time.Sleep(15 * time.Second)
        time.Sleep(35 * time.Second)
    } else {
        time.Sleep(15 * time.Second)
        time.Sleep(2 * time.Duration(filesize) * time.Second)
    }

@@ -102,7 +109,7 @@ func cliUploadAndSync(c *cli.Context) error {
// fetch gets the requested `hash` from the `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
    log.Trace("sleeping", "ruid", ruid)
    time.Sleep(1 * time.Second)
    time.Sleep(5 * time.Second)

    log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
    res, err := http.Get(endpoint + "/bzz:/" + hash + "/")

@@ -98,6 +98,17 @@ func upload(ctx *cli.Context) {
        if !recursive {
            return "", errors.New("Argument is a directory and recursive upload is disabled")
        }
        if defaultPath != "" {
            // construct absolute default path
            absDefaultPath, _ := filepath.Abs(defaultPath)
            absFile, _ := filepath.Abs(file)
            // make sure absolute directory ends with only one "/"
            // to trim it from absolute default path and get relative default path
            absFile = strings.TrimRight(absFile, "/") + "/"
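            // e.g. (illustrative values, not from this changeset):
            // file "/tmp/site" and defaultPath "/tmp/site/index.html"
            // give absFile "/tmp/site/" and defaultPath "index.html"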
            if absDefaultPath != "" && absFile != "" && strings.HasPrefix(absDefaultPath, absFile) {
                defaultPath = strings.TrimPrefix(absDefaultPath, absFile)
            }
        }
        return client.UploadDirectory(file, defaultPath, "", toEncrypt)
    }
} else {

@@ -273,3 +273,84 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
        }
    }
}

// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute
// default paths and with encryption.
func TestCLISwarmUpDefaultPath(t *testing.T) {
    testCLISwarmUpDefaultPath(false, false, t)
    testCLISwarmUpDefaultPath(false, true, t)
    testCLISwarmUpDefaultPath(true, false, t)
    testCLISwarmUpDefaultPath(true, true, t)
}

func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
    cluster := newTestCluster(t, 1)
    defer cluster.Shutdown()

    tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)

    err = ioutil.WriteFile(filepath.Join(tmp, "index.html"), []byte("<h1>Test</h1>"), 0666)
    if err != nil {
        t.Fatal(err)
    }
    err = ioutil.WriteFile(filepath.Join(tmp, "robots.txt"), []byte("Disallow: /"), 0666)
    if err != nil {
        t.Fatal(err)
    }

    defaultPath := "index.html"
    if absDefaultPath {
        defaultPath = filepath.Join(tmp, defaultPath)
    }

    args := []string{
        "--bzzapi",
        cluster.Nodes[0].URL,
        "--recursive",
        "--defaultpath",
        defaultPath,
        "up",
        tmp,
    }
    if toEncrypt {
        args = append(args, "--encrypt")
    }

    up := runSwarm(t, args...)
    hashRegexp := `[a-f\d]{64,128}`
    _, matches := up.ExpectRegexp(hashRegexp)
    up.ExpectExit()
    hash := matches[0]

    client := swarm.NewClient(cluster.Nodes[0].URL)

    m, isEncrypted, err := client.DownloadManifest(hash)
    if err != nil {
        t.Fatal(err)
    }

    if toEncrypt != isEncrypted {
        t.Error("downloaded manifest is not encrypted")
    }

    var found bool
    var entriesCount int
    for _, e := range m.Entries {
        entriesCount++
        if e.Path == "" {
            found = true
        }
    }

    if !found {
        t.Error("manifest default entry was not found")
    }

    if entriesCount != 3 {
        t.Errorf("manifest contains %v entries, expected %v", entriesCount, 3)
    }
}

@@ -24,7 +24,6 @@ import (
    "math/big"
    "os"
    "path/filepath"
    "runtime"
    "strconv"
    "strings"
    "time"
@@ -98,7 +97,7 @@ func NewApp(gitCommit, usage string) *cli.App {
    app.Author = ""
    //app.Authors = nil
    app.Email = ""
    app.Version = params.Version
    app.Version = params.VersionWithMeta
    if len(gitCommit) >= 8 {
        app.Version += "-" + gitCommit[:8]
    }
@@ -158,14 +157,6 @@ var (
        Usage: "Document Root for HTTPClient file scheme",
        Value: DirectoryString{homeDir()},
    }
    FastSyncFlag = cli.BoolFlag{
        Name:  "fast",
        Usage: "Enable fast syncing through state downloads (replaced by --syncmode)",
    }
    LightModeFlag = cli.BoolFlag{
        Name:  "light",
        Usage: "Enable light client mode (replaced by --syncmode)",
    }
    defaultSyncMode = eth.DefaultConfig.SyncMode
    SyncModeFlag    = TextMarshalerFlag{
        Name: "syncmode",
@@ -193,7 +184,7 @@ var (
    }
    // Dashboard settings
    DashboardEnabledFlag = cli.BoolFlag{
        Name:  "dashboard",
        Name:  metrics.DashboardEnabledFlag,
        Usage: "Enable the dashboard",
    }
    DashboardAddrFlag = cli.StringFlag{
@@ -242,6 +233,10 @@ var (
        Value: eth.DefaultConfig.Ethash.DatasetsOnDisk,
    }
    // Transaction pool settings
    TxPoolLocalsFlag = cli.StringFlag{
        Name:  "txpool.locals",
        Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)",
    }
    TxPoolNoLocalsFlag = cli.BoolFlag{
        Name:  "txpool.nolocals",
        Usage: "Disables price exemptions for locally submitted transactions",
@@ -318,29 +313,62 @@ var (
        Usage: "Enable mining",
    }
    MinerThreadsFlag = cli.IntFlag{
        Name:  "minerthreads",
        Name:  "miner.threads",
        Usage: "Number of CPU threads to use for mining",
        Value: runtime.NumCPU(),
        Value: 0,
    }
    TargetGasLimitFlag = cli.Uint64Flag{
        Name:  "targetgaslimit",
        Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
    MinerLegacyThreadsFlag = cli.IntFlag{
        Name:  "minerthreads",
        Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)",
        Value: 0,
    }
    MinerNotifyFlag = cli.StringFlag{
        Name:  "miner.notify",
        Usage: "Comma separated HTTP URL list to notify of new work packages",
    }
    MinerGasTargetFlag = cli.Uint64Flag{
        Name:  "miner.gastarget",
        Usage: "Target gas floor for mined blocks",
        Value: params.GenesisGasLimit,
    }
    EtherbaseFlag = cli.StringFlag{
        Name:  "etherbase",
        Usage: "Public address for block mining rewards (default = first account created)",
    MinerLegacyGasTargetFlag = cli.Uint64Flag{
        Name:  "targetgaslimit",
        Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
        Value: params.GenesisGasLimit,
    }
    MinerGasPriceFlag = BigFlag{
        Name:  "miner.gasprice",
Usage: "Minimal gas price for mining a transactions",
        Value: eth.DefaultConfig.MinerGasPrice,
    }
    MinerLegacyGasPriceFlag = BigFlag{
        Name:  "gasprice",
Usage: "Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)",
        Value: eth.DefaultConfig.MinerGasPrice,
    }
    MinerEtherbaseFlag = cli.StringFlag{
        Name:  "miner.etherbase",
        Usage: "Public address for block mining rewards (default = first account)",
        Value: "0",
    }
    GasPriceFlag = BigFlag{
        Name:  "gasprice",
Usage: "Minimal gas price to accept for mining a transactions",
        Value: eth.DefaultConfig.GasPrice,
    MinerLegacyEtherbaseFlag = cli.StringFlag{
        Name:  "etherbase",
        Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)",
        Value: "0",
    }
    ExtraDataFlag = cli.StringFlag{
        Name:  "extradata",
    MinerExtraDataFlag = cli.StringFlag{
        Name:  "miner.extradata",
        Usage: "Block extra data set by the miner (default = client version)",
    }
    MinerLegacyExtraDataFlag = cli.StringFlag{
        Name:  "extradata",
        Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)",
    }
    MinerRecommitIntervalFlag = cli.DurationFlag{
        Name:  "miner.recommit",
        Usage: "Time interval to recreate the block being mined.",
        Value: eth.DefaultConfig.MinerRecommit,
    }
    // Account settings
    UnlockedAccountFlag = cli.StringFlag{
        Name: "unlock",
@@ -644,8 +672,7 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
    for _, url := range urls {
        node, err := discover.ParseNode(url)
        if err != nil {
            log.Error("Bootstrap URL invalid", "enode", url, "err", err)
            continue
            log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
        }
        cfg.BootstrapNodes = append(cfg.BootstrapNodes, node)
    }
@@ -811,10 +838,19 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
// setEtherbase retrieves the etherbase either from the directly specified
// command line flags or from the keystore if CLI indexed.
func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
    if ctx.GlobalIsSet(EtherbaseFlag.Name) {
        account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name))
    // Extract the current etherbase, new flag overriding legacy one
    var etherbase string
    if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) {
        etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name)
    }
    if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) {
        etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name)
    }
    // Convert the etherbase into an address and configure it
    if etherbase != "" {
        account, err := MakeAddress(ks, etherbase)
        if err != nil {
            Fatalf("Option %q: %v", EtherbaseFlag.Name, err)
            Fatalf("Invalid miner etherbase: %v", err)
        }
        cfg.Etherbase = account.Address
    }
@@ -845,7 +881,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
    setBootstrapNodes(ctx, cfg)
    setBootstrapNodesV5(ctx, cfg)

    lightClient := ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalString(SyncModeFlag.Name) == "light"
    lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light"
    lightServer := ctx.GlobalInt(LightServFlag.Name) != 0
    lightPeers := ctx.GlobalInt(LightPeersFlag.Name)

@@ -945,6 +981,16 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
}

func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
    if ctx.GlobalIsSet(TxPoolLocalsFlag.Name) {
        locals := strings.Split(ctx.GlobalString(TxPoolLocalsFlag.Name), ",")
        for _, account := range locals {
            if trimmed := strings.TrimSpace(account); !common.IsHexAddress(trimmed) {
                Fatalf("Invalid account in --txpool.locals: %s", trimmed)
            } else {
                cfg.Locals = append(cfg.Locals, common.HexToAddress(account))
            }
        }
    }
    if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
        cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
    }
@@ -998,7 +1044,7 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
    }
}

// checkExclusive verifies that only a single isntance of the provided flags was
// checkExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
func checkExclusive(ctx *cli.Context, args ...interface{}) {
@@ -1050,8 +1096,6 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
    // Avoid conflicting network flags
    checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
    checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
    checkExclusive(ctx, LightServFlag, LightModeFlag)
    checkExclusive(ctx, LightServFlag, SyncModeFlag, "light")

    ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@@ -1060,13 +1104,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
    setTxPool(ctx, &cfg.TxPool)
    setEthash(ctx, cfg)

    switch {
    case ctx.GlobalIsSet(SyncModeFlag.Name):
    if ctx.GlobalIsSet(SyncModeFlag.Name) {
        cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
    case ctx.GlobalBool(FastSyncFlag.Name):
        cfg.SyncMode = downloader.FastSync
    case ctx.GlobalBool(LightModeFlag.Name):
        cfg.SyncMode = downloader.LightSync
    }
    if ctx.GlobalIsSet(LightServFlag.Name) {
        cfg.LightServ = ctx.GlobalInt(LightServFlag.Name)
@@ -1091,17 +1130,32 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
    if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
        cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
    }
    if ctx.GlobalIsSet(MinerLegacyThreadsFlag.Name) {
        cfg.MinerThreads = ctx.GlobalInt(MinerLegacyThreadsFlag.Name)
    }
    if ctx.GlobalIsSet(MinerThreadsFlag.Name) {
        cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name)
    }
    if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
        cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
    }
    if ctx.GlobalIsSet(DocRootFlag.Name) {
        cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
    }
    if ctx.GlobalIsSet(ExtraDataFlag.Name) {
        cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name))
    if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) {
        cfg.MinerExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name))
    }
    if ctx.GlobalIsSet(GasPriceFlag.Name) {
        cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name)
    if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
        cfg.MinerExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name))
    }
    if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
        cfg.MinerGasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name)
    }
    if ctx.GlobalIsSet(MinerGasPriceFlag.Name) {
        cfg.MinerGasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name)
    }
    if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
        cfg.MinerRecommit = ctx.Duration(MinerRecommitIntervalFlag.Name)
    }
    if ctx.GlobalIsSet(VMEnableDebugFlag.Name) {
        // TODO(fjl): force-enable this in --dev mode
@@ -1143,8 +1197,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
    log.Info("Using developer account", "address", developer.Address)

    cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
    if !ctx.GlobalIsSet(GasPriceFlag.Name) {
        cfg.GasPrice = big.NewInt(1)
    if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
        cfg.MinerGasPrice = big.NewInt(1)
    }
}
// TODO(fjl): move trie cache generations into config
@@ -1185,7 +1239,7 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
// RegisterDashboardService adds a dashboard to the stack.
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) {
    stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
        return dashboard.New(cfg, commit)
        return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil
    })
}

@@ -1199,7 +1253,7 @@ func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
}

// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
// th egiven node.
// the given node.
func RegisterEthStatsService(stack *node.Node, url string) {
    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
        // Retrieve both eth and les services
@@ -1218,7 +1272,10 @@ func RegisterEthStatsService(stack *node.Node, url string) {
// SetupNetwork configures the system for either the main net or some test network.
func SetupNetwork(ctx *cli.Context) {
    // TODO(fjl): move target gas limit into config
    params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
    params.TargetGasLimit = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name)
    if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
        params.TargetGasLimit = ctx.GlobalUint64(MinerGasTargetFlag.Name)
    }
}

func SetupMetrics(ctx *cli.Context) {
@@ -1249,7 +1306,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
    handles = makeDatabaseHandles()
    )
    name := "chaindata"
    if ctx.GlobalBool(LightModeFlag.Name) {
    if ctx.GlobalString(SyncModeFlag.Name) == "light" {
        name = "lightchaindata"
    }
    chainDb, err := stack.OpenDatabase(name, cache, handles)
@@ -1294,7 +1351,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
        DatasetDir:     stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
        DatasetsInMem:  eth.DefaultConfig.Ethash.DatasetsInMem,
        DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
        })
    }, nil)
    }
}
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {

@@ -30,3 +30,34 @@ type AbsTime time.Duration
func Now() AbsTime {
    return AbsTime(monotime.Now())
}

// Add returns t + d.
func (t AbsTime) Add(d time.Duration) AbsTime {
    return t + AbsTime(d)
}

// Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
    Now() AbsTime
    Sleep(time.Duration)
    After(time.Duration) <-chan time.Time
}

// System implements Clock using the system clock.
type System struct{}

// Now implements Clock.
func (System) Now() AbsTime {
    return AbsTime(monotime.Now())
}

// Sleep implements Clock.
func (System) Sleep(d time.Duration) {
    time.Sleep(d)
}

// After implements Clock.
func (System) After(d time.Duration) <-chan time.Time {
    return time.After(d)
}

common/mclock/simclock.go (Normal file, 129 lines)
@@ -0,0 +1,129 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mclock

import (
    "sync"
    "time"
)

// Simulated implements a virtual Clock for reproducible time-sensitive tests. It
// simulates a scheduler on a virtual timescale where actual processing takes zero time.
//
// The virtual clock doesn't advance on its own, call Run to advance it and execute timers.
// Since there is no way to influence the Go scheduler, testing timeout behaviour involving
// goroutines needs special care. A good way to test such timeouts is as follows: First
// perform the action that is supposed to time out. Ensure that the timer you want to test
// is created. Then run the clock until after the timeout. Finally observe the effect of
// the timeout using a channel or semaphore.
type Simulated struct {
    now       AbsTime
    scheduled []event
    mu        sync.RWMutex
    cond      *sync.Cond
}

type event struct {
    do func()
    at AbsTime
}

// Run moves the clock by the given duration, executing all timers before that duration.
func (s *Simulated) Run(d time.Duration) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.init()

    end := s.now + AbsTime(d)
    for len(s.scheduled) > 0 {
        ev := s.scheduled[0]
        if ev.at > end {
            break
        }
        s.now = ev.at
        ev.do()
        s.scheduled = s.scheduled[1:]
    }
    s.now = end
}

func (s *Simulated) ActiveTimers() int {
    s.mu.RLock()
    defer s.mu.RUnlock()

    return len(s.scheduled)
}

func (s *Simulated) WaitForTimers(n int) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.init()

    for len(s.scheduled) < n {
        s.cond.Wait()
    }
}

// Now implements Clock.
func (s *Simulated) Now() AbsTime {
    s.mu.RLock()
    defer s.mu.RUnlock()

    return s.now
}

// Sleep implements Clock.
func (s *Simulated) Sleep(d time.Duration) {
    <-s.After(d)
}

// After implements Clock.
func (s *Simulated) After(d time.Duration) <-chan time.Time {
    after := make(chan time.Time, 1)
    s.insert(d, func() {
        after <- (time.Time{}).Add(time.Duration(s.now))
    })
    return after
}

func (s *Simulated) insert(d time.Duration, do func()) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.init()

    at := s.now + AbsTime(d)
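    // binary search for the insertion index that keeps s.scheduled sorted by time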
    l, h := 0, len(s.scheduled)
    ll := h
    for l != h {
        m := (l + h) / 2
        if at < s.scheduled[m].at {
            h = m
        } else {
            l = m + 1
        }
    }
    s.scheduled = append(s.scheduled, event{})
    copy(s.scheduled[l+1:], s.scheduled[l:ll])
    s.scheduled[l] = event{do: do, at: at}
    s.cond.Broadcast()
}

func (s *Simulated) init() {
    if s.cond == nil {
        s.cond = sync.NewCond(&s.mu)
    }
}
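
The testing recipe in the doc comment above can be exercised roughly like this; a minimal sketch (assuming a *testing.T in scope, with the After channel standing in for the operation that should time out):

    var clk Simulated

    // 1. perform the action that is supposed to time out
    timeout := clk.After(5 * time.Second)

    // 2. ensure the timer exists before advancing the clock
    clk.WaitForTimers(1)

    // 3. run the virtual clock past the timeout; this takes no real time
    clk.Run(10 * time.Second)

    // 4. observe the effect
    select {
    case <-timeout:
        // the timer fired on the virtual timescale
    default:
        t.Fatal("timer did not fire")
    }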
common/prque/prque.go (Executable file, 57 lines)
@@ -0,0 +1,57 @@
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".

package prque

import (
    "container/heap"
)

// Priority queue data structure.
type Prque struct {
    cont *sstack
}

// Creates a new priority queue.
func New(setIndex setIndexCallback) *Prque {
    return &Prque{newSstack(setIndex)}
}

// Pushes a value with a given priority into the queue, expanding if necessary.
func (p *Prque) Push(data interface{}, priority int64) {
    heap.Push(p.cont, &item{data, priority})
}

// Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) {
    item := heap.Pop(p.cont).(*item)
    return item.value, item.priority
}

// Pops only the item from the queue, dropping the associated priority value.
func (p *Prque) PopItem() interface{} {
    return heap.Pop(p.cont).(*item).value
}

// Remove removes the element with the given index.
func (p *Prque) Remove(i int) interface{} {
    if i < 0 {
        return nil
    }
    return heap.Remove(p.cont, i)
}

// Checks whether the priority queue is empty.
func (p *Prque) Empty() bool {
    return p.cont.Len() == 0
}

// Returns the number of elements in the priority queue.
func (p *Prque) Size() int {
    return p.cont.Len()
}

// Clears the contents of the priority queue.
func (p *Prque) Reset() {
    *p = *New(p.cont.setIndex)
}
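
A short usage sketch of the queue's API (illustrative values, assumes "fmt" is imported; the nil callback is fine when only top-item removal is needed):

    q := New(nil)
    q.Push("a", 1)
    q.Push("b", 3)
    q.Push("c", 2)
    for !q.Empty() {
        value, priority := q.Pop()
        fmt.Println(value, priority) // "b 3", then "c 2", then "a 1": highest priority first
    }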
common/prque/sstack.go (Executable file, 106 lines)
@@ -0,0 +1,106 @@
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".

package prque

// The size of a block of data
const blockSize = 4096

// A prioritized item in the sorted stack.
//
// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0.
// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63.
type item struct {
    value    interface{}
    priority int64
}

// setIndexCallback is called when the element is moved to a new index.
// Providing setIndexCallback is optional, it is needed only if the application needs
// to delete elements other than the top one.
type setIndexCallback func(a interface{}, i int)

// Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heaps.
type sstack struct {
    setIndex setIndexCallback
    size     int
    capacity int
    offset   int

    blocks [][]*item
    active []*item
}

// Creates a new, empty stack.
func newSstack(setIndex setIndexCallback) *sstack {
    result := new(sstack)
    result.setIndex = setIndex
    result.active = make([]*item, blockSize)
    result.blocks = [][]*item{result.active}
    result.capacity = blockSize
    return result
}

// Pushes a value onto the stack, expanding it if necessary. Required by
// heap.Interface.
func (s *sstack) Push(data interface{}) {
    if s.size == s.capacity {
        s.active = make([]*item, blockSize)
        s.blocks = append(s.blocks, s.active)
        s.capacity += blockSize
        s.offset = 0
    } else if s.offset == blockSize {
        s.active = s.blocks[s.size/blockSize]
        s.offset = 0
    }
    if s.setIndex != nil {
        s.setIndex(data.(*item).value, s.size)
    }
    s.active[s.offset] = data.(*item)
    s.offset++
    s.size++
}

// Pops a value off the stack and returns it. Currently no shrinking is done.
// Required by heap.Interface.
func (s *sstack) Pop() (res interface{}) {
    s.size--
    s.offset--
    if s.offset < 0 {
        s.offset = blockSize - 1
        s.active = s.blocks[s.size/blockSize]
    }
    res, s.active[s.offset] = s.active[s.offset], nil
    if s.setIndex != nil {
        s.setIndex(res.(*item).value, -1)
    }
    return
}

// Returns the length of the stack. Required by sort.Interface.
func (s *sstack) Len() int {
    return s.size
}

// Compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface.
func (s *sstack) Less(i, j int) bool {
    return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0
}

// Swaps two elements in the stack. Required by sort.Interface.
func (s *sstack) Swap(i, j int) {
    ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
    a, b := s.blocks[jb][jo], s.blocks[ib][io]
    if s.setIndex != nil {
        s.setIndex(a.value, i)
        s.setIndex(b.value, j)
    }
    s.blocks[ib][io], s.blocks[jb][jo] = a, b
}

// Resets the stack, effectively clearing its contents.
func (s *sstack) Reset() {
    *s = *newSstack(s.setIndex)
}

@@ -17,6 +17,7 @@
package common

import (
    "database/sql/driver"
    "encoding/hex"
    "encoding/json"
    "fmt"
@@ -31,7 +32,9 @@ import (

// Lengths of hashes and addresses in bytes.
const (
    HashLength = 32
    // HashLength is the expected length of the hash
    HashLength = 32
    // AddressLength is the expected length of the address
    AddressLength = 20
)

@@ -120,6 +123,24 @@ func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
    return reflect.ValueOf(h)
}

// Scan implements Scanner for database/sql.
func (h *Hash) Scan(src interface{}) error {
    srcB, ok := src.([]byte)
    if !ok {
        return fmt.Errorf("can't scan %T into Hash", src)
    }
    if len(srcB) != HashLength {
        return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), HashLength)
    }
    copy(h[:], srcB)
    return nil
}

// Value implements valuer for database/sql.
func (h Hash) Value() (driver.Value, error) {
    return h[:], nil
}

// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash

@@ -229,6 +250,24 @@ func (a *Address) UnmarshalJSON(input []byte) error {
    return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}

// Scan implements Scanner for database/sql.
func (a *Address) Scan(src interface{}) error {
    srcB, ok := src.([]byte)
    if !ok {
        return fmt.Errorf("can't scan %T into Address", src)
    }
    if len(srcB) != AddressLength {
        return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength)
    }
    copy(a[:], srcB)
    return nil
}

// Value implements valuer for database/sql.
func (a Address) Value() (driver.Value, error) {
    return a[:], nil
}

// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
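
The new Scan/Value pair makes Hash and Address usable directly as database/sql parameters and scan targets; a minimal round-trip sketch (no driver involved, illustrative values only):

    var h Hash
    raw := make([]byte, HashLength) // e.g. 32 raw bytes read from a BLOB column
    if err := h.Scan(raw); err != nil {
        // a non-[]byte source or a wrong-length slice is rejected
        panic(err)
    }
    v, _ := h.Value() // driver.Value holding the 32 raw bytes
    _ = v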
|
||||
|
||||
|
@@ -17,9 +17,10 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
@@ -193,3 +194,180 @@ func TestMixedcaseAccount_Address(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestHash_Scan(t *testing.T) {
|
||||
type args struct {
|
||||
src interface{}
|
||||
}
|
||||
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "working scan",
			args: args{src: []byte{
				0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
				0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
				0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
				0x10, 0x00,
			}},
			wantErr: false,
		},
		{
			name:    "non working scan",
			args:    args{src: int64(1234567890)},
			wantErr: true,
		},
		{
			name: "invalid length scan",
			args: args{src: []byte{
				0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
				0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
				0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
			}},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Hash{}
			if err := h.Scan(tt.args.src); (err != nil) != tt.wantErr {
				t.Errorf("Hash.Scan() error = %v, wantErr %v", err, tt.wantErr)
			}

			if !tt.wantErr {
				for i := range h {
					if h[i] != tt.args.src.([]byte)[i] {
						t.Errorf(
							"Hash.Scan() didn't scan the %d src correctly (have %X, want %X)",
							i, h[i], tt.args.src.([]byte)[i],
						)
					}
				}
			}
		})
	}
}

func TestHash_Value(t *testing.T) {
	b := []byte{
		0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
		0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
		0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
		0x10, 0x00,
	}
	var usedH Hash
	usedH.SetBytes(b)
	tests := []struct {
		name    string
		h       Hash
		want    driver.Value
		wantErr bool
	}{
		{
			name:    "Working value",
			h:       usedH,
			want:    b,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.h.Value()
			if (err != nil) != tt.wantErr {
				t.Errorf("Hash.Value() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Hash.Value() = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestAddress_Scan(t *testing.T) {
	type args struct {
		src interface{}
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "working scan",
			args: args{src: []byte{
				0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
				0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
			}},
			wantErr: false,
		},
		{
			name:    "non working scan",
			args:    args{src: int64(1234567890)},
			wantErr: true,
		},
		{
			name: "invalid length scan",
			args: args{src: []byte{
				0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
				0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a,
			}},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := &Address{}
			if err := a.Scan(tt.args.src); (err != nil) != tt.wantErr {
				t.Errorf("Address.Scan() error = %v, wantErr %v", err, tt.wantErr)
			}

			if !tt.wantErr {
				for i := range a {
					if a[i] != tt.args.src.([]byte)[i] {
						t.Errorf(
							"Address.Scan() didn't scan the %d src correctly (have %X, want %X)",
							i, a[i], tt.args.src.([]byte)[i],
						)
					}
				}
			}
		})
	}
}

func TestAddress_Value(t *testing.T) {
	b := []byte{
		0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
		0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
	}
	var usedA Address
	usedA.SetBytes(b)
	tests := []struct {
		name    string
		a       Address
		want    driver.Value
		wantErr bool
	}{
		{
			name:    "Working value",
			a:       usedA,
			want:    b,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.a.Value()
			if (err != nil) != tt.wantErr {
				t.Errorf("Address.Value() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Address.Value() = %v, want %v", got, tt.want)
			}
		})
	}
}
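The four tests above round-trip Hash and Address through the database/sql interfaces. For orientation, a minimal sketch of the shape of implementation they assume (a fixed-size byte array implementing sql.Scanner and driver.Valuer) is shown below; this is not the committed code, which lives in common/types.go, it only mirrors the behaviour the tests check: type and length validation on Scan, raw bytes on Value.

	// Sketch only; imports: "database/sql/driver", "fmt". Hash stands in for common.Hash (32 bytes).
	type Hash [32]byte

	// Scan implements sql.Scanner; it accepts only a []byte of exactly len(h) bytes.
	func (h *Hash) Scan(src interface{}) error {
		srcB, ok := src.([]byte)
		if !ok {
			return fmt.Errorf("can't scan %T into Hash", src)
		}
		if len(srcB) != len(h) {
			return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), len(h))
		}
		copy(h[:], srcB)
		return nil
	}

	// Value implements driver.Valuer; it hands the raw bytes to the driver.
	func (h Hash) Value() (driver.Value, error) {
		return h[:], nil
	}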
@@ -75,7 +75,7 @@ func (api *API) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) {
	return snap.signers(), nil
}

// GetSignersAtHash retrieves the state snapshot at a given block.
// GetSignersAtHash retrieves the list of authorized signers at the specified block.
func (api *API) GetSignersAtHash(hash common.Hash) ([]common.Address, error) {
	header := api.chain.GetHeaderByHash(hash)
	if header == nil {

@@ -53,7 +53,6 @@ const (
// Clique proof-of-authority protocol constants.
var (
	epochLength = uint64(30000) // Default number of blocks after which to checkpoint and reset the pending votes
	blockPeriod = uint64(15)    // Default minimum difference between two consecutive blocks' timestamps

	extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
	extraSeal   = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
@@ -388,22 +387,23 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
			break
		}
	}
	// If we're at block zero, make a snapshot
	if number == 0 {
		genesis := chain.GetHeaderByNumber(0)
		if err := c.VerifyHeader(chain, genesis, false); err != nil {
			return nil, err
	// If we're at a checkpoint block, make a snapshot if it's known
	if number%c.config.Epoch == 0 {
		checkpoint := chain.GetHeaderByNumber(number)
		if checkpoint != nil {
			hash := checkpoint.Hash()

			signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength)
			for i := 0; i < len(signers); i++ {
				copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:])
			}
			snap = newSnapshot(c.config, c.signatures, number, hash, signers)
			if err := snap.store(c.db); err != nil {
				return nil, err
			}
			log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
			break
		}
		signers := make([]common.Address, (len(genesis.Extra)-extraVanity-extraSeal)/common.AddressLength)
		for i := 0; i < len(signers); i++ {
			copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
		}
		snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
		if err := snap.store(c.db); err != nil {
			return nil, err
		}
		log.Trace("Stored genesis voting snapshot to disk")
		break
	}
	// No snapshot for this header, gather the header and move backward
	var header *types.Header
@@ -673,6 +673,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
	return new(big.Int).Set(diffNoTurn)
}

// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
func (c *Clique) Close() error {
	return nil
}

// APIs implements consensus.Engine, returning the user facing RPC API to allow
// controlling the signer voting.
func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {

@@ -19,6 +19,7 @@ package clique
import (
	"bytes"
	"encoding/json"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
@@ -56,6 +57,13 @@ type Snapshot struct {
	Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating
}

// signers implements the sort interface to allow sorting a list of addresses
type signers []common.Address

func (s signers) Len() int           { return len(s) }
func (s signers) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 }
func (s signers) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// newSnapshot creates a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use it for
// the genesis block.
@@ -286,18 +294,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {

// signers retrieves the list of authorized signers in ascending order.
func (s *Snapshot) signers() []common.Address {
	signers := make([]common.Address, 0, len(s.Signers))
	for signer := range s.Signers {
		signers = append(signers, signer)
	sigs := make([]common.Address, 0, len(s.Signers))
	for sig := range s.Signers {
		sigs = append(sigs, sig)
	}
	for i := 0; i < len(signers); i++ {
		for j := i + 1; j < len(signers); j++ {
			if bytes.Compare(signers[i][:], signers[j][:]) > 0 {
				signers[i], signers[j] = signers[j], signers[i]
			}
		}
	}
	return signers
	sort.Sort(signers(sigs))
	return sigs
}

// inturn returns if a signer at a given block height is in-turn or not.
@@ -84,7 +84,7 @@ func (r *testerChainReader) GetHeaderByNumber(number uint64) *types.Header {
	if number == 0 {
		return rawdb.ReadHeader(r.db, rawdb.ReadCanonicalHash(r.db, 0), 0)
	}
	panic("not supported")
	return nil
}

// Tests that voting is evaluated correctly for various simple and complex scenarios.
@@ -360,7 +360,7 @@ func TestVoting(t *testing.T) {
		for j, vote := range tt.votes {
			headers[j] = &types.Header{
				Number:   big.NewInt(int64(j) + 1),
				Time:     big.NewInt(int64(j) * int64(blockPeriod)),
				Time:     big.NewInt(int64(j) * 15),
				Coinbase: accounts.address(vote.voted),
				Extra:    make([]byte, extraVanity+extraSeal),
			}

@@ -96,6 +96,9 @@ type Engine interface {

	// APIs returns the RPC APIs this consensus engine provides.
	APIs(chain ChainReader) []rpc.API

	// Close terminates any background threads maintained by the consensus engine.
	Close() error
}

// PoW is a consensus engine based on proof-of-work.

@@ -214,15 +214,6 @@ func swap(buffer []byte) {
	}
}

// prepare converts an ethash cache or dataset from a byte stream into the internal
// int representation. All ethash methods work with ints to avoid constant byte to
// int conversions as well as to handle both little and big endian systems.
func prepare(dest []uint32, src []byte) {
	for i := 0; i < len(dest); i++ {
		dest[i] = binary.LittleEndian.Uint32(src[i*4:])
	}
}

// fnv is an algorithm inspired by the FNV hash, which in some cases is used as
// a non-associative substitute for XOR. Note that we multiply the prime with
// the full 32-bit input, in contrast with the FNV-1 spec which multiplies the
@@ -18,6 +18,7 @@ package ethash

import (
	"bytes"
	"encoding/binary"
	"io/ioutil"
	"math/big"
	"os"
@@ -30,6 +31,15 @@ import (
	"github.com/ethereum/go-ethereum/core/types"
)

// prepare converts an ethash cache or dataset from a byte stream into the internal
// int representation. All ethash methods work with ints to avoid constant byte to
// int conversions as well as to handle both little and big endian systems.
func prepare(dest []uint32, src []byte) {
	for i := 0; i < len(dest); i++ {
		dest[i] = binary.LittleEndian.Uint32(src[i*4:])
	}
}

// Tests whether the dataset size calculator works correctly by cross checking the
// hard coded lookup table with the value generated by it.
func TestSizeCalculations(t *testing.T) {
@@ -719,7 +729,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {

		go func(idx int) {
			defer pend.Done()
			ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
			ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil)
			defer ethash.Close()
			if err := ethash.VerifySeal(nil, block.Header()); err != nil {
				t.Errorf("proc %d: block verification failed: %v", idx, err)
			}

consensus/ethash/api.go (new file, 117 lines)
@@ -0,0 +1,117 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

var errEthashStopped = errors.New("ethash stopped")

// API exposes ethash related methods for the RPC interface.
type API struct {
	ethash *Ethash // Make sure the mode of ethash is normal.
}

// GetWork returns a work package for an external miner.
//
// The work package consists of 3 strings:
//   result[0] - 32 bytes hex encoded current block header pow-hash
//   result[1] - 32 bytes hex encoded seed hash used for DAG
//   result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (api *API) GetWork() ([3]string, error) {
	if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
		return [3]string{}, errors.New("not supported")
	}

	var (
		workCh = make(chan [3]string, 1)
		errc   = make(chan error, 1)
	)

	select {
	case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
	case <-api.ethash.exitCh:
		return [3]string{}, errEthashStopped
	}

	select {
	case work := <-workCh:
		return work, nil
	case err := <-errc:
		return [3]string{}, err
	}
}

// SubmitWork can be used by external miners to submit their PoW solution.
// It returns an indication whether the work was accepted.
// Note that an invalid solution, stale work or non-existent work will all return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
	if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
		return false
	}

	var errc = make(chan error, 1)

	select {
	case api.ethash.submitWorkCh <- &mineResult{
		nonce:     nonce,
		mixDigest: digest,
		hash:      hash,
		errc:      errc,
	}:
	case <-api.ethash.exitCh:
		return false
	}

	err := <-errc
	return err == nil
}

// SubmitHashRate can be used by remote miners to submit their hash rate.
// This enables the node to report the combined hash rate of all miners
// which submit work through this node.
//
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
	if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
		return false
	}

	var done = make(chan struct{}, 1)

	select {
	case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
	case <-api.ethash.exitCh:
		return false
	}

	// Block until hash rate submitted successfully.
	<-done

	return true
}

// GetHashrate returns the current hashrate for the local CPU miner and remote miners.
func (api *API) GetHashrate() uint64 {
	return uint64(api.ethash.Hashrate())
}
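A hedged usage sketch, not part of this change: with the API above registered under the eth namespace, an external miner could drive it over JSON-RPC roughly as follows. The node URL and the nonce/mix-digest values are assumptions for illustration only.

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/rpc"
	)

	func main() {
		client, err := rpc.Dial("http://localhost:8545") // assumed node endpoint
		if err != nil {
			panic(err)
		}
		// Fetch a work package: [pow-hash, seed hash, target].
		var work [3]string
		if err := client.Call(&work, "eth_getWork"); err != nil {
			panic(err)
		}
		fmt.Println("mining on header", work[0], "target", work[2])

		// After finding a nonce, submit it back; false means the solution was
		// invalid, stale, or matched no pending work.
		nonce := "0x0000000000000001"                                                     // illustrative 8-byte nonce
		mixDigest := "0x0000000000000000000000000000000000000000000000000000000000000000" // illustrative 32-byte digest
		var accepted bool
		_ = client.Call(&accepted, "eth_submitWork", nonce, work[0], mixDigest)
	}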
@@ -24,6 +24,7 @@ import (
	"runtime"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/consensus"
@@ -31,7 +32,6 @@ import (
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
	set "gopkg.in/fatih/set.v0"
)

// Ethash proof-of-work protocol constants.
@@ -177,7 +177,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
		return errTooManyUncles
	}
	// Gather the set of past uncles and ancestors
	uncles, ancestors := set.New(), make(map[common.Hash]*types.Header)
	uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header)

	number, parent := block.NumberU64()-1, block.ParentHash()
	for i := 0; i < 7; i++ {
@@ -198,7 +198,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
	for _, uncle := range block.Uncles() {
		// Make sure every uncle is rewarded only once
		hash := uncle.Hash()
		if uncles.Has(hash) {
		if uncles.Contains(hash) {
			return errDuplicateUncle
		}
		uncles.Add(hash)
@@ -356,8 +356,8 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
		x.Set(params.MinimumDifficulty)
	}
	// calculate a fake block number for the ice-age delay:
	// https://github.com/ethereum/EIPs/pull/669
	// fake_block_number = min(0, block.number - 3_000_000
	// https://github.com/ethereum/EIPs/pull/669
	// fake_block_number = max(0, block.number - 3_000_000)
	fakeBlockNumber := new(big.Int)
	if parent.Number.Cmp(big2999999) >= 0 {
		fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, big2999999) // Note, parent is 1 less than the actual block number
@@ -461,6 +461,13 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoW difficulty requirements.
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
	return ethash.verifySeal(chain, header, false)
}

// verifySeal checks whether a block satisfies the PoW difficulty requirements,
// either using the usual ethash cache for it, or alternatively using a full DAG
// to make remote mining fast.
func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error {
	// If we're running a fake PoW, accept any seal as valid
	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
		time.Sleep(ethash.fakeDelay)
@@ -471,29 +478,52 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
	}
	// If we're running a shared PoW, delegate verification to it
	if ethash.shared != nil {
		return ethash.shared.VerifySeal(chain, header)
		return ethash.shared.verifySeal(chain, header, fulldag)
	}
	// Ensure that we have a valid difficulty for the block
	if header.Difficulty.Sign() <= 0 {
		return errInvalidDifficulty
	}
	// Recompute the digest and PoW value and verify against the header
	// Recompute the digest and PoW values
	number := header.Number.Uint64()

	cache := ethash.cache(number)
	size := datasetSize(number)
	if ethash.config.PowMode == ModeTest {
		size = 32 * 1024
	}
	digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
	// Caches are unmapped in a finalizer. Ensure that the cache stays live
	// until after the call to hashimotoLight so it's not unmapped while being used.
	runtime.KeepAlive(cache)
	var (
		digest []byte
		result []byte
	)
	// If fast-but-heavy PoW verification was requested, use an ethash dataset
	if fulldag {
		dataset := ethash.dataset(number, true)
		if dataset.generated() {
			digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64())

			// Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
			// until after the call to hashimotoFull so it's not unmapped while being used.
			runtime.KeepAlive(dataset)
		} else {
			// Dataset not yet generated, don't hang, use a cache instead
			fulldag = false
		}
	}
	// If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache
	if !fulldag {
		cache := ethash.cache(number)

		size := datasetSize(number)
		if ethash.config.PowMode == ModeTest {
			size = 32 * 1024
		}
		digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())

		// Caches are unmapped in a finalizer. Ensure that the cache stays alive
		// until after the call to hashimotoLight so it's not unmapped while being used.
		runtime.KeepAlive(cache)
	}
	// Verify the calculated values against the ones provided in the header
	if !bytes.Equal(header.MixDigest[:], digest) {
		return errInvalidMixDigest
	}
	target := new(big.Int).Div(maxUint256, header.Difficulty)
	target := new(big.Int).Div(two256, header.Difficulty)
	if new(big.Int).SetBytes(result).Cmp(target) > 0 {
		return errInvalidPoW
	}
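The maxUint256-to-two256 swap above touches the boundary check that decides seal validity: the target is 2^256 divided by the difficulty, so a higher difficulty yields a smaller target. A self-contained sketch of that check (the difficulty value is an arbitrary example):

	// validSeal reports whether a PoW result, read as a 256-bit integer, is at
	// most 2^256/difficulty. Imports: "math/big".
	func validSeal(result []byte, difficulty *big.Int) bool {
		two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
		target := new(big.Int).Div(two256, difficulty)
		return new(big.Int).SetBytes(result).Cmp(target) <= 0
	}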
@@ -29,11 +29,14 @@ import (
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
@@ -43,11 +46,11 @@ import (
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256-1
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
	// two256 is a big integer representing 2^256
	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23
@@ -279,6 +282,7 @@ type dataset struct {
	mmap    mmap.MMap  // Memory map itself to unmap before releasing
	dataset []uint32   // The actual cache data content
	once    sync.Once  // Ensures the cache is generated only once
	done    uint32     // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -290,6 +294,9 @@ func newDataset(epoch uint64) interface{} {
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		// Mark the dataset generated after we're done. This is needed for remote
		defer atomic.StoreUint32(&d.done, 1)

		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
@@ -304,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) {

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
@@ -346,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) {
	})
}

// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
	return atomic.LoadUint32(&d.done) == 1
}

// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
	if d.mmap != nil {
@@ -389,6 +405,30 @@ type Config struct {
	PowMode Mode
}

// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
	nonce     types.BlockNonce
	mixDigest common.Hash
	hash      common.Hash

	errc chan error
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
	id   common.Hash
	ping time.Time
	rate uint64

	done chan struct{}
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
	errc chan error
	res  chan [3]string
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
@@ -403,16 +443,28 @@ type Ethash struct {
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// Remote sealer related fields
	workCh       chan *types.Block // Notification channel to push new work to remote sealer
	resultCh     chan *types.Block // Channel used by mining threads to return result
	fetchWorkCh  chan *sealWork    // Channel used for remote sealer to fetch mining work
	submitWorkCh chan *mineResult  // Channel used for remote sealer to submit their mining result
	fetchRateCh  chan chan uint64  // Channel used to gather submitted hash rate for local or remote sealer.
	submitRateCh chan *hashrate    // Channel used for remote sealer to submit their mining hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	lock      sync.Mutex      // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once       // Ensures exit channel will not be closed twice.
	exitCh    chan chan error // Notification channel to exiting backend threads
}

// New creates a full sized ethash PoW scheme.
func New(config Config) *Ethash {
// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string) *Ethash {
	if config.CachesInMem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
@@ -423,19 +475,43 @@ func New(config Config) *Ethash {
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	return &Ethash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeter(),
	ethash := &Ethash{
		config:       config,
		caches:       newlru("cache", config.CachesInMem, newCache),
		datasets:     newlru("dataset", config.DatasetsInMem, newDataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
		workCh:       make(chan *types.Block),
		resultCh:     make(chan *types.Block),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		exitCh:       make(chan chan error),
	}
	go ethash.remote(notify)
	return ethash
}

// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester() *Ethash {
	return New(Config{CachesInMem: 1, PowMode: ModeTest})
func NewTester(notify []string) *Ethash {
	ethash := &Ethash{
		config:       Config{PowMode: ModeTest},
		caches:       newlru("cache", 1, newCache),
		datasets:     newlru("dataset", 1, newDataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
		workCh:       make(chan *types.Block),
		resultCh:     make(chan *types.Block),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		exitCh:       make(chan chan error),
	}
	go ethash.remote(notify)
	return ethash
}

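A hedged usage sketch for the new constructor signature; the directories, cache counts and notify URL below are assumptions, not values from this change. The notify list is the set of remote services to push new work packages to, and Close() terminates the remote sealer goroutine started by New:

	// Sketch only, called from another package.
	engine := ethash.New(ethash.Config{
		CacheDir:       "ethash",      // assumed cache directory
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetDir:     "ethash-dags", // assumed DAG directory
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
		PowMode:        ethash.ModeNormal,
	}, []string{"http://127.0.0.1:8550"}) // assumed remote miner to notify
	defer engine.Close() // signals exitCh and stops the remote goroutine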
// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
@@ -489,6 +565,22 @@ func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
	var err error
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.exitCh == nil {
			return
		}
		errc := make(chan error)
		ethash.exitCh <- errc
		err = <-errc
		close(ethash.exitCh)
	})
	return err
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
@@ -511,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache {
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) *dataset {
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// Wait for generation finish.
	current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
	// If async is specified, generate everything in a background thread
	if async && !current.generated() {
		go func() {
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future dataset, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*dataset)
		go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
			}
		}()
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
		}
	}

	return current
}

@@ -561,14 +667,44 @@ func (ethash *Ethash) SetThreads(threads int) {

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes the local hashrate, but also the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
	return ethash.hashrate.Rate1()
	// Short circuit if ethash is not running in normal/test mode.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	var res = make(chan uint64, 1)

	select {
	case ethash.fetchRateCh <- res:
	case <-ethash.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}

	// Gather total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}

// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
// that is empty.
// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	return nil
	// In order to ensure backward compatibility, we expose the ethash RPC APIs
	// to both the eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
@@ -23,22 +23,27 @@ import (
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

// Tests that ethash works correctly in test mode.
func TestTestMode(t *testing.T) {
	head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}

	ethash := NewTester()
	block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil)
	ethash := NewTester(nil)
	defer ethash.Close()

	block, err := ethash.Seal(nil, types.NewBlockWithHeader(header), nil)
	if err != nil {
		t.Fatalf("failed to seal block: %v", err)
	}
	head.Nonce = types.EncodeNonce(block.Nonce())
	head.MixDigest = block.MixDigest()
	if err := ethash.VerifySeal(nil, head); err != nil {
	header.Nonce = types.EncodeNonce(block.Nonce())
	header.MixDigest = block.MixDigest()
	if err := ethash.VerifySeal(nil, header); err != nil {
		t.Fatalf("unexpected verification error: %v", err)
	}
}
@@ -51,7 +56,8 @@ func TestCacheFileEvict(t *testing.T) {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest})
	e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil)
	defer e.Close()

	workers := 8
	epochs := 100
@@ -73,7 +79,90 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) {
		if block < 0 {
			block = 0
		}
		head := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
		e.VerifySeal(nil, head)
		header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
		e.VerifySeal(nil, header)
	}
}

func TestRemoteSealer(t *testing.T) {
	ethash := NewTester(nil)
	defer ethash.Close()

	api := &API{ethash}
	if _, err := api.GetWork(); err != errNoMiningWork {
		t.Error("expect to return an error indicate there is no mining work")
	}
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
	block := types.NewBlockWithHeader(header)

	// Push new work.
	ethash.Seal(nil, block, nil)

	var (
		work [3]string
		err  error
	)
	if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() {
		t.Error("expect to return a mining work has same hash")
	}

	if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res {
		t.Error("expect to return false when submit a fake solution")
	}
	// Push new block with same block number to replace the original one.
	header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)}
	block = types.NewBlockWithHeader(header)
	ethash.Seal(nil, block, nil)

	if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() {
		t.Error("expect to return the latest pushed work")
	}
	// Push block with higher block number.
	newHead := &types.Header{Number: big.NewInt(2), Difficulty: big.NewInt(100)}
	newBlock := types.NewBlockWithHeader(newHead)
	ethash.Seal(nil, newBlock, nil)

	if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res {
		t.Error("expect to return false when submit a stale solution")
	}
}

func TestHashRate(t *testing.T) {
	var (
		hashrate = []hexutil.Uint64{100, 200, 300}
		expect   uint64
		ids      = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")}
	)
	ethash := NewTester(nil)
	defer ethash.Close()

	if tot := ethash.Hashrate(); tot != 0 {
		t.Error("expect the result should be zero")
	}

	api := &API{ethash}
	for i := 0; i < len(hashrate); i += 1 {
		if res := api.SubmitHashRate(hashrate[i], ids[i]); !res {
			t.Error("remote miner submit hashrate failed")
		}
		expect += uint64(hashrate[i])
	}
	if tot := ethash.Hashrate(); tot != float64(expect) {
		t.Error("expect total hashrate should be same")
	}
}

func TestClosedRemoteSealer(t *testing.T) {
	ethash := NewTester(nil)
	time.Sleep(1 * time.Second) // ensure exit channel is listening
	ethash.Close()

	api := &API{ethash}
	if _, err := api.GetWork(); err != errEthashStopped {
		t.Error("expect to return an error to indicate ethash is stopped")
	}

	if res := api.SubmitHashRate(hexutil.Uint64(100), common.HexToHash("a")); res {
		t.Error("expect to return false when submit hashrate to a stopped ethash")
	}
}

@@ -17,12 +17,17 @@
package ethash

import (
	"bytes"
	crand "crypto/rand"
	"encoding/json"
	"errors"
	"math"
	"math/big"
	"math/rand"
	"net/http"
	"runtime"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
@@ -30,6 +35,11 @@ import (
	"github.com/ethereum/go-ethereum/log"
)

var (
	errNoMiningWork      = errors.New("no mining work available yet")
	errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
)

// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
@@ -45,7 +55,6 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
	}
	// Create a runner and the multiple search threads it directs
	abort := make(chan struct{})
	found := make(chan *types.Block)

	ethash.lock.Lock()
	threads := ethash.threads
@@ -64,12 +73,16 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
	if threads < 0 {
		threads = 0 // Allows disabling local mining without extra logic around local/remote
	}
	// Push new work to remote sealer
	if ethash.workCh != nil {
		ethash.workCh <- block
	}
	var pend sync.WaitGroup
	for i := 0; i < threads; i++ {
		pend.Add(1)
		go func(id int, nonce uint64) {
			defer pend.Done()
			ethash.mine(block, id, nonce, abort, found)
			ethash.mine(block, id, nonce, abort, ethash.resultCh)
		}(i, uint64(ethash.rand.Int63()))
	}
	// Wait until sealing is terminated or a nonce is found
@@ -78,7 +91,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
	case <-stop:
		// Outside abort, stop all miner threads
		close(abort)
	case result = <-found:
	case result = <-ethash.resultCh:
		// One of the threads found a block, abort all others
		close(abort)
	case <-ethash.update:
@@ -99,9 +112,9 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
	var (
		header = block.Header()
		hash   = header.HashNoNonce().Bytes()
		target = new(big.Int).Div(maxUint256, header.Difficulty)
		target = new(big.Int).Div(two256, header.Difficulty)
		number = header.Number.Uint64()
		dataset = ethash.dataset(number)
		dataset = ethash.dataset(number, false)
	)
	// Start generating random nonces until we abort or find a good one
	var (
@@ -150,3 +163,164 @@ search:
	// during sealing so it's not unmapped while being read.
	runtime.KeepAlive(dataset)
}

// remote is a standalone goroutine to handle remote mining related stuff.
func (ethash *Ethash) remote(notify []string) {
	var (
		works = make(map[common.Hash]*types.Block)
		rates = make(map[common.Hash]hashrate)

		currentBlock *types.Block
		currentWork  [3]string

		notifyTransport = &http.Transport{}
		notifyClient    = &http.Client{
			Transport: notifyTransport,
			Timeout:   time.Second,
		}
		notifyReqs = make([]*http.Request, len(notify))
	)
	// notifyWork notifies all the specified mining endpoints of the availability of
	// new work to be processed.
	notifyWork := func() {
		work := currentWork
		blob, _ := json.Marshal(work)

		for i, url := range notify {
			// Terminate any previously pending request and create the new work
			if notifyReqs[i] != nil {
				notifyTransport.CancelRequest(notifyReqs[i])
			}
			notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
			notifyReqs[i].Header.Set("Content-Type", "application/json")

			// Push the new work concurrently to all the remote nodes
			go func(req *http.Request, url string) {
				res, err := notifyClient.Do(req)
				if err != nil {
					log.Warn("Failed to notify remote miner", "err", err)
				} else {
					log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
					res.Body.Close()
				}
			}(notifyReqs[i], url)
		}
	}
	// makeWork creates a work package for an external miner.
	//
	// The work package consists of 3 strings:
	//   result[0], 32 bytes hex encoded current block header pow-hash
	//   result[1], 32 bytes hex encoded seed hash used for DAG
	//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
	makeWork := func(block *types.Block) {
		hash := block.HashNoNonce()

		currentWork[0] = hash.Hex()
		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()

		// Trace the seal work fetched by remote sealer.
		currentBlock = block
		works[hash] = block
	}
	// submitWork verifies the submitted pow solution, returning whether the
	// solution was accepted or not (a rejection can mean a bad pow as well as
	// any other error, like no pending work or a stale mining result).
	submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
		// Make sure the work submitted is present
		block := works[hash]
		if block == nil {
			log.Info("Work submitted but none pending", "hash", hash)
			return false
		}
		// Verify the correctness of submitted result.
		header := block.Header()
		header.Nonce = nonce
		header.MixDigest = mixDigest

		start := time.Now()
		if err := ethash.verifySeal(nil, header, true); err != nil {
			log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
			return false
		}
		// Make sure the result channel is created.
		if ethash.resultCh == nil {
			log.Warn("Ethash result channel is empty, submitted mining result is rejected")
			return false
		}
		log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))

		// The solution seems to be valid, return to the miner and notify acceptance.
		select {
		case ethash.resultCh <- block.WithSeal(header):
			delete(works, hash)
			return true
		default:
			log.Info("Work submitted is stale", "hash", hash)
			return false
		}
	}

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case block := <-ethash.workCh:
			if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
				// Start new round mining, throw out all previous work.
				works = make(map[common.Hash]*types.Block)
			}
			// Update current work with new received block.
			// Note the same work can be passed in twice, which happens when changing CPU threads.
			makeWork(block)

			// Notify all the requested URLs of the new work availability
			notifyWork()

		case work := <-ethash.fetchWorkCh:
			// Return current mining work to remote miner.
			if currentBlock == nil {
				work.errc <- errNoMiningWork
			} else {
				work.res <- currentWork
			}

		case result := <-ethash.submitWorkCh:
			// Verify submitted PoW solution based on maintained mining blocks.
			if submitWork(result.nonce, result.mixDigest, result.hash) {
				result.errc <- nil
			} else {
				result.errc <- errInvalidSealResult
			}

		case result := <-ethash.submitRateCh:
			// Trace remote sealer's hash rate by submitted value.
			rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
			close(result.done)

		case req := <-ethash.fetchRateCh:
			// Gather all hash rate submitted by remote sealer.
			var total uint64
			for _, rate := range rates {
				// this could overflow
				total += rate.rate
			}
			req <- total

		case <-ticker.C:
			// Clear stale submitted hash rate.
			for id, rate := range rates {
				if time.Since(rate.ping) > 10*time.Second {
					delete(rates, id)
				}
			}

		case errc := <-ethash.exitCh:
			// Exit remote loop if ethash is closed and return relevant error.
			errc <- nil
			log.Trace("Ethash remote sealer is exiting")
			return
		}
	}
}

consensus/ethash/sealer_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package ethash

import (
	"encoding/json"
	"io/ioutil"
	"math/big"
	"net"
	"net/http"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// Tests whether remote HTTP servers are correctly notified of new work.
func TestRemoteNotify(t *testing.T) {
	// Start a simple webserver to capture notifications
	sink := make(chan [3]string)

	server := &http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			blob, err := ioutil.ReadAll(req.Body)
			if err != nil {
				t.Fatalf("failed to read miner notification: %v", err)
			}
			var work [3]string
			if err := json.Unmarshal(blob, &work); err != nil {
				t.Fatalf("failed to unmarshal miner notification: %v", err)
			}
			sink <- work
		}),
	}
	// Open a custom listener to extract its local address
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("failed to open notification server: %v", err)
	}
	defer listener.Close()

	go server.Serve(listener)

	// Create the custom ethash engine
	ethash := NewTester([]string{"http://" + listener.Addr().String()})
	defer ethash.Close()

	// Stream a work task and ensure the notification bubbles out
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
	block := types.NewBlockWithHeader(header)

	ethash.Seal(nil, block, nil)
	select {
	case work := <-sink:
		if want := header.HashNoNonce().Hex(); work[0] != want {
			t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want)
		}
		if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want {
			t.Errorf("work packet seed mismatch: have %s, want %s", work[1], want)
		}
		target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty)
		if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
			t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
		}
	case <-time.After(time.Second):
		t.Fatalf("notification timed out")
	}
}

// Tests that pushing work packages fast to the miner doesn't cause any data race
// issues in the notifications.
func TestRemoteMultiNotify(t *testing.T) {
	// Start a simple webserver to capture notifications
	sink := make(chan [3]string, 64)

	server := &http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			blob, err := ioutil.ReadAll(req.Body)
			if err != nil {
				t.Fatalf("failed to read miner notification: %v", err)
			}
			var work [3]string
			if err := json.Unmarshal(blob, &work); err != nil {
				t.Fatalf("failed to unmarshal miner notification: %v", err)
			}
			sink <- work
		}),
	}
	// Open a custom listener to extract its local address
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("failed to open notification server: %v", err)
	}
	defer listener.Close()

	go server.Serve(listener)

	// Create the custom ethash engine
	ethash := NewTester([]string{"http://" + listener.Addr().String()})
	defer ethash.Close()

	// Stream a lot of work tasks and ensure all the notifications bubble out
	for i := 0; i < cap(sink); i++ {
		header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
		block := types.NewBlockWithHeader(header)

		ethash.Seal(nil, block, nil)
	}
	for i := 0; i < cap(sink); i++ {
		select {
		case <-sink:
		case <-time.After(250 * time.Millisecond):
			t.Fatalf("notification %d timed out", i)
		}
	}
}

@@ -314,7 +314,7 @@ func (c *Console) Interactive() {
		input     = ""                // Current user input
		scheduler = make(chan string) // Channel to send the next prompt on and receive the input
	)
	// Start a goroutine to listen for promt requests and send back inputs
	// Start a goroutine to listen for prompt requests and send back inputs
	go func() {
		for {
			// Read the next user input

@@ -201,7 +201,7 @@ func TestInteractive(t *testing.T) {

	go tester.console.Interactive()

	// Wait for a promt and send a statement back
	// Wait for a prompt and send a statement back
	select {
	case <-tester.input.scheduler:
	case <-time.After(time.Second):
@@ -212,7 +212,7 @@ func TestInteractive(t *testing.T) {
	case <-time.After(time.Second):
		t.Fatalf("input feedback timeout")
	}
	// Wait for the second promt and ensure first statement was evaluated
	// Wait for the second prompt and ensure first statement was evaluated
	select {
	case <-tester.input.scheduler:
	case <-time.After(time.Second):
@@ -249,7 +249,7 @@ func TestExecute(t *testing.T) {
}

// Tests that the JavaScript objects returned by statement executions are properly
// pretty printed instead of just displaing "[object]".
// pretty printed instead of just displaying "[object]".
func TestPrettyPrint(t *testing.T) {
	tester := newTester(t, nil)
	defer tester.Close(t)
@@ -300,7 +300,7 @@ func TestIndenting(t *testing.T) {
	}{
		{`var a = 1;`, 0},
		{`"some string"`, 0},
		{`"some string with (parentesis`, 0},
		{`"some string with (parenthesis`, 0},
		{`"some string with newline
("`, 0},
		{`function v(a,b) {}`, 0},
@@ -27,7 +27,7 @@ import (
// Only this reader may be used for input because it keeps an internal buffer.
var Stdin = newTerminalPrompter()

// UserPrompter defines the methods needed by the console to promt the user for
// UserPrompter defines the methods needed by the console to prompt the user for
// various types of inputs.
type UserPrompter interface {
	// PromptInput displays the given prompt to the user and requests some textual

@@ -46,7 +46,7 @@ func newTestBackend() *backends.SimulatedBackend {
		addr0: {Balance: big.NewInt(1000000000)},
		addr1: {Balance: big.NewInt(1000000000)},
		addr2: {Balance: big.NewInt(1000000000)},
	})
	}, 10000000)
}

func deploy(prvKey *ecdsa.PrivateKey, amount *big.Int, backend *backends.SimulatedBackend) (common.Address, error) {

@@ -40,7 +40,7 @@ var (
)

func main() {
	backend := backends.NewSimulatedBackend(testAlloc)
	backend := backends.NewSimulatedBackend(testAlloc, uint64(100000000))
	auth := bind.NewKeyedTransactor(testKey)

	// Deploy the contract, get the code.

@@ -35,7 +35,7 @@ var (
)

func TestENS(t *testing.T) {
	contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}})
	contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000)
	transactOpts := bind.NewKeyedTransactor(key)

	ensAddr, ens, err := DeployENS(transactOpts, contractBackend)
@@ -450,15 +450,19 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}

		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}

	return nil
@@ -895,9 +899,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteBlock(batch, block)
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
@@ -951,6 +953,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
@@ -1012,7 +1017,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a messge (programming error) and skip insertion
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

@@ -1203,8 +1208,8 @@ type insertStats struct {
	startTime mclock.AbsTime
}

// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
@@ -1146,8 +1146,7 @@ func TestEIP155Transition(t *testing.T) {
 				return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
 			}
 		)
-		switch i {
-		case 0:
+		if i == 0 {
 			tx, err = basicTx(types.NewEIP155Signer(big.NewInt(2)))
 			if err != nil {
 				t.Fatal(err)
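For context on what the test drives: an EIP155 signer binds the signature to a chain ID, so a transaction signed for chain 2 will not replay elsewhere. A hedged sketch using the same calls as the test line above (key generation and the printed output are illustrative):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()

	// Chain ID 2 matches the signer the test hands to basicTx.
	signer := types.NewEIP155Signer(big.NewInt(2))
	tx := types.NewTransaction(0, common.Address{}, new(big.Int), 21000, new(big.Int), nil)

	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed tx hash:", signed.Hash().Hex())
}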
@@ -22,16 +22,22 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )

-// errSectionOutOfBounds is returned if the user tried to add more bloom filters
-// to the batch than available space, or if tries to retrieve above the capacity,
-var errSectionOutOfBounds = errors.New("section out of bounds")
+var (
+	// errSectionOutOfBounds is returned if the user tried to add more bloom filters
+	// to the batch than available space, or if tries to retrieve above the capacity.
+	errSectionOutOfBounds = errors.New("section out of bounds")
+
+	// errBloomBitOutOfBounds is returned if the user tried to retrieve specified
+	// bit bloom above the capacity.
+	errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
+)

 // Generator takes a number of bloom filters and generates the rotated bloom bits
 // to be used for batched filtering.
 type Generator struct {
 	blooms   [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
 	sections uint                         // Number of sections to batch together
-	nextBit  uint                         // Next bit to set when adding a bloom
+	nextSec  uint                         // Next section to set when adding a bloom
 }

 // NewGenerator creates a rotated bloom generator that can iteratively fill a
@@ -51,15 +57,15 @@ func NewGenerator(sections uint) (*Generator, error) {
 // in memory accordingly.
 func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
 	// Make sure we're not adding more bloom filters than our capacity
-	if b.nextBit >= b.sections {
+	if b.nextSec >= b.sections {
 		return errSectionOutOfBounds
 	}
-	if b.nextBit != index {
+	if b.nextSec != index {
 		return errors.New("bloom filter with unexpected index")
 	}
 	// Rotate the bloom and insert into our collection
-	byteIndex := b.nextBit / 8
-	bitMask := byte(1) << byte(7-b.nextBit%8)
+	byteIndex := b.nextSec / 8
+	bitMask := byte(1) << byte(7-b.nextSec%8)

 	for i := 0; i < types.BloomBitLength; i++ {
 		bloomByteIndex := types.BloomByteLength - 1 - i/8
@@ -69,7 +75,7 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
 			b.blooms[i][byteIndex] |= bitMask
 		}
 	}
-	b.nextBit++
+	b.nextSec++

 	return nil
 }
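The loop above is a bit-matrix transpose: bit i of section s's bloom becomes bit s of per-bit vector i, which is what makes batched per-bit filtering possible. A toy 8x8 version of the same rotation, to make the index arithmetic concrete (the 8x8 shape is illustrative; the real code has 2048 columns):

package main

import "fmt"

// rotate transposes a toy 8x8 bit matrix: input[row] is one item's
// 8-bit "bloom", output[bit] packs that bit from every row.
func rotate(input [8]byte) (output [8]byte) {
	for row := 0; row < 8; row++ {
		for bit := 0; bit < 8; bit++ {
			if input[row]&(1<<uint(7-bit)) != 0 {
				output[bit] |= 1 << uint(7-row)
			}
		}
	}
	return
}

func main() {
	// One item (row 0) with every bloom bit set: after rotation each
	// per-bit vector has only its first (row 0) position set.
	fmt.Printf("%08b\n", rotate([8]byte{0xff}))
}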
@@ -77,11 +83,11 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
 // Bitset returns the bit vector belonging to the given bit index after all
 // blooms have been added.
 func (b *Generator) Bitset(idx uint) ([]byte, error) {
-	if b.nextBit != b.sections {
+	if b.nextSec != b.sections {
 		return nil, errors.New("bloom not fully generated yet")
 	}
-	if idx >= b.sections {
-		return nil, errSectionOutOfBounds
+	if idx >= types.BloomBitLength {
+		return nil, errBloomBitOutOfBounds
 	}
 	return b.blooms[idx], nil
 }
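Putting the whole Generator lifecycle together, a small usage sketch based on the signatures visible in the hunks above (the section count of 8 is illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Batch 8 sections together; the count must be a multiple of 8 so
	// each per-bit vector packs into whole bytes.
	gen, err := bloombits.NewGenerator(8)
	if err != nil {
		panic(err)
	}
	// Sections must be added in order: AddBloom checks the index
	// against nextSec and errors on gaps or overflow.
	for sec := uint(0); sec < 8; sec++ {
		if err := gen.AddBloom(sec, types.Bloom{}); err != nil {
			panic(err)
		}
	}
	// Once full, pull the rotated vector for any of the 2048 bloom bits.
	bits, err := gen.Bitset(0)
	fmt.Println(len(bits), err) // 1 byte covering the 8 sections
}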
@@ -59,7 +59,7 @@ type partialMatches struct {
 // It can also have the actual results set to be used as a delivery data struct.
 //
 // The contest and error fields are used by the light client to terminate matching
-// early if an error is enountered on some path of the pipeline.
+// early if an error is encountered on some path of the pipeline.
 type Retrieval struct {
 	Bit      uint
 	Sections []uint64
@@ -218,7 +218,7 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
 // run creates a daisy-chain of sub-matchers, one for the address set and one
 // for each topic set, each sub-matcher receiving a section only if the previous
 // ones have all found a potential match in one of the blocks of the section,
-// then binary AND-ing its own matches and forwaring the result to the next one.
+// then binary AND-ing its own matches and forwarding the result to the next one.
 //
 // The method starts feeding the section indexes into the first sub-matcher on a
 // new goroutine and returns a sink channel receiving the results.
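The daisy-chain described in that comment can be sketched with plain channels: each stage ANDs its own matches into the incoming bit vector and forwards only sections that still have a chance of matching. A toy version (byte-sized bitsets and the literal masks are illustrative, not the real matcher):

package main

import "fmt"

// stage ANDs its own per-section matches into what arrived from the
// previous stage and forwards only sections that still match.
func stage(in <-chan byte, own byte) <-chan byte {
	out := make(chan byte)
	go func() {
		defer close(out)
		for v := range in {
			if v &= own; v != 0 {
				out <- v
			}
		}
	}()
	return out
}

func main() {
	src := make(chan byte, 3)
	src <- 0b11110000
	src <- 0b00001111
	src <- 0b10101010
	close(src)

	// Two chained stages, e.g. one address filter and one topic filter.
	for v := range stage(stage(src, 0b11001100), 0b10001000) {
		fmt.Printf("%08b\n", v)
	}
}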
@@ -543,7 +543,7 @@ func (s *MatcherSession) Error() error {
 }

 // AllocateRetrieval assigns a bloom bit index to a client process that can either
-// immediately reuest and fetch the section contents assigned to this bit or wait
+// immediately request and fetch the section contents assigned to this bit or wait
 // a little while for more sections to be requested.
 func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
 	fetcher := make(chan uint)
@@ -599,8 +599,8 @@ func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets []
 	}
 }

-// Multiplex polls the matcher session for rerieval tasks and multiplexes it into
-// the reuested retrieval queue to be serviced together with other sessions.
+// Multiplex polls the matcher session for retrieval tasks and multiplexes it into
+// the requested retrieval queue to be serviced together with other sessions.
 //
 // This method will block for the lifetime of the session. Even after termination
 // of the session, any request in-flight need to be responded to! Empty responses
@@ -156,7 +156,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
 	// Track the number of retrieval requests made
 	var requested uint32

-	// Start the matching session for the filter and the retriver goroutines
+	// Start the matching session for the filter and the retriever goroutines
 	quit := make(chan struct{})
 	matches := make(chan uint64, 16)

@@ -17,6 +17,7 @@
 package core

 import (
+	"context"
 	"encoding/binary"
 	"fmt"
 	"sync"
@@ -37,11 +38,11 @@ import (
 type ChainIndexerBackend interface {
 	// Reset initiates the processing of a new chain segment, potentially terminating
 	// any partially completed operations (in case of a reorg).
-	Reset(section uint64, prevHead common.Hash) error
+	Reset(ctx context.Context, section uint64, prevHead common.Hash) error

 	// Process crunches through the next header in the chain segment. The caller
 	// will ensure a sequential order of headers.
-	Process(header *types.Header)
+	Process(ctx context.Context, header *types.Header) error

 	// Commit finalizes the section metadata and stores it into the database.
 	Commit() error
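Implementers of ChainIndexerBackend now receive a context and can fail Process. A hypothetical no-op backend showing the new method shapes (noopIndexer and its behavior are invented for illustration, mirroring the interface in the hunk above):

package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// noopIndexer shows the shape of the context-aware backend; a real
// implementation would build and persist index data.
type noopIndexer struct{ section uint64 }

func (n *noopIndexer) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
	n.section = section
	return nil
}

func (n *noopIndexer) Process(ctx context.Context, header *types.Header) error {
	select {
	case <-ctx.Done(): // abort mid-section if the indexer is shutting down
		return ctx.Err()
	default:
	}
	fmt.Println("indexing header", header.Number)
	return nil
}

func (n *noopIndexer) Commit() error { return nil }

func main() {
	b := &noopIndexer{}
	_ = b.Reset(context.Background(), 0, common.Hash{})
	_ = b.Process(context.Background(), &types.Header{Number: big.NewInt(1)})
}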
@@ -71,9 +72,11 @@ type ChainIndexer struct {
 	backend  ChainIndexerBackend // Background processor generating the index data content
 	children []*ChainIndexer     // Child indexers to cascade chain updates to

-	active uint32          // Flag whether the event loop was started
-	update chan struct{}   // Notification channel that headers should be processed
-	quit   chan chan error // Quit channel to tear down running goroutines
+	active    uint32          // Flag whether the event loop was started
+	update    chan struct{}   // Notification channel that headers should be processed
+	quit      chan chan error // Quit channel to tear down running goroutines
+	ctx       context.Context
+	ctxCancel func()

 	sectionSize uint64 // Number of blocks in a single chain segment to process
 	confirmsReq uint64 // Number of confirmations before processing a completed segment
@@ -105,6 +108,8 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
 	}
 	// Initialize database dependent fields and start the updater
 	c.loadValidSections()
+	c.ctx, c.ctxCancel = context.WithCancel(context.Background())
+
 	go c.updateLoop()

 	return c
@@ -138,6 +143,8 @@ func (c *ChainIndexer) Start(chain ChainIndexerChain) {
 func (c *ChainIndexer) Close() error {
 	var errs []error

+	c.ctxCancel()
+
 	// Tear down the primary update loop
 	errc := make(chan error)
 	c.quit <- errc
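The constructor and Close hunks together form a standard Go lifecycle: create a cancellable context at startup, cancel it on close so background goroutines can unwind. A self-contained sketch of that pattern (the worker type is invented for illustration):

package main

import (
	"context"
	"fmt"
	"time"
)

type worker struct {
	ctx       context.Context
	ctxCancel func()
}

func newWorker() *worker {
	w := &worker{}
	// Created in the constructor, exactly as NewChainIndexer does above.
	w.ctx, w.ctxCancel = context.WithCancel(context.Background())
	go w.loop()
	return w
}

func (w *worker) loop() {
	for {
		select {
		case <-w.ctx.Done():
			fmt.Println("loop: shutting down")
			return
		case <-time.After(10 * time.Millisecond):
			fmt.Println("loop: tick")
		}
	}
}

// Close cancels the context, mirroring the c.ctxCancel() call above.
func (w *worker) Close() { w.ctxCancel() }

func main() {
	w := newWorker()
	time.Sleep(25 * time.Millisecond)
	w.Close()
	time.Sleep(5 * time.Millisecond)
}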
@@ -297,6 +304,12 @@ func (c *ChainIndexer) updateLoop() {
 			c.lock.Unlock()
 			newHead, err := c.processSection(section, oldHead)
 			if err != nil {
+				select {
+				case <-c.ctx.Done():
+					<-c.quit <- nil
+					return
+				default:
+				}
 				c.log.Error("Section processing failed", "error", err)
 			}
 			c.lock.Lock()
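The added select is a non-blocking check: if the context is already cancelled, the failure is treated as part of shutdown rather than logged as a real error. The same pattern in isolation (failedSection is a made-up stand-in for the error path above):

package main

import (
	"context"
	"fmt"
)

// failedSection suppresses errors once the context is cancelled,
// and otherwise falls through to normal error logging.
func failedSection(ctx context.Context, err error) {
	select {
	case <-ctx.Done():
		fmt.Println("shutting down, suppressing:", err)
		return
	default:
	}
	fmt.Println("section processing failed:", err)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	failedSection(ctx, fmt.Errorf("boom")) // logged as a real failure
	cancel()
	failedSection(ctx, fmt.Errorf("boom")) // treated as shutdown
}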
@@ -344,7 +357,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com

 	// Reset and partial processing

-	if err := c.backend.Reset(section, lastHead); err != nil {
+	if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
 		c.setValidSections(0)
 		return common.Hash{}, err
 	}
@@ -360,11 +373,12 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
 		} else if header.ParentHash != lastHead {
 			return common.Hash{}, fmt.Errorf("chain reorged during section processing")
 		}
-		c.backend.Process(header)
+		if err := c.backend.Process(c.ctx, header); err != nil {
+			return common.Hash{}, err
+		}
 		lastHead = header.Hash()
 	}
 	if err := c.backend.Commit(); err != nil {
-		c.log.Error("Section commit failed", "error", err)
 		return common.Hash{}, err
 	}
 	return lastHead, nil
@@ -17,6 +17,7 @@
 package core

 import (
+	"context"
 	"fmt"
 	"math/big"
 	"math/rand"
@@ -210,13 +211,13 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
 	return b.stored * b.indexer.sectionSize
 }

-func (b *testChainIndexBackend) Reset(section uint64, prevHead common.Hash) error {
+func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
 	b.section = section
 	b.headerCnt = 0
 	return nil
 }

-func (b *testChainIndexBackend) Process(header *types.Header) {
+func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error {
 	b.headerCnt++
 	if b.headerCnt > b.indexer.sectionSize {
 		b.t.Error("Processing too many headers")
@@ -227,6 +228,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
 		b.t.Fatal("Unexpected call to Process")
 	case b.processCh <- header.Number.Uint64():
 	}
+	return nil
 }

 func (b *testChainIndexBackend) Commit() error {
Some files were not shown because too many files have changed in this diff.