Compare commits

244 Commits

dae82f0985
8fdbbef72d
8696986547
d606a7a46a
174083c3ae
bfed28a421
edc39aaedf
89fe24bbcc
8b9f469419
695a5cce1e
c207edf2a3
4f0d978eaa
1cd007ecae
bba5fd8192
2714e8f091
0699287440
197d609b9a
ca228569e4
f5e6634fd2
93854bbad4
f0515800e6
bb29d20828
38592a13a3
a5898ba621
2a113f6d72
b24ef5e05d
76f5f662cc
6b2cc8950e
2843001ac2
9d5e3e0637
3ba0418a9a
e0d091e090
070caec4bd
4c181e4fb9
333b5fb123
3fd87f2193
c7e522fd17
5d80a1b665
493903eede
3d997b6dec
d876f214e5
7bf7bd2f50
d31f1f4fdb
6b6c4d1c27
3333fe660f
51e2e78d26
d136e985e8
91c66d47ef
accc0fab4f
51b2f1620c
68be45e5f8
ffe2fc3bc4
324027640b
b91766fe6d
a6942b9f25
17d67c5834
434dd5bc00
14346e4ef9
b8a2ac3fcf
9a000601c6
23de6197f9
698843b45f
48b4e8069c
58632d4402
eb8fa3cc89
cff97119a7
cef7ed53bd
c41e1bd1eb
4fecc7a3b1
588aa88121
8080265f3f
1212c7b844
201a0bf181
a0876f7433
1ff152f3a4
f574c4e74b
870efeef01
144c1c6c52
b16cc501a8
9313fa63f9
d0675e9d9c
bd519ab8ae
1064e3283d
a5dc087845
212bf266c5
c71e4fc4d5
968f6019d0
503993c819
cf3b187bde
81533deae5
0bcff8f525
36ca85fa1c
b35165555d
5b74bb6445
eea3ae42a3
dc6648bb58
0fe0b8f7b9
80e2f3aca4
e2640a96d4
79c7a69ac8
53eb4e0b0f
baee850471
126dfde6c9
f08f596a37
3e1cfbae93
54f650a3be
1b6fd032e3
8ed4739176
80d3907767
6810933640
7f22b59f87
4c0883e20d
3088c122d8
88b41a9e68
66debd91d9
75060ef96e
6ff97bf2e5
d98c45f70f
aeb733623e
97fb08342d
cdf5982cfc
4e693ad5a6
4466c7b971
2868acd80b
6c313fff7b
a352de6a08
6a7695e367
16e4d0e005
331fa6d307
3e92c853fb
60827dc50f
2e98631c5e
6566a0a3b8
dc3c3fb1e1
862d6f2fbf
4868964bb9
6f607de5d5
dcae0d348b
f951e23fb5
aff421e78c
4e474c74dc
da290e9707
0fe9a372b3
d5c7a6056a
ff5538ad4c
8bbe72075e
97b2806686
11d0ff6578
72a076840b
459278cd57
cfcc47529d
c5d34fc94e
53634f1e04
31c4e3a118
1d3d4a4d57
c5cb214f68
f95811e65b
5ed3960b9b
5b0c9c8ae5
58e868b759
5d3b7bb023
6ee3b26f44
092df3ab59
81375a3801
d79602d2d4
ff7fad18fb
89a32451ae
8c63d0d2e4
1895059119
127553253e
ff6e0351ab
b8a0daf0cc
bfa0f96822
82a1c771ef
9d06b2c5f3
e5677114dc
303b99663e
14bef9a2db
d3a773c284
de01178c18
696bc9b01c
58c0879c2f
68b8088cb9
b6ccc06cda
83705ef6aa
b35622cf3c
f1e86ad9cf
bd1f7ebda2
26a37c5351
0bf3065fb4
83116a3479
2c8d5dec50
668c37fde1
b7bbe66b19
96fd50be10
634e963f02
dc5d643bb5
9a749dcde5
b69942befe
86ec213076
3d782bc727
01d9f29805
024b22c30e
ffca6dfe01
107f556b2d
44eb69561a
d9e324a331
a5aaab2f22
f1b9a3e2f4
79ca6c7a65
4d8c7248bd
7e1c374dc6
7910dd5179
0ee44e796a
bf37241eb5
d5837e84ff
dcaabfe7f6
2c110c81ee
c63985d194
0da3b17a11
d8d8669271
bd1f74f654
86f68cf04f
a5e6bf7eef
e39a9b3480
c3cfdfacd0
4b6824e07b
3f7acbbeb9
0d5e1e7bc9
26cf866349
60577d48d5
bf411a04ba
24349144b6
7d56602391
d3441ebb56
09dde380f9
a95a601f35
d5db4f810e
b66f793443
6663e5da10
30cd5c1854
9e99a0c2b9
0ae462fb80
d3f056bd68
15  .github/CODEOWNERS  vendored
@@ -9,24 +9,27 @@ les/ @zsfelfoldi
 light/ @zsfelfoldi
 mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
 p2p/simulations @lmars
 p2p/protocols @zelig
 swarm/api/http @justelad
 swarm/bmt @zelig
 swarm/dev @lmars
 swarm/fuse @jmozah @holisticode
 swarm/grafana_dashboards @nonsense
 swarm/metrics @nonsense @holisticode
 swarm/multihash @nolash
-swarm/network/bitvector @zelig @janos @gbalint
-swarm/network/priorityqueue @zelig @janos @gbalint
-swarm/network/simulations @zelig
-swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/bitvector @zelig @janos
+swarm/network/priorityqueue @zelig @janos
+swarm/network/simulations @zelig @janos
+swarm/network/stream @janos @zelig @holisticode @justelad
 swarm/network/stream/intervals @janos
 swarm/network/stream/testing @zelig
 swarm/pot @zelig
 swarm/pss @nolash @zelig @nonsense
 swarm/services @zelig
 swarm/state @justelad
-swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/encryption @zelig @nagydani
 swarm/storage/mock @janos
-swarm/storage/mru @nolash
+swarm/storage/feed @nolash @jpeletier
 swarm/testutil @lmars
 whisper/ @gballet @gluk256
46  .github/CONTRIBUTING.md  vendored
@@ -1,16 +1,40 @@
 # Contributing
 
+Thank you for considering to help out with the source code! We welcome
+contributions from anyone on the internet, and are grateful for even the
+smallest of fixes!
+
+If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
+pull request for the maintainers to review and merge into the main code base. If
+you wish to submit more complex changes though, please check up with the core
+devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
+ensure those changes are in line with the general philosophy of the project
+and/or get some early feedback which can make both your efforts much lighter as
+well as our review and merge procedures quick and simple.
+
+## Coding guidelines
+
+Please make sure your contributions adhere to our coding guidelines:
+
+ * Code must adhere to the official Go
+   [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
+   (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
+ * Code must be documented adhering to the official Go
+   [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
+ * Pull requests need to be based on and opened against the `master` branch.
+ * Commit messages should be prefixed with the package(s) they modify.
+   * E.g. "eth, rpc: make trace configs optional"
+
 ## Can I have feature X
 
-Before you do a feature request please check and make sure that it isn't possible
-through some other means. The JavaScript enabled console is a powerful feature
-in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
+Before you submit a feature request, please check and make sure that it isn't
+possible through some other means. The JavaScript-enabled console is a powerful
+feature in the right hands. Please check our
+[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
 and help.
 
-## Contributing
+## Configuration, dependencies, and tests
 
-If you'd like to contribute to go-ethereum please fork, fix, commit and
-send a pull request. Commits which do not comply with the coding standards
-are ignored (use gofmt!).
-
-See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
-for more details on configuring your environment, testing, and
-dependency management.
+Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
+for more details on configuring your environment, managing project dependencies
+and testing procedures.
46  .travis.yml
@@ -3,17 +3,6 @@ go_import_path: github.com/ethereum/go-ethereum
 sudo: false
 matrix:
   include:
-    - os: linux
-      dist: trusty
-      sudo: required
-      go: 1.9.x
-      script:
-        - sudo modprobe fuse
-        - sudo chmod 666 /dev/fuse
-        - sudo chown root:$USER /etc/fuse.conf
-        - go run build/ci.go install
-        - go run build/ci.go test -coverage $TEST_PACKAGES
-
     - os: linux
       dist: trusty
       sudo: required

@@ -40,6 +29,14 @@ matrix:
     - os: osx
      go: 1.11.x
      script:
+        - echo "Increase the maximum number of open file descriptors on macOS"
+        - NOFILE=20480
+        - sudo sysctl -w kern.maxfiles=$NOFILE
+        - sudo sysctl -w kern.maxfilesperproc=$NOFILE
+        - sudo launchctl limit maxfiles $NOFILE $NOFILE
+        - sudo launchctl limit maxfiles
+        - ulimit -S -n $NOFILE
+        - ulimit -n
        - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
        - go run build/ci.go install
        - go run build/ci.go test -coverage $TEST_PACKAGES

@@ -56,7 +53,8 @@ matrix:
        - go run build/ci.go lint

    # This builder does the Ubuntu PPA upload
-    - os: linux
+    - if: type = push
+      os: linux
      dist: trusty
      go: 1.11.x
      env:

@@ -74,7 +72,8 @@ matrix:
        - go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum

    # This builder does the Linux Azure uploads
-    - os: linux
+    - if: type = push
+      os: linux
      dist: trusty
      sudo: required
      go: 1.11.x

@@ -107,7 +106,8 @@ matrix:
        - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

    # This builder does the Linux Azure MIPS xgo uploads
-    - os: linux
+    - if: type = push
+      os: linux
      dist: trusty
      services:
        - docker

@@ -134,7 +134,8 @@ matrix:
        - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

    # This builder does the Android Maven and Azure uploads
-    - os: linux
+    - if: type = push
+      os: linux
      dist: trusty
      addons:
        apt:

@@ -155,7 +156,7 @@ matrix:
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
-        - curl https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz | tar -xz
+        - curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go

@@ -171,7 +172,8 @@ matrix:
        - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds

    # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
-    - os: osx
+    - if: type = push
+      os: osx
      go: 1.11.x
      env:
        - azure-osx

@@ -199,7 +201,8 @@ matrix:
        - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds

    # This builder does the Azure archive purges to avoid accumulating junk
-    - os: linux
+    - if: type = cron
+      os: linux
      dist: trusty
      go: 1.11.x
      env:

@@ -208,10 +211,3 @@ matrix:
        submodules: false # avoid cloning ethereum/tests
      script:
        - go run build/ci.go purge -store gethstore/builds -days 14
-
-notifications:
-  webhooks:
-    urls:
-      - https://webhooks.gitter.im/e/e09ccdce1048c5e03445
-    on_success: change
-    on_failure: always
3  Makefile
@@ -57,6 +57,9 @@ devtools:
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
 
+swarm-devtools:
+	env GOBIN= go install ./cmd/swarm/mimegen
+
 # Cross Compilation Targets (xgo)
 
 geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
@@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the
 [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
 on the wiki.
 
-Building geth requires both a Go (version 1.7 or later) and a C compiler.
+Building geth requires both a Go (version 1.9 or later) and a C compiler.
 You can install them using your favourite package manager.
 Once the dependencies are installed, run
@@ -137,6 +137,9 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
 // MethodById looks up a method by the 4-byte id
 // returns nil if none found
 func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
+	if len(sigdata) < 4 {
+		return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
+	}
 	for _, method := range abi.Methods {
 		if bytes.Equal(method.Id(), sigdata[:4]) {
 			return &method, nil
@@ -711,5 +711,14 @@ func TestABI_MethodById(t *testing.T) {
 			t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
 		}
 	}
+	// Also test empty
+	if _, err := abi.MethodById([]byte{0x00}); err == nil {
+		t.Errorf("Expected error, too short to decode data")
+	}
+	if _, err := abi.MethodById([]byte{}); err == nil {
+		t.Errorf("Expected error, too short to decode data")
+	}
+	if _, err := abi.MethodById(nil); err == nil {
+		t.Errorf("Expected error, nil is short to decode data")
+	}
 }
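For context on the new length guard: `MethodById` matches the first four bytes of calldata against each method id, so anything shorter used to panic on `sigdata[:4]`. A sketch of how such a selector is computed, assuming the usual Keccak-256 hashing of the canonical signature (the `selector` helper is made up for illustration):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3" // Keccak-256, as used for ABI method ids
)

// selector computes the 4-byte ABI method id: the first four bytes of
// keccak256 over the canonical method signature string.
func selector(signature string) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(signature))
	return h.Sum(nil)[:4]
}

func main() {
	// 0xa9059cbb is the well-known selector for ERC-20 transfer.
	fmt.Printf("%x\n", selector("transfer(address,uint256)"))
	// MethodById receives calldata shaped selector||args; with fewer than
	// four bytes the lookup now fails fast instead of slicing out of range.
}
```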
@@ -208,7 +208,7 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
 }
 
 // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
-// chain doens't have miners, we just return a gas price of 1 for any call.
+// chain doesn't have miners, we just return a gas price of 1 for any call.
 func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
 	return big.NewInt(1), nil
 }
@@ -23,13 +23,13 @@ package bind
 import (
 	"bytes"
 	"fmt"
+	"go/format"
 	"regexp"
 	"strings"
 	"text/template"
 	"unicode"
 
 	"github.com/ethereum/go-ethereum/accounts/abi"
-	"golang.org/x/tools/imports"
 )
 
 // Lang is a target programming language selector to generate bindings for.

@@ -145,9 +145,9 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
 	if err := tmpl.Execute(buffer, data); err != nil {
 		return "", err
 	}
-	// For Go bindings pass the code through goimports to clean it up and double check
+	// For Go bindings pass the code through gofmt to clean it up
 	if lang == LangGo {
-		code, err := imports.Process(".", buffer.Bytes(), nil)
+		code, err := format.Source(buffer.Bytes())
 		if err != nil {
 			return "", fmt.Errorf("%v\n%s", err, buffer)
 		}
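Swapping `imports.Process` for `format.Source` drops the golang.org/x/tools dependency: gofmt-as-a-library only normalizes formatting and never touches the import list, which is safe here because the template in the next diff now emits every import explicitly. A minimal sketch of the call:

```go
package main

import (
	"fmt"
	"go/format"
)

func main() {
	src := []byte("package p\nfunc  Add(a ,b int)int { return a+b }")
	// format.Source is gofmt as a library: it normalizes whitespace and
	// style but, unlike goimports, never adds or removes import lines.
	out, err := format.Source(src)
	if err != nil {
		panic(err) // the input must already be syntactically valid Go
	}
	fmt.Println(string(out))
}
```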
@@ -64,6 +64,30 @@ const tmplSourceGo = `
 
 package {{.Package}}
 
+import (
+	"math/big"
+	"strings"
+
+	ethereum "github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+	_ = big.NewInt
+	_ = strings.NewReader
+	_ = ethereum.NotFound
+	_ = abi.U256
+	_ = bind.Bind
+	_ = common.Big1
+	_ = types.BloomLookup
+	_ = event.NewSubscription
+)
+
 {{range $contract := .Contracts}}
 	// {{.Type}}ABI is the input ABI used to generate the binding from.
 	const {{.Type}}ABI = "{{.InputABI}}"
@@ -30,8 +30,8 @@ import (
 var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
 
 // DefaultBaseDerivationPath is the base path from which custom derivation endpoints
-// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
-// at m/44'/60'/0'/1, etc.
+// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second
+// at m/44'/60'/0'/0/1, etc.
 var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
 
 // DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
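The corrected comment reflects that `DefaultBaseDerivationPath` has five components, so the index that increments per account is the last one in m/44'/60'/0'/0/x. A self-contained sketch of how such a path renders, with 0x80000000 marking hardened components (`pathString` is a made-up helper, not geth API):

```go
package main

import (
	"fmt"
	"strings"
)

// hardened is the BIP-32 flag bit: indices >= 0x80000000 are hardened
// and are conventionally printed with a trailing apostrophe.
const hardened = 0x80000000

func pathString(path []uint32) string {
	var b strings.Builder
	b.WriteString("m")
	for _, c := range path {
		if c >= hardened {
			fmt.Fprintf(&b, "/%d'", c-hardened)
		} else {
			fmt.Fprintf(&b, "/%d", c)
		}
	}
	return b.String()
}

func main() {
	base := []uint32{hardened + 44, hardened + 60, hardened + 0, 0, 0}
	fmt.Println(pathString(base)) // m/44'/60'/0'/0/0, matching the fixed comment
}
```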
@@ -66,19 +66,19 @@ type plainKeyJSON struct {
 type encryptedKeyJSONV3 struct {
 	Address string     `json:"address"`
-	Crypto  cryptoJSON `json:"crypto"`
+	Crypto  CryptoJSON `json:"crypto"`
 	Id      string     `json:"id"`
 	Version int        `json:"version"`
 }
 
 type encryptedKeyJSONV1 struct {
 	Address string     `json:"address"`
-	Crypto  cryptoJSON `json:"crypto"`
+	Crypto  CryptoJSON `json:"crypto"`
 	Id      string     `json:"id"`
 	Version string     `json:"version"`
 }
 
-type cryptoJSON struct {
+type CryptoJSON struct {
 	Cipher       string           `json:"cipher"`
 	CipherText   string           `json:"ciphertext"`
 	CipherParams cipherparamsJSON `json:"cipherparams"`
@@ -135,29 +135,26 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
 	return filepath.Join(ks.keysDirPath, filename)
 }
 
-// EncryptKey encrypts a key using the specified scrypt parameters into a json
-// blob that can be decrypted later on.
-func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
-	authArray := []byte(auth)
+// Encryptdata encrypts the data given as 'data' with the password 'auth'.
+func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
+
 	salt := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
 		panic("reading from crypto/rand failed: " + err.Error())
 	}
-	derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
+	derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen)
 	if err != nil {
-		return nil, err
+		return CryptoJSON{}, err
 	}
 	encryptKey := derivedKey[:16]
-	keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
 
 	iv := make([]byte, aes.BlockSize) // 16
 	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
 		panic("reading from crypto/rand failed: " + err.Error())
 	}
-	cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
+	cipherText, err := aesCTRXOR(encryptKey, data, iv)
 	if err != nil {
-		return nil, err
+		return CryptoJSON{}, err
 	}
 	mac := crypto.Keccak256(derivedKey[16:32], cipherText)

@@ -167,12 +164,11 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
 	scryptParamsJSON["p"] = scryptP
 	scryptParamsJSON["dklen"] = scryptDKLen
 	scryptParamsJSON["salt"] = hex.EncodeToString(salt)
-
 	cipherParamsJSON := cipherparamsJSON{
 		IV: hex.EncodeToString(iv),
 	}
 
-	cryptoStruct := cryptoJSON{
+	cryptoStruct := CryptoJSON{
 		Cipher:       "aes-128-ctr",
 		CipherText:   hex.EncodeToString(cipherText),
 		CipherParams: cipherParamsJSON,

@@ -180,6 +176,17 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
 		KDFParams:    scryptParamsJSON,
 		MAC:          hex.EncodeToString(mac),
 	}
+	return cryptoStruct, nil
+}
+
+// EncryptKey encrypts a key using the specified scrypt parameters into a json
+// blob that can be decrypted later on.
+func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
+	keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
+	cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP)
+	if err != nil {
+		return nil, err
+	}
 	encryptedKeyJSONV3 := encryptedKeyJSONV3{
 		hex.EncodeToString(key.Address[:]),
 		cryptoStruct,

@@ -226,43 +233,48 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
 		PrivateKey: key,
 	}, nil
 }
+
+func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
+	if cryptoJson.Cipher != "aes-128-ctr" {
+		return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
+	}
+	mac, err := hex.DecodeString(cryptoJson.MAC)
+	if err != nil {
+		return nil, err
+	}
+
+	iv, err := hex.DecodeString(cryptoJson.CipherParams.IV)
+	if err != nil {
+		return nil, err
+	}
+
+	cipherText, err := hex.DecodeString(cryptoJson.CipherText)
+	if err != nil {
+		return nil, err
+	}
+
+	derivedKey, err := getKDFKey(cryptoJson, auth)
+	if err != nil {
+		return nil, err
+	}
+
+	calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
+	if !bytes.Equal(calculatedMAC, mac) {
+		return nil, ErrDecrypt
+	}
+
+	plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
+	if err != nil {
+		return nil, err
+	}
+	return plainText, err
+}
+
 func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
 	if keyProtected.Version != version {
 		return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
 	}
 
-	if keyProtected.Crypto.Cipher != "aes-128-ctr" {
-		return nil, nil, fmt.Errorf("Cipher not supported: %v", keyProtected.Crypto.Cipher)
-	}
-
 	keyId = uuid.Parse(keyProtected.Id)
-	mac, err := hex.DecodeString(keyProtected.Crypto.MAC)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	derivedKey, err := getKDFKey(keyProtected.Crypto, auth)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
-	if !bytes.Equal(calculatedMAC, mac) {
-		return nil, nil, ErrDecrypt
-	}
-
-	plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
+	plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
 	if err != nil {
 		return nil, nil, err
 	}

@@ -303,7 +315,7 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
 	return plainText, keyId, err
 }
 
-func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
+func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
 	authArray := []byte(auth)
 	salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
 	if err != nil {
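Taken together, `EncryptKey` is now a thin wrapper over the new exported primitives, and any blob (not just a private key) can be sealed the keystore way. A minimal round-trip sketch, assuming the post-change accounts/keystore package and its existing `LightScryptN`/`LightScryptP` constants:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	secret := []byte("arbitrary bytes, no longer only a private key")

	// Encrypt with the light scrypt parameters; the result is the CryptoJSON
	// blob (cipher, ciphertext, KDF params, MAC) shown in this diff.
	blob, err := keystore.EncryptDataV3(secret, []byte("password"),
		keystore.LightScryptN, keystore.LightScryptP)
	if err != nil {
		panic(err)
	}

	// Decryption verifies the Keccak-256 MAC before AES-CTR decryption,
	// so a wrong password fails with ErrDecrypt rather than yielding garbage.
	plain, err := keystore.DecryptDataV3(blob, "password")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain))
}
```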
@@ -350,7 +350,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 		signer = new(types.HomesteadSigner)
 	} else {
 		signer = types.NewEIP155Signer(chainID)
-		signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
+		signature[64] -= byte(chainID.Uint64()*2 + 35)
 	}
 	signed, err := tx.WithSignature(signer, signature)
 	if err != nil {
@@ -221,7 +221,7 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction
 		signer = new(types.HomesteadSigner)
 	} else {
 		signer = types.NewEIP155Signer(chainID)
-		signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
+		signature[64] -= byte(chainID.Uint64()*2 + 35)
 	}
 	// Inject the final signature into the transaction and sanity check the sender
 	signed, err := tx.WithSignature(signer, signature)
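The identical one-liner in both drivers is an EIP-155 detail: the hardware wallet returns v already offset by chainID*2 + 35, while `tx.WithSignature` expects the bare 0/1 recovery id, so the subtraction unfolds it. A worked sketch of just that arithmetic (illustration only, not driver code):

```go
package main

import "fmt"

func main() {
	const chainID = 1 // Ethereum mainnet

	// An EIP-155 signature encodes v = chainID*2 + 35 + recoveryBit,
	// so for mainnet v is 37 or 38.
	for _, v := range []uint64{37, 38} {
		recovery := byte(v) - byte(chainID*2+35) // what signature[64] -= ... computes
		fmt.Printf("v=%d -> recovery id %d\n", v, recovery)
	}
	// Output:
	// v=37 -> recovery id 0
	// v=38 -> recovery id 1
}
```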
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-%GETH_ARCH%.zip
-  - 7z x go1.11.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
+  - 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version
13  build/ci.go
@@ -320,9 +320,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
 // "tests" also includes static analysis tools such as vet.
 
 func doTest(cmdline []string) {
-	var (
-		coverage = flag.Bool("coverage", false, "Whether to record code coverage")
-	)
+	coverage := flag.Bool("coverage", false, "Whether to record code coverage")
 	flag.CommandLine.Parse(cmdline)
 	env := build.Env()

@@ -332,14 +330,11 @@ func doTest(cmdline []string) {
 	}
 	packages = build.ExpandPackagesNoVendor(packages)
 
-	// Run analysis tools before the tests.
-	build.MustRun(goTool("vet", packages...))
-
-	// Run the actual tests.
-	gotest := goTool("test", buildFlags(env)...)
 	// Test a single package at a time. CI builders are slow
 	// and some tests run into timeouts under load.
-	gotest.Args = append(gotest.Args, "-p", "1")
+	gotest := goTool("test", buildFlags(env)...)
+	gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
 	if *coverage {
 		gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
 	}

@@ -1040,7 +1035,7 @@ func xgoTool(args []string) *exec.Cmd {
 func doPurge(cmdline []string) {
 	var (
 		store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
-		limit = flag.Int("days", 30, `Age threshold above which to delete unstalbe archives`)
+		limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
 	)
 	flag.CommandLine.Parse(cmdline)
@@ -75,7 +75,7 @@ func main() {
 		bins  []string
 		types []string
 	)
-	if *solFlag != "" || *abiFlag == "-" {
+	if *solFlag != "" || (*abiFlag == "-" && *pkgFlag == "") {
 		// Generate the list of types to exclude from binding
 		exclude := make(map[string]bool)
 		for _, kind := range strings.Split(*excFlag, ",") {

@@ -111,7 +111,13 @@ func main() {
 	} else {
 		// Otherwise load up the ABI, optional bytecode and type name from the parameters
-		abi, err := ioutil.ReadFile(*abiFlag)
+		var abi []byte
+		var err error
+		if *abiFlag == "-" {
+			abi, err = ioutil.ReadAll(os.Stdin)
+		} else {
+			abi, err = ioutil.ReadFile(*abiFlag)
+		}
 		if err != nil {
 			fmt.Printf("Failed to read input ABI: %v\n", err)
 			os.Exit(-1)

@@ -155,6 +161,5 @@ func contractsFromStdin() (map[string]*compiler.Contract, error) {
 	if err != nil {
 		return nil, err
 	}
-
 	return compiler.ParseCombinedJSON(bytes, "", "", "", "")
 }
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
+	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/nat"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 )

@@ -37,7 +38,7 @@ func main() {
 	var (
 		listenAddr  = flag.String("addr", ":30301", "listen address")
 		genKey      = flag.String("genkey", "", "generate a node key")
-		writeAddr   = flag.Bool("writeaddress", false, "write out the node's pubkey hash and quit")
+		writeAddr   = flag.Bool("writeaddress", false, "write out the node's public key and quit")
 		nodeKeyFile = flag.String("nodekey", "", "private key filename")
 		nodeKeyHex  = flag.String("nodekeyhex", "", "private key as hex (for testing)")
 		natdesc     = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")

@@ -85,7 +86,7 @@ func main() {
 	}
 
 	if *writeAddr {
-		fmt.Printf("%v\n", discover.PubkeyID(&nodeKey.PublicKey))
+		fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
 		os.Exit(0)
 	}

@@ -118,16 +119,17 @@ func main() {
 	}
 
 	if *runv5 {
-		if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
+		if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil {
 			utils.Fatalf("%v", err)
 		}
 	} else {
+		db, _ := enode.OpenDB("")
+		ln := enode.NewLocalNode(db, nodeKey)
 		cfg := discover.Config{
-			PrivateKey:   nodeKey,
-			AnnounceAddr: realaddr,
-			NetRestrict:  restrictList,
+			PrivateKey:  nodeKey,
+			NetRestrict: restrictList,
 		}
-		if _, err := discover.ListenUDP(conn, cfg); err != nil {
+		if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
 			utils.Fatalf("%v", err)
 		}
 	}
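With discovery wired through `enode`, `-writeaddress` now prints the raw 64-byte uncompressed secp256k1 public key rather than a `discover.PubkeyID`. A standard-library sketch of why the code slices off the first byte: the SEC1 uncompressed encoding starts with an 0x04 tag (P-256 stands in here for secp256k1, which is not in the standard library; geth itself uses `crypto.FromECDSAPub`):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	// A throwaway key just to show the encoding shape.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	pub := elliptic.Marshal(elliptic.P256(), key.PublicKey.X, key.PublicKey.Y)
	fmt.Printf("first byte: %#x (SEC1 'uncompressed' tag)\n", pub[0])
	fmt.Printf("node id:    %x\n", pub[1:]) // drop the 0x04 prefix, keep X||Y
}
```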
@@ -91,7 +91,7 @@ invoking methods with the following info:
 * [x] Version info about the signer
 * [x] Address of API (http/ipc)
 * [ ] List of known accounts
-* [ ] Have a default timeout on signing operations, so that if the user has not answered withing e.g. 60 seconds, the request is rejected.
+* [ ] Have a default timeout on signing operations, so that if the user has not answered within e.g. 60 seconds, the request is rejected.
 * [ ] `account_signRawTransaction`
 * [ ] `account_bulkSignTransactions([] transactions)` should
     * only exist if enabled via config/flag

@@ -129,7 +129,7 @@ The signer listens to HTTP requests on `rpcaddr`:`rpcport`, with the same JSONRP
 expected to be JSON [jsonrpc 2.0 standard](http://www.jsonrpc.org/specification).
 
 Some of these call can require user interaction. Clients must be aware that responses
-may be delayed significanlty or may never be received if a users decides to ignore the confirmation request.
+may be delayed significantly or may never be received if a users decides to ignore the confirmation request.
 
 The External API is **untrusted**: it does not accept credentials over this api, nor does it expect
 that requests have any authority.

@@ -862,7 +862,7 @@ A UI should conform to the following rules.
 * A UI SHOULD inform the user about the `SHA256` or `MD5` hash of the binary being executed
 * A UI SHOULD NOT maintain a secondary storage of data, e.g. list of accounts
   * The signer provides accounts
-* A UI SHOULD, to the best extent possible, use static linking / bundling, so that requried libraries are bundled
+* A UI SHOULD, to the best extent possible, use static linking / bundling, so that required libraries are bundled
   along with the UI.

@@ -875,3 +875,4 @@ There are a couple of implementation for a UI. We'll try to keep this list up to
 | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
 | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
 | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
+| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
Binary image files changed (5 files): 14 KiB → 12 KiB, 20 KiB → 17 KiB, 19 KiB → 16 KiB, 25 KiB → 22 KiB, 42 KiB → 36 KiB.
@@ -1,6 +1,13 @@
 ### Changelog for external API
 
+#### 4.0.0
+
+* The external `account_Ecrecover`-method was removed.
+* The external `account_Import`-method was removed.
+
 #### 3.0.0
 
 * The external `account_List`-method was changed to not expose `url`, which contained info about the local filesystem. It now returns only a list of addresses.
 
 #### 2.0.0
@@ -1,5 +1,24 @@
 ### Changelog for internal API (ui-api)
 
+### 3.0.0
+
+* Make use of `OnInputRequired(info UserInputRequest)` for obtaining master password during startup
+
+### 2.1.0
+
+* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
+
+  The following structures are used:
+  ```golang
+  UserInputRequest struct {
+  	Prompt     string `json:"prompt"`
+  	Title      string `json:"title"`
+  	IsPassword bool   `json:"isPassword"`
+  }
+  UserInputResponse struct {
+  	Text string `json:"text"`
+  }
+  ```
+
 ### 2.0.0
 
 * Modify how `call_info` on a transaction is conveyed. New format:
215  cmd/clef/main.go
@@ -35,8 +35,10 @@ import (
 	"runtime"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"

@@ -48,10 +50,10 @@ )
 
 // ExternalAPIVersion -- see extapi_changelog.md
-const ExternalAPIVersion = "2.0.0"
+const ExternalAPIVersion = "4.0.0"
 
 // InternalAPIVersion -- see intapi_changelog.md
-const InternalAPIVersion = "2.0.0"
+const InternalAPIVersion = "3.0.0"
 
 const legalWarning = `
 WARNING!

@@ -70,6 +72,10 @@ var (
 		Value: 4,
 		Usage: "log level to emit to the screen",
 	}
+	advancedMode = cli.BoolFlag{
+		Name:  "advanced",
+		Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
+	}
 	keystoreFlag = cli.StringFlag{
 		Name:  "keystore",
 		Value: filepath.Join(node.DefaultDataDir(), "keystore"),

@@ -87,7 +93,7 @@ var (
 	}
 	signerSecretFlag = cli.StringFlag{
 		Name:  "signersecret",
-		Usage: "A file containing the password used to encrypt Clef credentials, e.g. keystore credentials and ruleset hash",
+		Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
 	}
 	dBFlag = cli.StringFlag{
 		Name: "4bytedb",

@@ -151,18 +157,18 @@ Whenever you make an edit to the rule file, you need to use attestation to tell
 Clef that the file is 'safe' to execute.`,
 	}
 
-	addCredentialCommand = cli.Command{
-		Action:    utils.MigrateFlags(addCredential),
-		Name:      "addpw",
+	setCredentialCommand = cli.Command{
+		Action:    utils.MigrateFlags(setCredential),
+		Name:      "setpw",
 		Usage:     "Store a credential for a keystore file",
-		ArgsUsage: "<address> <password>",
+		ArgsUsage: "<address>",
 		Flags: []cli.Flag{
 			logLevelFlag,
 			configdirFlag,
 			signerSecretFlag,
 		},
 		Description: `
-The addpw command stores a password for a given address (keyfile). If you invoke it with only one parameter, it will
+The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will
 remove any stored credential for that address (keyfile)
 `,
 	}

@@ -191,9 +197,10 @@ func init() {
 		ruleFlag,
 		stdiouiFlag,
 		testFlag,
+		advancedMode,
 	}
 	app.Action = signer
-	app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand}
+	app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand}
 
 }
 func main() {

@@ -207,25 +214,45 @@ func initializeSecrets(c *cli.Context) error {
 	if err := initialize(c); err != nil {
 		return err
 	}
-	configDir := c.String(configdirFlag.Name)
+	configDir := c.GlobalString(configdirFlag.Name)
 
 	masterSeed := make([]byte, 256)
-	n, err := io.ReadFull(rand.Reader, masterSeed)
+	num, err := io.ReadFull(rand.Reader, masterSeed)
 	if err != nil {
 		return err
 	}
-	if n != len(masterSeed) {
+	if num != len(masterSeed) {
 		return fmt.Errorf("failed to read enough random")
 	}
+
+	n, p := keystore.StandardScryptN, keystore.StandardScryptP
+	if c.GlobalBool(utils.LightKDFFlag.Name) {
+		n, p = keystore.LightScryptN, keystore.LightScryptP
+	}
+	text := "The master seed of clef is locked with a password. Please give a password. Do not forget this password."
+	var password string
+	for {
+		password = getPassPhrase(text, true)
+		if err := core.ValidatePasswordFormat(password); err != nil {
+			fmt.Printf("invalid password: %v\n", err)
+		} else {
+			break
+		}
+	}
+	cipherSeed, err := encryptSeed(masterSeed, []byte(password), n, p)
+	if err != nil {
+		return fmt.Errorf("failed to encrypt master seed: %v", err)
+	}
+
 	err = os.Mkdir(configDir, 0700)
 	if err != nil && !os.IsExist(err) {
 		return err
 	}
-	location := filepath.Join(configDir, "secrets.dat")
+	location := filepath.Join(configDir, "masterseed.json")
 	if _, err := os.Stat(location); err == nil {
 		return fmt.Errorf("file %v already exists, will not overwrite", location)
 	}
-	err = ioutil.WriteFile(location, masterSeed, 0400)
+	err = ioutil.WriteFile(location, cipherSeed, 0400)
 	if err != nil {
 		return err
 	}

@@ -250,11 +277,11 @@ func attestFile(ctx *cli.Context) error {
 		return err
 	}
 
-	stretchedKey, err := readMasterKey(ctx)
+	stretchedKey, err := readMasterKey(ctx, nil)
 	if err != nil {
 		utils.Fatalf(err.Error())
 	}
-	configDir := ctx.String(configdirFlag.Name)
+	configDir := ctx.GlobalString(configdirFlag.Name)
 	vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
 	confKey := crypto.Keccak256([]byte("config"), stretchedKey)

@@ -266,38 +293,36 @@ func attestFile(ctx *cli.Context) error {
 	return nil
 }
 
-func addCredential(ctx *cli.Context) error {
+func setCredential(ctx *cli.Context) error {
 	if len(ctx.Args()) < 1 {
-		utils.Fatalf("This command requires at leaste one argument.")
+		utils.Fatalf("This command requires an address to be passed as an argument.")
 	}
 	if err := initialize(ctx); err != nil {
 		return err
 	}
 
-	stretchedKey, err := readMasterKey(ctx)
+	address := ctx.Args().First()
+	password := getPassPhrase("Enter a passphrase to store with this address.", true)
+
+	stretchedKey, err := readMasterKey(ctx, nil)
 	if err != nil {
 		utils.Fatalf(err.Error())
 	}
-	configDir := ctx.String(configdirFlag.Name)
+	configDir := ctx.GlobalString(configdirFlag.Name)
 	vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
 	pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
 
 	// Initialize the encrypted storages
 	pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
-	key := ctx.Args().First()
-	value := ""
-	if len(ctx.Args()) > 1 {
-		value = ctx.Args().Get(1)
-	}
-	pwStorage.Put(key, value)
-	log.Info("Credential store updated", "key", key)
+	pwStorage.Put(address, password)
+	log.Info("Credential store updated", "key", address)
 	return nil
 }
 
 func initialize(c *cli.Context) error {
 	// Set up the logger to print everything
 	logOutput := os.Stdout
-	if c.Bool(stdiouiFlag.Name) {
+	if c.GlobalBool(stdiouiFlag.Name) {
 		logOutput = os.Stderr
 		// If using the stdioui, we can't do the 'confirm'-flow
 		fmt.Fprintf(logOutput, legalWarning)

@@ -318,26 +343,28 @@ func signer(c *cli.Context) error {
 	var (
 		ui core.SignerUI
 	)
-	if c.Bool(stdiouiFlag.Name) {
+	if c.GlobalBool(stdiouiFlag.Name) {
 		log.Info("Using stdin/stdout as UI-channel")
 		ui = core.NewStdIOUI()
 	} else {
 		log.Info("Using CLI as UI-channel")
 		ui = core.NewCommandlineUI()
 	}
-	db, err := core.NewAbiDBFromFiles(c.String(dBFlag.Name), c.String(customDBFlag.Name))
+	fourByteDb := c.GlobalString(dBFlag.Name)
+	fourByteLocal := c.GlobalString(customDBFlag.Name)
+	db, err := core.NewAbiDBFromFiles(fourByteDb, fourByteLocal)
 	if err != nil {
 		utils.Fatalf(err.Error())
 	}
-	log.Info("Loaded 4byte db", "signatures", db.Size(), "file", c.String("4bytedb"))
+	log.Info("Loaded 4byte db", "signatures", db.Size(), "file", fourByteDb, "local", fourByteLocal)
 
 	var (
 		api core.ExternalAPI
 	)
 
-	configDir := c.String(configdirFlag.Name)
-	if stretchedKey, err := readMasterKey(c); err != nil {
-		log.Info("No master seed provided, rules disabled")
+	configDir := c.GlobalString(configdirFlag.Name)
+	if stretchedKey, err := readMasterKey(c, ui); err != nil {
+		log.Info("No master seed provided, rules disabled", "error", err)
 	} else {
 
 		if err != nil {

@@ -356,7 +383,7 @@ func signer(c *cli.Context) error {
 		configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey)
 
 		//Do we have a rule-file?
-		ruleJS, err := ioutil.ReadFile(c.String(ruleFlag.Name))
+		ruleJS, err := ioutil.ReadFile(c.GlobalString(ruleFlag.Name))
 		if err != nil {
 			log.Info("Could not load rulefile, rules not enabled", "file", "rulefile")
 		} else {

@@ -380,16 +407,15 @@ func signer(c *cli.Context) error {
 	}
 
 	apiImpl := core.NewSignerAPI(
-		c.Int64(utils.NetworkIdFlag.Name),
-		c.String(keystoreFlag.Name),
-		c.Bool(utils.NoUSBFlag.Name),
+		c.GlobalInt64(utils.NetworkIdFlag.Name),
+		c.GlobalString(keystoreFlag.Name),
+		c.GlobalBool(utils.NoUSBFlag.Name),
 		ui, db,
-		c.Bool(utils.LightKDFFlag.Name))
-
+		c.GlobalBool(utils.LightKDFFlag.Name),
+		c.GlobalBool(advancedMode.Name))
 	api = apiImpl
 
 	// Audit logging
-	if logfile := c.String(auditLogFlag.Name); logfile != "" {
+	if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" {
 		api, err = core.NewAuditLogger(logfile, api)
 		if err != nil {
 			utils.Fatalf(err.Error())

@@ -408,13 +434,13 @@ func signer(c *cli.Context) error {
 			Service: api,
 			Version: "1.0"},
 	}
-	if c.Bool(utils.RPCEnabledFlag.Name) {
+	if c.GlobalBool(utils.RPCEnabledFlag.Name) {
 
 		vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
 		cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
 
 		// start http server
-		httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
+		httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
 		listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
 		if err != nil {
 			utils.Fatalf("Could not start RPC api: %v", err)

@@ -428,9 +454,9 @@ func signer(c *cli.Context) error {
 		}()
 
 	}
-	if !c.Bool(utils.IPCDisabledFlag.Name) {
+	if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
 		if c.IsSet(utils.IPCPathFlag.Name) {
-			ipcapiURL = c.String(utils.IPCPathFlag.Name)
+			ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
 		} else {
 			ipcapiURL = filepath.Join(configDir, "clef.ipc")
 		}

@@ -447,7 +473,7 @@ func signer(c *cli.Context) error {
 
 	}
 
-	if c.Bool(testFlag.Name) {
+	if c.GlobalBool(testFlag.Name) {
 		log.Info("Performing UI test")
 		go testExternalUI(apiImpl)
 	}

@@ -506,36 +532,52 @@ func homeDir() string {
 	}
 	return ""
 }
-func readMasterKey(ctx *cli.Context) ([]byte, error) {
+func readMasterKey(ctx *cli.Context, ui core.SignerUI) ([]byte, error) {
 	var (
 		file      string
-		configDir = ctx.String(configdirFlag.Name)
+		configDir = ctx.GlobalString(configdirFlag.Name)
 	)
-	if ctx.IsSet(signerSecretFlag.Name) {
-		file = ctx.String(signerSecretFlag.Name)
+	if ctx.GlobalIsSet(signerSecretFlag.Name) {
+		file = ctx.GlobalString(signerSecretFlag.Name)
 	} else {
-		file = filepath.Join(configDir, "secrets.dat")
+		file = filepath.Join(configDir, "masterseed.json")
 	}
 	if err := checkFile(file); err != nil {
 		return nil, err
 	}
-	masterKey, err := ioutil.ReadFile(file)
+	cipherKey, err := ioutil.ReadFile(file)
 	if err != nil {
 		return nil, err
 	}
-	if len(masterKey) < 256 {
-		return nil, fmt.Errorf("master key of insufficient length, expected >255 bytes, got %d", len(masterKey))
+	var password string
+	// If ui is not nil, get the password from ui.
+	if ui != nil {
+		resp, err := ui.OnInputRequired(core.UserInputRequest{
+			Title:      "Master Password",
+			Prompt:     "Please enter the password to decrypt the master seed",
+			IsPassword: true})
+		if err != nil {
+			return nil, err
+		}
+		password = resp.Text
+	} else {
+		password = getPassPhrase("Decrypt master seed of clef", false)
+	}
+	masterSeed, err := decryptSeed(cipherKey, password)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decrypt the master seed of clef")
+	}
+	if len(masterSeed) < 256 {
+		return nil, fmt.Errorf("master seed of insufficient length, expected >255 bytes, got %d", len(masterSeed))
 	}
 
 	// Create vault location
-	vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterKey)[:10]))
+	vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))
 	err = os.Mkdir(vaultLocation, 0700)
 	if err != nil && !os.IsExist(err) {
 		return nil, err
 	}
-	//!TODO, use KDF to stretch the master key
-	// stretched_key := stretch_key(master_key)
 
-	return masterKey, nil
+	return masterSeed, nil
 }
 
 // checkFile is a convenience function to check if a file

@@ -613,6 +655,59 @@ func testExternalUI(api *core.SignerAPI) {
 
 }
 
+// getPassPhrase retrieves the password associated with clef, either fetched
+// from a list of preloaded passphrases, or requested interactively from the user.
+// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one.
+func getPassPhrase(prompt string, confirmation bool) string {
+	fmt.Println(prompt)
+	password, err := console.Stdin.PromptPassword("Passphrase: ")
+	if err != nil {
+		utils.Fatalf("Failed to read passphrase: %v", err)
+	}
+	if confirmation {
+		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
+		if err != nil {
+			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
+		}
+		if password != confirm {
+			utils.Fatalf("Passphrases do not match")
+		}
+	}
+	return password
+}
+
+type encryptedSeedStorage struct {
+	Description string              `json:"description"`
+	Version     int                 `json:"version"`
+	Params      keystore.CryptoJSON `json:"params"`
+}
+
+// encryptSeed uses a similar scheme as the keystore uses, but with a different wrapping,
+// to encrypt the master seed
+func encryptSeed(seed []byte, auth []byte, scryptN, scryptP int) ([]byte, error) {
+	cryptoStruct, err := keystore.EncryptDataV3(seed, auth, scryptN, scryptP)
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(&encryptedSeedStorage{"Clef seed", 1, cryptoStruct})
+}
+
+// decryptSeed decrypts the master seed
+func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
+	var encSeed encryptedSeedStorage
+	if err := json.Unmarshal(keyjson, &encSeed); err != nil {
+		return nil, err
+	}
+	if encSeed.Version != 1 {
+		log.Warn(fmt.Sprintf("unsupported encryption format of seed: %d, operation will likely fail", encSeed.Version))
+	}
+	seed, err := keystore.DecryptDataV3(encSeed.Params, auth)
+	if err != nil {
+		return nil, err
+	}
+	return seed, err
+}
+
 /**
 //Create Account
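With this change, `masterseed.json` is just a versioned JSON wrapper around the keystore's CryptoJSON blob. A sketch of that shape, assuming the post-change keystore package; `seedFile` mirrors the unexported `encryptedSeedStorage` struct from the diff and the password is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

// seedFile mirrors clef's unexported encryptedSeedStorage wrapper.
type seedFile struct {
	Description string              `json:"description"`
	Version     int                 `json:"version"`
	Params      keystore.CryptoJSON `json:"params"`
}

func main() {
	// Encrypt a placeholder 256-byte seed the same way initializeSecrets
	// does, then wrap it; light scrypt params keep the example fast.
	seed := make([]byte, 256)
	params, err := keystore.EncryptDataV3(seed, []byte("password"),
		keystore.LightScryptN, keystore.LightScryptP)
	if err != nil {
		panic(err)
	}
	out, _ := json.MarshalIndent(&seedFile{"Clef seed", 1, params}, "", "  ")
	fmt.Println(string(out)) // shape of masterseed.json: description, version, params{...}
}
```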
Binary image file changed: 36 KiB → 20 KiB.
@@ -31,43 +31,51 @@ NOTE: This file does not contain your accounts. Those need to be backed up separ
|
||||
|
||||
## Creating rules
|
||||
|
||||
Now, you can create a rule-file.
|
||||
Now, you can create a rule-file. Note that it is not mandatory to use predefined rules, but it's really handy.
|
||||
|
||||
```javascript
|
||||
function ApproveListing(){
|
||||
return "Approve"
|
||||
}
|
||||
```
|
||||
Get the `sha256` hash....
|
||||
|
||||
Get the `sha256` hash. If you have openssl, you can do `openssl sha256 rules.js`...
|
||||
```text
|
||||
#sha256sum rules.js
|
||||
6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 rules.js
|
||||
```
|
||||
...And then `attest` the file:
|
||||
...now `attest` the file...
|
||||
```text
|
||||
#./signer attest 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
|
||||
|
||||
INFO [02-21|12:14:38] Ruleset attestation updated sha256=6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
|
||||
```
|
||||
At this point, we then start the signer with the rule-file:
|
||||
|
||||
...and (this is required only for non-production versions) load a mock-up `4byte.json` by copying the file from the source to your current working directory:
|
||||
```text
|
||||
#./signer --rules rules.json
|
||||
#cp $GOPATH/src/github.com/ethereum/go-ethereum/cmd/clef/4byte.json $PWD
|
||||
```
|
||||
|
||||
INFO [02-21|12:15:18] Using CLI as UI-channel
|
||||
INFO [02-21|12:15:18] Loaded 4byte db signatures=5509 file=./4byte.json
|
||||
INFO [02-21|12:15:18] Could not load rulefile, rules not enabled file=rulefile
|
||||
DEBUG[02-21|12:15:18] FS scan times list=35.335µs set=5.536µs diff=5.073µs
|
||||
DEBUG[02-21|12:15:18] Ledger support enabled
|
||||
DEBUG[02-21|12:15:18] Trezor support enabled
|
||||
INFO [02-21|12:15:18] Audit logs configured file=audit.log
|
||||
INFO [02-21|12:15:18] HTTP endpoint opened url=http://localhost:8550
|
||||
At this point, we can start the signer with the rule-file:
|
||||
```text
|
||||
#./signer --rules rules.js --rpc
|
||||
|
||||
INFO [09-25|20:28:11.866] Using CLI as UI-channel
|
||||
INFO [09-25|20:28:11.876] Loaded 4byte db signatures=5509 file=./4byte.json
|
||||
INFO [09-25|20:28:11.877] Rule engine configured file=./rules.js
|
||||
DEBUG[09-25|20:28:11.877] FS scan times list=100.781µs set=13.253µs diff=5.761µs
|
||||
DEBUG[09-25|20:28:11.884] Ledger support enabled
|
||||
DEBUG[09-25|20:28:11.888] Trezor support enabled
|
||||
INFO [09-25|20:28:11.888] Audit logs configured file=audit.log
|
||||
DEBUG[09-25|20:28:11.888] HTTP registered namespace=account
|
||||
INFO [09-25|20:28:11.890] HTTP endpoint opened url=http://localhost:8550
|
||||
DEBUG[09-25|20:28:11.890] IPC registered namespace=account
|
||||
INFO [09-25|20:28:11.890] IPC endpoint opened url=<nil>
|
||||
------- Signer info -------
|
||||
* extapi_version : 2.0.0
|
||||
* intapi_version : 2.0.0
|
||||
* extapi_http : http://localhost:8550
|
||||
* extapi_ipc : <nil>
|
||||
* extapi_version : 2.0.0
|
||||
* intapi_version : 1.2.0
|
||||
|
||||
```
|
||||
|
||||
Any list-requests will now be auto-approved by our rule-file.
|
||||
@@ -107,16 +115,16 @@ The `master_seed` was then used to derive a few other things:
|
||||
|
||||
## Adding credentials
|
||||
|
||||
In order to make more useful rules; sign transactions, the signer needs access to the passwords needed to unlock keystores.
|
||||
In order to make more useful rules like signing transactions, the signer needs access to the passwords needed to unlock keystores.
|
||||
|
||||
```text
|
||||
#./signer addpw 0x694267f14675d7e1b9494fd8d72fefe1755710fa test
|
||||
#./signer addpw "0x694267f14675d7e1b9494fd8d72fefe1755710fa" "test_password"
|
||||
|
||||
INFO [02-21|13:43:21] Credential store updated key=0x694267f14675d7e1b9494fd8d72fefe1755710fa
|
||||
```
|
||||
## More advanced rules
|
||||
|
||||
Now let's update the rules to make use of credentials
|
||||
Now let's update the rules to make use of credentials:
|
||||
|
||||
```javascript
|
||||
function ApproveListing(){
|
||||
@@ -134,13 +142,15 @@ function ApproveSignData(r){
|
||||
}
|
||||
|
||||
```
|
||||
In this example,
|
||||
* any requests to sign data with the account `0x694...` will be
|
||||
* auto-approved if the message contains with `bazonk`,
|
||||
* and auto-rejected if it does not.
|
||||
* Any other signing-requests will be passed along for manual approve/reject.
|
||||
In this example:
|
||||
* Any requests to sign data with the account `0x694...` will be
|
||||
* auto-approved if the message contains with `bazonk`
|
||||
* auto-rejected if it does not.
|
||||
* Any other signing-requests will be passed along for manual approve/reject.
|
||||
|
||||
..attest the new file
|
||||
_Note: make sure that `0x694...` is an account you have access to. You can create it either via the clef or the traditional account cli tool. If the latter was chosen, make sure both clef and geth use the same keystore by specifing `--keystore path/to/your/keystore` when running clef._
|
||||
|
||||
Attest the new file...
|
||||
```text
|
||||
#sha256sum rules.js
|
||||
2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f rules.js
|
||||
@@ -153,23 +163,26 @@ INFO [02-21|14:36:30] Ruleset attestation updated sha256=2a0cb661da
And start the signer:

```
#./signer --rules rules.js
#./signer --rules rules.js --rpc

INFO [02-21|14:41:56] Using CLI as UI-channel
INFO [02-21|14:41:56] Loaded 4byte db signatures=5509 file=./4byte.json
INFO [02-21|14:41:56] Rule engine configured file=rules.js
DEBUG[02-21|14:41:56] FS scan times list=34.607µs set=4.509µs diff=4.87µs
DEBUG[02-21|14:41:56] Ledger support enabled
DEBUG[02-21|14:41:56] Trezor support enabled
INFO [02-21|14:41:56] Audit logs configured file=audit.log
INFO [02-21|14:41:56] HTTP endpoint opened url=http://localhost:8550
INFO [09-25|21:02:16.450] Using CLI as UI-channel
INFO [09-25|21:02:16.466] Loaded 4byte db signatures=5509 file=./4byte.json
INFO [09-25|21:02:16.467] Rule engine configured file=./rules.js
DEBUG[09-25|21:02:16.468] FS scan times list=1.45262ms set=21.926µs diff=6.944µs
DEBUG[09-25|21:02:16.473] Ledger support enabled
DEBUG[09-25|21:02:16.475] Trezor support enabled
INFO [09-25|21:02:16.476] Audit logs configured file=audit.log
DEBUG[09-25|21:02:16.476] HTTP registered namespace=account
INFO [09-25|21:02:16.478] HTTP endpoint opened url=http://localhost:8550
DEBUG[09-25|21:02:16.478] IPC registered namespace=account
INFO [09-25|21:02:16.478] IPC endpoint opened url=<nil>
------- Signer info -------
* extapi_version : 2.0.0
* intapi_version : 1.2.0
* intapi_version : 2.0.0
* extapi_http : http://localhost:8550
* extapi_ipc : <nil>
INFO [02-21|14:41:56] error occurred during execution error="ReferenceError: 'OnSignerStartup' is not defined"
```

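The `ReferenceError` in the last line is harmless: the engine probes for an optional `OnSignerStartup` hook that this rule file does not define. A no-op stub in `rules.js` would silence it (a sketch; the exact shape of the `info` argument is not shown in this document):

```javascript
// Optional lifecycle hook, invoked once when the signer starts.
function OnSignerStartup(info) {
    // no-op: defining the function is enough to avoid the ReferenceError
}
```
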
And then test signing, once with `bazonk` and once without:

```
@@ -190,9 +203,9 @@ INFO [02-21|14:42:56] Op rejected
The signer also stores all traffic over the external API in a log file. The last 4 lines show the two requests and their responses:

```text
#tail audit.log -n 4
#tail -n 4 audit.log
t=2018-02-21T14:42:41+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49706\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=202062617a6f6e6b2062617a2067617a0a
t=2018-02-21T14:42:42+0100 lvl=info msg=Sign api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=response data= error="Request denied"
```
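
The `data` field in these entries is the hex-encoded message that was submitted for signing. Decoding the two payloads shows the `bazonk`/`bonk` difference that triggered the approval and the rejection, for example with a quick Node.js snippet (assuming a Node runtime is at hand):

```javascript
// Decode the two audit-log payloads quoted above.
const payloads = [
    "202062617a6f6e6b2062617a2067617a0a", // approved request
    "2020626f6e6b2062617a2067617a0a",     // denied request
];
for (const hex of payloads) {
    console.log(JSON.stringify(Buffer.from(hex, "hex").toString("utf8")));
}
// Prints "  bazonk baz gaz\n" and "  bonk baz gaz\n"
```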
@@ -45,14 +45,15 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create
// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
	log := vm.StructLog{
		Pc:         pc,
		Op:         op,
		Gas:        gas,
		GasCost:    cost,
		MemorySize: memory.Len(),
		Storage:    nil,
		Depth:      depth,
		Err:        err,
		Pc:            pc,
		Op:            op,
		Gas:           gas,
		GasCost:       cost,
		MemorySize:    memory.Len(),
		Storage:       nil,
		Depth:         depth,
		RefundCounter: env.StateDB.GetRefund(),
		Err:           err,
	}
	if !l.cfg.DisableMemory {
		log.Memory = memory.Data()
@@ -97,6 +97,10 @@ func stateTestCmd(ctx *cli.Context) error {
		// Run the test and aggregate the result
		result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
		state, err := test.Run(st, cfg)
		// print state root for evmlab tracing
		if ctx.GlobalBool(MachineFlag.Name) && state != nil {
			fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
		}
		if err != nil {
			// Test failed, mark as so and dump any state to aid debugging
			result.Pass, result.Error = false, err.Error()
@@ -105,10 +109,6 @@ func stateTestCmd(ctx *cli.Context) error {
				result.State = &dump
			}
		}
		// print state root for evmlab tracing (already committed above, so no need to delete objects again)
		if ctx.GlobalBool(MachineFlag.Name) && state != nil {
			fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
		}

		results = append(results, *result)
@@ -54,8 +54,8 @@ import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/discv5"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nat"
	"github.com/ethereum/go-ethereum/params"
	"golang.org/x/net/websocket"
@@ -255,8 +255,10 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
		return nil, err
	}
	for _, boot := range enodes {
		old, _ := discover.ParseNode(boot.String())
		stack.Server().AddPeer(old)
		old, err := enode.ParseV4(boot.String())
		if err == nil {
			stack.Server().AddPeer(old)
		}
	}
	// Attach to the client and retrieve any interesting metadata
	api, err := stack.Attach()
@@ -21,7 +21,6 @@ import (
	"fmt"
	"math"
	"os"
	"runtime"
	godebug "runtime/debug"
	"sort"
	"strconv"
@@ -90,6 +89,7 @@ var (
		utils.LightKDFFlag,
		utils.CacheFlag,
		utils.CacheDatabaseFlag,
		utils.CacheTrieFlag,
		utils.CacheGCFlag,
		utils.TrieCacheGenFlag,
		utils.ListenPortFlag,
@@ -209,8 +209,6 @@ func init() {
	app.Flags = append(app.Flags, metricsFlags...)

	app.Before = func(ctx *cli.Context) error {
		runtime.GOMAXPROCS(runtime.NumCPU())

		logdir := ""
		if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
			logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
@@ -132,6 +132,7 @@ var AppHelpFlagGroups = []flagGroup{
		Flags: []cli.Flag{
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
			utils.CacheTrieFlag,
			utils.CacheGCFlag,
			utils.TrieCacheGenFlag,
		},
@@ -47,7 +47,7 @@ import (

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/rpc"
@@ -285,7 +285,7 @@ func createNode(ctx *cli.Context) error {
	if err != nil {
		return err
	}
	config.ID = discover.PubkeyID(&privKey.PublicKey)
	config.ID = enode.PubkeyToIDV4(&privKey.PublicKey)
	config.PrivateKey = privKey
	}
	if services := ctx.String("services"); services != "" {
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \
	echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
	echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
	echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh
	echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --nat extip:{{.IP}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh

ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@@ -99,6 +99,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
	template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{
		"NetworkID": config.network,
		"Port":      config.port,
		"IP":        client.address,
		"Peers":     config.peersTotal,
		"LightFlag": lightFlag,
		"Bootnodes": strings.Join(bootnodes, ","),
@@ -227,10 +228,10 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)

	// Container available, retrieve its node ID and its genesis json
	var out []byte
	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil {
	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.enode --cache=16 attach", network, kind)); err != nil {
		return nil, ErrServiceUnreachable
	}
	id := bytes.Trim(bytes.TrimSpace(out), "\"")
	enode := bytes.Trim(bytes.TrimSpace(out), "\"")

	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 cat /genesis.json", network, kind)); err != nil {
		return nil, ErrServiceUnreachable
@@ -265,7 +266,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
		gasLimit: gasLimit,
		gasPrice: gasPrice,
	}
	stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port)
	stats.enode = string(enode)

	return stats, nil
}
@@ -29,7 +29,65 @@ import (
	"gopkg.in/urfave/cli.v1"
)

var salt = make([]byte, 32)
var (
	salt          = make([]byte, 32)
	accessCommand = cli.Command{
		CustomHelpTemplate: helpTemplate,
		Name:               "access",
		Usage:              "encrypts a reference and embeds it into a root manifest",
		ArgsUsage:          "<ref>",
		Description:        "encrypts a reference and embeds it into a root manifest",
		Subcommands: []cli.Command{
			{
				CustomHelpTemplate: helpTemplate,
				Name:               "new",
				Usage:              "encrypts a reference and embeds it into a root manifest",
				ArgsUsage:          "<ref>",
				Description:        "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
				Subcommands: []cli.Command{
					{
						Action:             accessNewPass,
						CustomHelpTemplate: helpTemplate,
						Flags: []cli.Flag{
							utils.PasswordFileFlag,
							SwarmDryRunFlag,
						},
						Name:        "pass",
						Usage:       "encrypts a reference with a password and embeds it into a root manifest",
						ArgsUsage:   "<ref>",
						Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					},
					{
						Action:             accessNewPK,
						CustomHelpTemplate: helpTemplate,
						Flags: []cli.Flag{
							utils.PasswordFileFlag,
							SwarmDryRunFlag,
							SwarmAccessGrantKeyFlag,
						},
						Name:        "pk",
						Usage:       "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
						ArgsUsage:   "<ref>",
						Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					},
					{
						Action:             accessNewACT,
						CustomHelpTemplate: helpTemplate,
						Flags: []cli.Flag{
							SwarmAccessGrantKeysFlag,
							SwarmDryRunFlag,
							utils.PasswordFileFlag,
						},
						Name:        "act",
						Usage:       "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
						ArgsUsage:   "<ref>",
						Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					},
				},
			},
		},
	}
)

func init() {
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
@@ -56,6 +114,9 @@ func accessNewPass(ctx *cli.Context) {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
	if err != nil {
		utils.Fatalf("had an error generating the manifest: %v", err)
	}
	if dryRun {
		err = printManifests(m, nil)
		if err != nil {
@@ -89,6 +150,9 @@ func accessNewPK(ctx *cli.Context) {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
	if err != nil {
		utils.Fatalf("had an error generating the manifest: %v", err)
	}
	if dryRun {
		err = printManifests(m, nil)
		if err != nil {
@@ -130,7 +194,7 @@ func accessNewACT(ctx *cli.Context) {
		if err != nil {
			utils.Fatalf("had an error reading the grantee public key list")
		}
		pkGrantees = strings.Split(string(bytes), "\n")
		pkGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
	}

	if passGranteesFilename != "" {
@@ -138,7 +202,7 @@ func accessNewACT(ctx *cli.Context) {
		if err != nil {
			utils.Fatalf("could not read password filename: %v", err)
		}
		passGrantees = strings.Split(string(bytes), "\n")
		passGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
	}
	accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees)
	if err != nil {
@@ -14,8 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// +build !windows

package main

import (
@@ -28,6 +26,7 @@ import (
	gorand "math/rand"
	"net/http"
	"os"
	"runtime"
	"strings"
	"testing"
	"time"
@@ -37,7 +36,7 @@ import (
	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

@@ -48,23 +47,41 @@ const (

var DefaultCurve = crypto.S256()

// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password.
func TestACT(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}

	initCluster(t)

	cases := []struct {
		name string
		f    func(t *testing.T)
	}{
		{"Password", testPassword},
		{"PK", testPK},
		{"ACTWithoutBogus", testACTWithoutBogus},
		{"ACTWithBogus", testACTWithBogus},
	}

	for _, tc := range cases {
		t.Run(tc.name, tc.f)
	}
}

// testPassword tests for the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating - node (publisher), uploads to second node then disappears. Content which was uploaded
// is then fetched through 2nd node. since the tested code is not key-aware - we can just
// fetch from the 2nd node using HTTP BasicAuth
func TestAccessPassword(t *testing.T) {
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()
	proxyNode := cluster.Nodes[0]

func testPassword(t *testing.T) {
	dataFilename := testutil.TempFileWithContent(t, data)
	defer os.RemoveAll(dataFilename)

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t,
		"--bzzapi",
		proxyNode.URL, //it doesn't matter through which node we upload content
		cluster.Nodes[0].URL,
		"up",
		"--encrypt",
		dataFilename)
@@ -138,16 +155,17 @@ func TestAccessPassword(t *testing.T) {
	if a.Publisher != "" {
		t.Fatal("should be empty")
	}
	client := swarm.NewClient(cluster.Nodes[0].URL)

	client := swarmapi.NewClient(cluster.Nodes[0].URL)

	hash, err := client.UploadManifest(&m, false)
	if err != nil {
		t.Fatal(err)
	}

	httpClient := &http.Client{}

	url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash

	httpClient := &http.Client{}
	response, err := httpClient.Get(url)
	if err != nil {
		t.Fatal(err)
@@ -189,7 +207,7 @@ func TestAccessPassword(t *testing.T) {
	//download file with 'swarm down' with wrong password
	up = runSwarm(t,
		"--bzzapi",
		proxyNode.URL,
		cluster.Nodes[0].URL,
		"down",
		"bzz:/"+hash,
		tmp,
@@ -203,16 +221,12 @@ func TestAccessPassword(t *testing.T) {
	up.ExpectExit()
}

// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// testPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating - node (publisher), uploads to second node (which is also the grantee) then disappears.
// Content which was uploaded is then fetched through the grantee's http proxy. Since the tested code is private-key aware,
// the test will fail if the proxy's given private key is not granted on the ACT.
func TestAccessPK(t *testing.T) {
	// Setup Swarm and upload a test file to it
	cluster := newTestCluster(t, 2)
	defer cluster.Shutdown()

func testPK(t *testing.T) {
	dataFilename := testutil.TempFileWithContent(t, data)
	defer os.RemoveAll(dataFilename)

@@ -318,7 +332,7 @@ func TestAccessPK(t *testing.T) {
	if a.Publisher != pkComp {
		t.Fatal("publisher key did not match")
	}
	client := swarm.NewClient(cluster.Nodes[0].URL)
	client := swarmapi.NewClient(cluster.Nodes[0].URL)

	hash, err := client.UploadManifest(&m, false)
	if err != nil {
@@ -344,29 +358,24 @@ func TestAccessPK(t *testing.T) {
	}
}

// TestAccessACT tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
func TestAccessACT(t *testing.T) {
	testAccessACT(t, 0)
// testACTWithoutBogus tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
func testACTWithoutBogus(t *testing.T) {
	testACT(t, 0)
}

// TestAccessACTScale tests the creation of the ACT manifest end-to-end, with 1000 bogus entries (i.e. 1000 EC keys + default scenario = 3 nodes 1 unauthorized = 1003 keys in the ACT manifest)
func TestAccessACTScale(t *testing.T) {
	testAccessACT(t, 1000)
// testACTWithBogus tests the creation of the ACT manifest end-to-end, with 100 bogus entries (i.e. 100 EC keys + default scenario = 3 nodes 1 unauthorized = 103 keys in the ACT manifest)
func testACTWithBogus(t *testing.T) {
	testACT(t, 100)
}

// TestAccessACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
// testACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
// the test fires up a 3 node cluster, then randomly picks 2 nodes which will be acting as grantees to the data
// set and also protects the ACT with a password. the third node should fail decoding the reference as it will not be granted access.
// the third node then tries to download using a correct password (and succeeds) then uses a wrong password and fails.
// the publisher uploads through one of the nodes then disappears.
func testAccessACT(t *testing.T, bogusEntries int) {
	// Setup Swarm and upload a test file to it
	const clusterSize = 3
	cluster := newTestCluster(t, clusterSize)
	defer cluster.Shutdown()

func testACT(t *testing.T, bogusEntries int) {
	var uploadThroughNode = cluster.Nodes[0]
	client := swarm.NewClient(uploadThroughNode.URL)
	client := swarmapi.NewClient(uploadThroughNode.URL)

	r1 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
	nodeToSkip := r1.Intn(clusterSize) // a number between 0 and 2 (node indices in `cluster`)
@@ -59,27 +59,29 @@ var (

//constants for environment variables
const (
	SWARM_ENV_CHEQUEBOOK_ADDR      = "SWARM_CHEQUEBOOK_ADDR"
	SWARM_ENV_ACCOUNT              = "SWARM_ACCOUNT"
	SWARM_ENV_LISTEN_ADDR          = "SWARM_LISTEN_ADDR"
	SWARM_ENV_PORT                 = "SWARM_PORT"
	SWARM_ENV_NETWORK_ID           = "SWARM_NETWORK_ID"
	SWARM_ENV_SWAP_ENABLE          = "SWARM_SWAP_ENABLE"
	SWARM_ENV_SWAP_API             = "SWARM_SWAP_API"
	SWARM_ENV_SYNC_DISABLE         = "SWARM_SYNC_DISABLE"
	SWARM_ENV_SYNC_UPDATE_DELAY    = "SWARM_ENV_SYNC_UPDATE_DELAY"
	SWARM_ENV_LIGHT_NODE_ENABLE    = "SWARM_LIGHT_NODE_ENABLE"
	SWARM_ENV_DELIVERY_SKIP_CHECK  = "SWARM_DELIVERY_SKIP_CHECK"
	SWARM_ENV_ENS_API              = "SWARM_ENS_API"
	SWARM_ENV_ENS_ADDR             = "SWARM_ENS_ADDR"
	SWARM_ENV_CORS                 = "SWARM_CORS"
	SWARM_ENV_BOOTNODES            = "SWARM_BOOTNODES"
	SWARM_ENV_PSS_ENABLE           = "SWARM_PSS_ENABLE"
	SWARM_ENV_STORE_PATH           = "SWARM_STORE_PATH"
	SWARM_ENV_STORE_CAPACITY       = "SWARM_STORE_CAPACITY"
	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
	SWARM_ACCESS_PASSWORD          = "SWARM_ACCESS_PASSWORD"
	GETH_ENV_DATADIR               = "GETH_DATADIR"
	SWARM_ENV_CHEQUEBOOK_ADDR         = "SWARM_CHEQUEBOOK_ADDR"
	SWARM_ENV_ACCOUNT                 = "SWARM_ACCOUNT"
	SWARM_ENV_LISTEN_ADDR             = "SWARM_LISTEN_ADDR"
	SWARM_ENV_PORT                    = "SWARM_PORT"
	SWARM_ENV_NETWORK_ID              = "SWARM_NETWORK_ID"
	SWARM_ENV_SWAP_ENABLE             = "SWARM_SWAP_ENABLE"
	SWARM_ENV_SWAP_API                = "SWARM_SWAP_API"
	SWARM_ENV_SYNC_DISABLE            = "SWARM_SYNC_DISABLE"
	SWARM_ENV_SYNC_UPDATE_DELAY       = "SWARM_ENV_SYNC_UPDATE_DELAY"
	SWARM_ENV_MAX_STREAM_PEER_SERVERS = "SWARM_ENV_MAX_STREAM_PEER_SERVERS"
	SWARM_ENV_LIGHT_NODE_ENABLE       = "SWARM_LIGHT_NODE_ENABLE"
	SWARM_ENV_DELIVERY_SKIP_CHECK     = "SWARM_DELIVERY_SKIP_CHECK"
	SWARM_ENV_ENS_API                 = "SWARM_ENS_API"
	SWARM_ENV_ENS_ADDR                = "SWARM_ENS_ADDR"
	SWARM_ENV_CORS                    = "SWARM_CORS"
	SWARM_ENV_BOOTNODES               = "SWARM_BOOTNODES"
	SWARM_ENV_PSS_ENABLE              = "SWARM_PSS_ENABLE"
	SWARM_ENV_STORE_PATH              = "SWARM_STORE_PATH"
	SWARM_ENV_STORE_CAPACITY          = "SWARM_STORE_CAPACITY"
	SWARM_ENV_STORE_CACHE_CAPACITY    = "SWARM_STORE_CACHE_CAPACITY"
	SWARM_ACCESS_PASSWORD             = "SWARM_ACCESS_PASSWORD"
	SWARM_AUTO_DEFAULTPATH            = "SWARM_AUTO_DEFAULTPATH"
	GETH_ENV_DATADIR                  = "GETH_DATADIR"
)

// These settings ensure that TOML keys use the same names as Go struct fields.
@@ -124,7 +126,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
	//get the account for the provided swarm account
	prvkey := getAccount(config.BzzAccount, ctx, stack)
	//set the resolved config path (geth --datadir)
	config.Path = stack.InstanceDir()
	config.Path = expandPath(stack.InstanceDir())
	//finally, initialize the configuration
	config.Init(prvkey)
	//configuration phase completed here
@@ -175,14 +177,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
	}

	if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
		if id, _ := strconv.Atoi(networkid); id != 0 {
			currentConfig.NetworkID = uint64(id)
		id, err := strconv.ParseUint(networkid, 10, 64)
		if err != nil {
			utils.Fatalf("invalid cli flag %s: %v", SwarmNetworkIdFlag.Name, err)
		}
		if id != 0 {
			currentConfig.NetworkID = id
		}
	}

	if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
		if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" {
			currentConfig.Path = datadir
			currentConfig.Path = expandPath(datadir)
		}
	}

@@ -207,6 +213,9 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.SyncUpdateDelay = d
	}

	// any value including 0 is acceptable
	currentConfig.MaxStreamPeerServers = ctx.GlobalInt(SwarmMaxStreamPeerServersFlag.Name)

	if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
		currentConfig.LightNodeEnabled = true
	}
@@ -226,6 +235,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		if len(ensAPIs) == 1 && ensAPIs[0] == "" {
			ensAPIs = nil
		}
		for i := range ensAPIs {
			ensAPIs[i] = expandPath(ensAPIs[i])
		}

		currentConfig.EnsAPIs = ensAPIs
	}

@@ -262,13 +275,17 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
	}

	if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
		if id, _ := strconv.Atoi(networkid); id != 0 {
			currentConfig.NetworkID = uint64(id)
		id, err := strconv.ParseUint(networkid, 10, 64)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_NETWORK_ID, err)
		}
		if id != 0 {
			currentConfig.NetworkID = id
		}
	}

	if datadir := os.Getenv(GETH_ENV_DATADIR); datadir != "" {
		currentConfig.Path = datadir
		currentConfig.Path = expandPath(datadir)
	}

	bzzport := os.Getenv(SWARM_ENV_PORT)
@@ -281,33 +298,50 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
	}

	if swapenable := os.Getenv(SWARM_ENV_SWAP_ENABLE); swapenable != "" {
		if swap, err := strconv.ParseBool(swapenable); err != nil {
			currentConfig.SwapEnabled = swap
		swap, err := strconv.ParseBool(swapenable)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_SWAP_ENABLE, err)
		}
		currentConfig.SwapEnabled = swap
	}

	if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
		if sync, err := strconv.ParseBool(syncdisable); err != nil {
			currentConfig.SyncEnabled = !sync
		sync, err := strconv.ParseBool(syncdisable)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_SYNC_DISABLE, err)
		}
		currentConfig.SyncEnabled = !sync
	}

	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
		if skipCheck, err := strconv.ParseBool(v); err != nil {
		skipCheck, err := strconv.ParseBool(v)
		if err == nil {
			currentConfig.DeliverySkipCheck = skipCheck
		}
	}

	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
		if d, err := time.ParseDuration(v); err != nil {
			currentConfig.SyncUpdateDelay = d
		d, err := time.ParseDuration(v)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_SYNC_UPDATE_DELAY, err)
		}
		currentConfig.SyncUpdateDelay = d
	}

	if max := os.Getenv(SWARM_ENV_MAX_STREAM_PEER_SERVERS); max != "" {
		m, err := strconv.Atoi(max)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_MAX_STREAM_PEER_SERVERS, err)
		}
		currentConfig.MaxStreamPeerServers = m
	}

	if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
		if lightnode, err := strconv.ParseBool(lne); err != nil {
			currentConfig.LightNodeEnabled = lightnode
		lightnode, err := strconv.ParseBool(lne)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_LIGHT_NODE_ENABLE, err)
		}
		currentConfig.LightNodeEnabled = lightnode
	}

	if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
@@ -26,14 +26,14 @@ import (
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"

	"github.com/docker/docker/pkg/reexec"
)

func TestDumpConfig(t *testing.T) {
func TestConfigDump(t *testing.T) {
	swarm := runSwarm(t, "dumpconfig")
	defaultConf := api.NewConfig()
	out, err := tomlSettings.Marshal(&defaultConf)
@@ -91,8 +91,8 @@ func TestConfigCmdLineOverrides(t *testing.T) {
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
		fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
	}
	node.Cmd = runSwarm(t, flags...)
	node.Cmd.InputLine(testPassphrase)
@@ -189,9 +189,9 @@ func TestConfigFileOverrides(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
		"--ens-api", "",
		"--ipcpath", conf.IPCPath,
		"--datadir", dir,
		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
		fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
	}
	node.Cmd = runSwarm(t, flags...)
	node.Cmd.InputLine(testPassphrase)
@@ -407,9 +407,9 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
		"--ens-api", "",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
		fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
	}
	node.Cmd = runSwarm(t, flags...)
	node.Cmd.InputLine(testPassphrase)
@@ -466,7 +466,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
	node.Shutdown()
}

func TestValidateConfig(t *testing.T) {
func TestConfigValidate(t *testing.T) {
	for _, c := range []struct {
		cfg *api.Config
		err string
@@ -29,6 +29,48 @@ import (
	"gopkg.in/urfave/cli.v1"
)

var dbCommand = cli.Command{
	Name:               "db",
	CustomHelpTemplate: helpTemplate,
	Usage:              "manage the local chunk database",
	ArgsUsage:          "db COMMAND",
	Description:        "Manage the local chunk database",
	Subcommands: []cli.Command{
		{
			Action:             dbExport,
			CustomHelpTemplate: helpTemplate,
			Name:               "export",
			Usage:              "export a local chunk database as a tar archive (use - to send to stdout)",
			ArgsUsage:          "<chunkdb> <file>",
			Description: `
Export a local chunk database as a tar archive (use - to send to stdout).

    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The export may be quite large, consider piping the output through the Unix
pv(1) tool to get a progress bar:

    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
`,
		},
		{
			Action:             dbImport,
			CustomHelpTemplate: helpTemplate,
			Name:               "import",
			Usage:              "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
			ArgsUsage:          "<chunkdb> <file>",
			Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).

    swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:

    pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
		},
	},
}

func dbExport(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 3 {
@@ -93,21 +135,6 @@ func dbImport(ctx *cli.Context) {
	log.Info(fmt.Sprintf("successfully imported %d chunks", count))
}

func dbClean(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 2 {
		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
	}

	store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
	defer store.Close()

	store.Cleanup()
}

func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
@@ -28,6 +28,15 @@ import (
	"gopkg.in/urfave/cli.v1"
)

var downloadCommand = cli.Command{
	Action:      download,
	Name:        "down",
	Flags:       []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
	Usage:       "downloads a swarm manifest or a file inside a manifest",
	ArgsUsage:   " <uri> [<dir>]",
	Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`,
}

func download(ctx *cli.Context) {
	log.Debug("downloading content using swarm down")
	args := ctx.Args()
@@ -19,15 +19,15 @@ package main
import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"runtime"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

// TestCLISwarmExportImport perform the following test:
@@ -38,14 +38,18 @@ import (
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}
	cluster := newTestCluster(t, 1)

	// generate random 10mb file
	f, cleanup := generateRandomFile(t, 10000000)
	defer cleanup()
	// generate random 1mb file
	content := testutil.RandomBytes(1, 1000000)
	fileName := testutil.TempFileWithContent(t, string(content))
	defer os.Remove(fileName)

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", fileName)
	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
	up.ExpectExit()
	hash := matches[0]
@@ -92,7 +96,7 @@ func TestCLISwarmExportImport(t *testing.T) {
	}

	// compare downloaded file with the generated random file
	mustEqualFiles(t, f, res.Body)
	mustEqualFiles(t, bytes.NewReader(content), res.Body)
}

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
@@ -113,27 +117,3 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
	}
}

func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	// write 10mb random data to file
	buf := make([]byte, 10000000)
	_, err = rand.Read(buf)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(tmp.Name(), buf, 0755)

	return tmp, teardown
}
cmd/swarm/feeds.go (new file, 238 lines)
@@ -0,0 +1,238 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Command feed allows the user to create and update signed Swarm feeds
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"

	"github.com/ethereum/go-ethereum/cmd/utils"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"gopkg.in/urfave/cli.v1"
)

var feedCommand = cli.Command{
	CustomHelpTemplate: helpTemplate,
	Name:               "feed",
	Usage:              "(Advanced) Create and update Swarm Feeds",
	ArgsUsage:          "<create|update|info>",
	Description:        "Works with Swarm Feeds",
	Subcommands: []cli.Command{
		{
			Action:             feedCreateManifest,
			CustomHelpTemplate: helpTemplate,
			Name:               "create",
			Usage:              "creates and publishes a new feed manifest",
			Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic.
			The feed topic can be built in the following ways:
			* use --topic to set the topic to an arbitrary binary hex string.
			* use --name to set the topic to a human-readable name.
			    For example --name could be set to "profile-picture", meaning this feed allows to get this user's current profile picture.
			* use both --topic and --name to create named subtopics.
			    For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
			    this feed tracks a discussion about that contract.
			The --user flag allows to have this manifest refer to a user other than yourself. If not specified,
			it will then default to your local account (--bzzaccount)`,
			Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
		},
		{
			Action:             feedUpdate,
			CustomHelpTemplate: helpTemplate,
			Name:               "update",
			Usage:              "updates the content of an existing Swarm Feed",
			ArgsUsage:          "<0x Hex data>",
			Description: `publishes a new update on the specified topic
			The feed topic can be built in the following ways:
			* use --topic to set the topic to an arbitrary binary hex string.
			* use --name to set the topic to a human-readable name.
			    For example --name could be set to "profile-picture", meaning this feed allows to get this user's current profile picture.
			* use both --topic and --name to create named subtopics.
			    For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
			    this feed tracks a discussion about that contract.

			If you have a manifest, you can specify it with --manifest to refer to the feed,
			instead of using --topic / --name
			`,
			Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag},
		},
		{
			Action:             feedInfo,
			CustomHelpTemplate: helpTemplate,
			Name:               "info",
			Usage:              "obtains information about an existing Swarm feed",
			Description: `obtains information about an existing Swarm feed
			The topic can be specified directly with the --topic flag as an hex string
			If no topic is specified, the default topic (zero) will be used
			The --name flag can be used to specify subtopics with a specific name.
			The --user flag allows to refer to a user other than yourself. If not specified,
			it will then default to your local account (--bzzaccount)
			If you have a manifest, you can specify it with --manifest instead of --topic / --name / ---user
			to refer to the feed`,
			Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
		},
	},
}

func NewGenericSigner(ctx *cli.Context) feed.Signer {
	return feed.NewGenericSigner(getPrivKey(ctx))
}

func getTopic(ctx *cli.Context) (topic feed.Topic) {
	var name = ctx.String(SwarmFeedNameFlag.Name)
	var relatedTopic = ctx.String(SwarmFeedTopicFlag.Name)
	var relatedTopicBytes []byte
	var err error

	if relatedTopic != "" {
		relatedTopicBytes, err = hexutil.Decode(relatedTopic)
		if err != nil {
			utils.Fatalf("Error parsing topic: %s", err)
		}
	}

	topic, err = feed.NewTopic(name, relatedTopicBytes)
	if err != nil {
		utils.Fatalf("Error parsing topic: %s", err)
	}
	return topic
}

// swarm feed create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
// swarm feed update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
// swarm feed info <Manifest Address or ENS domain>

func feedCreateManifest(ctx *cli.Context) {
	var (
		bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		client = swarm.NewClient(bzzapi)
	)

	newFeedUpdateRequest := feed.NewFirstRequest(getTopic(ctx))
	newFeedUpdateRequest.Feed.User = feedGetUser(ctx)

	manifestAddress, err := client.CreateFeedWithManifest(newFeedUpdateRequest)
	if err != nil {
		utils.Fatalf("Error creating feed manifest: %s", err.Error())
		return
	}
	fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)

}

func feedUpdate(ctx *cli.Context) {
	args := ctx.Args()

	var (
		bzzapi                  = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		client                  = swarm.NewClient(bzzapi)
		manifestAddressOrDomain = ctx.String(SwarmFeedManifestFlag.Name)
	)

	if len(args) < 1 {
		fmt.Println("Incorrect number of arguments")
		cli.ShowCommandHelpAndExit(ctx, "update", 1)
		return
	}

	signer := NewGenericSigner(ctx)

	data, err := hexutil.Decode(args[0])
	if err != nil {
		utils.Fatalf("Error parsing data: %s", err.Error())
		return
	}

	var updateRequest *feed.Request
	var query *feed.Query

	if manifestAddressOrDomain == "" {
		query = new(feed.Query)
		query.User = signer.Address()
		query.Topic = getTopic(ctx)
	}

	// Retrieve a feed update request
	updateRequest, err = client.GetFeedRequest(query, manifestAddressOrDomain)
	if err != nil {
		utils.Fatalf("Error retrieving feed status: %s", err.Error())
	}

	// Check that the provided signer matches the request to sign
	if updateRequest.User != signer.Address() {
		utils.Fatalf("Signer address does not match the update request")
	}

	// set the new data
	updateRequest.SetData(data)

	// sign update
	if err = updateRequest.Sign(signer); err != nil {
		utils.Fatalf("Error signing feed update: %s", err.Error())
	}

	// post update
	err = client.UpdateFeed(updateRequest)
	if err != nil {
		utils.Fatalf("Error updating feed: %s", err.Error())
		return
	}
}

func feedInfo(ctx *cli.Context) {
	var (
		bzzapi                  = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		client                  = swarm.NewClient(bzzapi)
		manifestAddressOrDomain = ctx.String(SwarmFeedManifestFlag.Name)
	)

	var query *feed.Query
	if manifestAddressOrDomain == "" {
		query = new(feed.Query)
		query.Topic = getTopic(ctx)
		query.User = feedGetUser(ctx)
	}

	metadata, err := client.GetFeedRequest(query, manifestAddressOrDomain)
	if err != nil {
		utils.Fatalf("Error retrieving feed metadata: %s", err.Error())
		return
	}
	encodedMetadata, err := metadata.MarshalJSON()
	if err != nil {
		utils.Fatalf("Error encoding metadata to JSON for display:%s", err)
	}
	fmt.Println(string(encodedMetadata))
}

func feedGetUser(ctx *cli.Context) common.Address {
	var user = ctx.String(SwarmFeedUserFlag.Name)
	if user != "" {
		return common.HexToAddress(user)
	}
	pk := getPrivKey(ctx)
	if pk == nil {
		utils.Fatalf("Cannot read private key. Must specify --user or --bzzaccount")
	}
	return crypto.PubkeyToAddress(pk.PublicKey)

}
cmd/swarm/feeds_test.go (new file, 196 lines)
@@ -0,0 +1,196 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

func TestCLIFeedUpdate(t *testing.T) {
	srv := swarmhttp.NewTestSwarmServer(t, func(api *api.API) swarmhttp.TestServer {
		return swarmhttp.NewServer(api, "")
	}, nil)
	log.Info("starting a test swarm server")
	defer srv.Close()

	// create a private key file for signing
	privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979"
	privKey, _ := crypto.HexToECDSA(privkeyHex)
	address := crypto.PubkeyToAddress(privKey.PublicKey)

	pkFileName := testutil.TempFileWithContent(t, privkeyHex)
	defer os.Remove(pkFileName)

	// compose a topic. We'll be doing quotes about Miguel de Cervantes
	var topic feed.Topic
	subject := []byte("Miguel de Cervantes")
	copy(topic[:], subject[:])
	name := "quotes"

	// prepare some data for the update
	data := []byte("En boca cerrada no entran moscas")
	hexData := hexutil.Encode(data)

	flags := []string{
		"--bzzapi", srv.URL,
		"--bzzaccount", pkFileName,
		"feed", "update",
		"--topic", topic.Hex(),
		"--name", name,
		hexData}

	// create an update and expect an exit without errors
	log.Info("updating a feed with 'swarm feed update'")
	cmd := runSwarm(t, flags...)
	cmd.ExpectExit()

	// now try to get the update using the client
	client := swarm.NewClient(srv.URL)

	// build the same topic as before, this time
	// we use NewTopic to create a topic automatically.
	topic, err := feed.NewTopic(name, subject)
	if err != nil {
		t.Fatal(err)
	}

	// Feed configures whose updates we will be looking up.
	fd := feed.Feed{
		Topic: topic,
		User:  address,
	}

	// Build a query to get the latest update
	query := feed.NewQueryLatest(&fd, lookup.NoClue)

	// retrieve content!
	reader, err := client.QueryFeed(query, "")
	if err != nil {
		t.Fatal(err)
	}

	retrieved, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	// check we retrieved the sent information
	if !bytes.Equal(data, retrieved) {
		t.Fatalf("Received %s, expected %s", retrieved, data)
	}

	// Now retrieve info for the next update
	flags = []string{
		"--bzzapi", srv.URL,
		"feed", "info",
		"--topic", topic.Hex(),
		"--user", address.Hex(),
	}

	log.Info("getting feed info with 'swarm feed info'")
	cmd = runSwarm(t, flags...)
	_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
	cmd.ExpectExit()

	// verify we can deserialize the result as a valid JSON
	var request feed.Request
	err = json.Unmarshal([]byte(matches[0]), &request)
	if err != nil {
		t.Fatal(err)
	}

	// make sure the retrieved feed is the same
	if request.Feed != fd {
		t.Fatalf("Expected feed to be: %s, got %s", fd, request.Feed)
	}

	// test publishing a manifest
	flags = []string{
		"--bzzapi", srv.URL,
		"--bzzaccount", pkFileName,
		"feed", "create",
		"--topic", topic.Hex(),
	}

	log.Info("Publishing manifest with 'swarm feed create'")
	cmd = runSwarm(t, flags...)
	_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
	cmd.ExpectExit()

	manifestAddress := matches[0] // read the received feed manifest

	// now attempt to lookup the latest update using a manifest instead
	reader, err = client.QueryFeed(nil, manifestAddress)
	if err != nil {
		t.Fatal(err)
	}

	retrieved, err = ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(data, retrieved) {
		t.Fatalf("Received %s, expected %s", retrieved, data)
	}

	// test publishing a manifest for a different user
	flags = []string{
		"--bzzapi", srv.URL,
		"feed", "create",
		"--topic", topic.Hex(),
		"--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
	}

	log.Info("Publishing manifest with 'swarm feed create' for a different user")
	cmd = runSwarm(t, flags...)
	_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
	cmd.ExpectExit()

	manifestAddress = matches[0] // read the received feed manifest

	// now let's try to update that user's manifest which we don't have the private key for
	flags = []string{
		"--bzzapi", srv.URL,
		"--bzzaccount", pkFileName,
		"feed", "update",
		"--manifest", manifestAddress,
		hexData}

	// create an update and expect an error given there is a user mismatch
	log.Info("updating a feed with 'swarm feed update'")
	cmd = runSwarm(t, flags...)
	cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
	cmd.ExpectExit()
	if cmd.ExitStatus() == 0 {
		t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.")
	}
}
cmd/swarm/flags.go (new file, 179 lines)
@@ -0,0 +1,179 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Command feed allows the user to create and update signed Swarm feeds
package main

import cli "gopkg.in/urfave/cli.v1"

var (
	ChequebookAddrFlag = cli.StringFlag{
		Name:   "chequebook",
		Usage:  "chequebook contract address",
		EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR,
	}
	SwarmAccountFlag = cli.StringFlag{
		Name:   "bzzaccount",
		Usage:  "Swarm account key file",
		EnvVar: SWARM_ENV_ACCOUNT,
	}
	SwarmListenAddrFlag = cli.StringFlag{
		Name:   "httpaddr",
		Usage:  "Swarm HTTP API listening interface",
		EnvVar: SWARM_ENV_LISTEN_ADDR,
	}
	SwarmPortFlag = cli.StringFlag{
		Name:   "bzzport",
		Usage:  "Swarm local http api port",
		EnvVar: SWARM_ENV_PORT,
	}
	SwarmNetworkIdFlag = cli.IntFlag{
		Name:   "bzznetworkid",
		Usage:  "Network identifier (integer, default 3=swarm testnet)",
		EnvVar: SWARM_ENV_NETWORK_ID,
	}
	SwarmSwapEnabledFlag = cli.BoolFlag{
		Name:   "swap",
		Usage:  "Swarm SWAP enabled (default false)",
		EnvVar: SWARM_ENV_SWAP_ENABLE,
	}
	SwarmSwapAPIFlag = cli.StringFlag{
		Name:   "swap-api",
		Usage:  "URL of the Ethereum API provider to use to settle SWAP payments",
		EnvVar: SWARM_ENV_SWAP_API,
	}
	SwarmSyncDisabledFlag = cli.BoolTFlag{
		Name:   "nosync",
		Usage:  "Disable swarm syncing",
		EnvVar: SWARM_ENV_SYNC_DISABLE,
	}
	SwarmSyncUpdateDelay = cli.DurationFlag{
		Name:   "sync-update-delay",
		Usage:  "Duration for sync subscriptions update after no new peers are added (default 15s)",
		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
	}
	SwarmMaxStreamPeerServersFlag = cli.IntFlag{
		Name:   "max-stream-peer-servers",
		Usage:  "Limit of Stream peer servers, 0 denotes unlimited",
		EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS,
		Value:  10000, // A very large default value is possible as stream servers have very small memory footprint
	}
	SwarmLightNodeEnabled = cli.BoolFlag{
		Name:   "lightnode",
		Usage:  "Enable Swarm LightNode (default false)",
		EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
	}
	SwarmDeliverySkipCheckFlag = cli.BoolFlag{
		Name:   "delivery-skip-check",
		Usage:  "Skip chunk delivery check (default false)",
		EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
	}
	EnsAPIFlag = cli.StringSliceFlag{
		Name:   "ens-api",
		Usage:  "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
		EnvVar: SWARM_ENV_ENS_API,
	}
	SwarmApiFlag = cli.StringFlag{
		Name:  "bzzapi",
		Usage: "Specifies the Swarm HTTP endpoint to connect to",
		Value: "http://127.0.0.1:8500",
	}
	SwarmRecursiveFlag = cli.BoolFlag{
		Name:  "recursive",
		Usage: "Upload directories recursively",
	}
	SwarmWantManifestFlag = cli.BoolTFlag{
		Name:  "manifest",
		Usage: "Automatic manifest upload (default true)",
	}
	SwarmUploadDefaultPath = cli.StringFlag{
		Name:  "defaultpath",
		Usage: "path to file served for empty url path (none)",
	}
	SwarmAccessGrantKeyFlag = cli.StringFlag{
		Name:  "grant-key",
		Usage: "grants a given public key access to an ACT",
	}
	SwarmAccessGrantKeysFlag = cli.StringFlag{
		Name:  "grant-keys",
		Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
	}
	SwarmUpFromStdinFlag = cli.BoolFlag{
		Name:  "stdin",
		Usage: "reads data to be uploaded from stdin",
	}
	SwarmUploadMimeType = cli.StringFlag{
		Name:  "mime",
		Usage: "Manually specify MIME type",
	}
	SwarmEncryptedFlag = cli.BoolFlag{
		Name:  "encrypt",
		Usage: "use encrypted upload",
	}
	SwarmAccessPasswordFlag = cli.StringFlag{
		Name:   "password",
		Usage:  "Password",
		EnvVar: SWARM_ACCESS_PASSWORD,
	}
	SwarmDryRunFlag = cli.BoolFlag{
		Name:  "dry-run",
		Usage: "dry-run",
	}
	CorsStringFlag = cli.StringFlag{
		Name:   "corsdomain",
		Usage:  "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
		EnvVar: SWARM_ENV_CORS,
	}
	SwarmStorePath = cli.StringFlag{
		Name:   "store.path",
		Usage:  "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
		EnvVar: SWARM_ENV_STORE_PATH,
	}
	SwarmStoreCapacity = cli.Uint64Flag{
		Name:   "store.size",
		Usage:  "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
		EnvVar: SWARM_ENV_STORE_CAPACITY,
	}
	SwarmStoreCacheCapacity = cli.UintFlag{
		Name:   "store.cache.size",
		Usage:  "Number of recent chunks cached in memory (default 5000)",
		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
	}
	SwarmCompressedFlag = cli.BoolFlag{
		Name:  "compressed",
		Usage: "Prints encryption keys in compressed form",
	}
	SwarmFeedNameFlag = cli.StringFlag{
		Name:  "name",
		Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name",
	}
	SwarmFeedTopicFlag = cli.StringFlag{
		Name:  "topic",
		Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
	}
	SwarmFeedDataOnCreateFlag = cli.StringFlag{
		Name:  "data",
		Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
	}
	SwarmFeedManifestFlag = cli.StringFlag{
		Name:  "manifest",
		Usage: "Refers to the feed through a manifest",
	}
	SwarmFeedUserFlag = cli.StringFlag{
		Name:  "user",
		Usage: "Indicates the user who updates the feed"
|
||||
}
|
||||
)
|
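These declarations are standard urfave/cli v1 flags; a command action reads them back through its context. A minimal, self-contained sketch of that pattern follows (the env var name and printed message are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

// Illustrative flag in the same style as the declarations above.
var apiFlag = cli.StringFlag{
	Name:   "bzzapi",
	Usage:  "Swarm HTTP endpoint",
	Value:  "http://127.0.0.1:8500", // default when neither flag nor env var is set
	EnvVar: "EXAMPLE_BZZ_API",       // hypothetical env var name
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{apiFlag}
	app.Action = func(ctx *cli.Context) error {
		// Resolution order in cli v1: command line > EnvVar > Value.
		fmt.Println("endpoint:", ctx.GlobalString(apiFlag.Name))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}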
@@ -24,16 +24,50 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
	"gopkg.in/urfave/cli.v1"
)

var fsCommand = cli.Command{
	Name:               "fs",
	CustomHelpTemplate: helpTemplate,
	Usage:              "perform FUSE operations",
	ArgsUsage:          "fs COMMAND",
	Description:        "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
	Subcommands: []cli.Command{
		{
			Action:             mount,
			CustomHelpTemplate: helpTemplate,
			Name:               "mount",
			Usage:              "mount a swarm hash to a mount point",
			ArgsUsage:          "swarm fs mount <manifest hash> <mount point>",
			Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
		},
		{
			Action:             unmount,
			CustomHelpTemplate: helpTemplate,
			Name:               "unmount",
			Usage:              "unmount a swarmfs mount",
			ArgsUsage:          "swarm fs unmount <mount point>",
			Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
		},
		{
			Action:             listMounts,
			CustomHelpTemplate: helpTemplate,
			Name:               "list",
			Usage:              "list swarmfs mounts",
			ArgsUsage:          "swarm fs list",
			Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
		},
	},
}

func mount(cliContext *cli.Context) {
	args := cliContext.Args()
	if len(args) < 2 {
		utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
		utils.Fatalf("Usage: swarm fs mount <manifestHash> <file name>")
	}

	client, err := dialRPC(cliContext)
@@ -60,7 +94,7 @@ func unmount(cliContext *cli.Context) {
	args := cliContext.Args()

	if len(args) < 1 {
		utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
		utils.Fatalf("Usage: swarm fs unmount <mount path>")
	}
	client, err := dialRPC(cliContext)
	if err != nil {
@@ -108,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
}

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
	var endpoint string
	endpoint := getIPCEndpoint(ctx)
	log.Info("IPC endpoint", "path", endpoint)
	return rpc.Dial(endpoint)
}

	if ctx.IsSet(utils.IPCPathFlag.Name) {
		endpoint = ctx.String(utils.IPCPathFlag.Name)
	} else {
		utils.Fatalf("swarm ipc endpoint not specified")
	}
func getIPCEndpoint(ctx *cli.Context) string {
	cfg := defaultNodeConfig
	utils.SetNodeConfig(ctx, &cfg)

	if endpoint == "" {
		endpoint = node.DefaultIPCEndpoint(clientIdentifier)
	} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
	endpoint := cfg.IPCEndpoint()

	if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		// Backwards compatibility with geth < 1.5 which required
		// these prefixes.
		endpoint = endpoint[4:]
	}
	return rpc.Dial(endpoint)
	return endpoint
}
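Because removed and added lines are interleaved in the hunk above, the refactored helpers are easier to read assembled. This is just the added lines put back together (defaultNodeConfig, the imports and clientIdentifier live in the surrounding cmd/swarm package, so this is not independently runnable):

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
	endpoint := getIPCEndpoint(ctx)
	log.Info("IPC endpoint", "path", endpoint)
	return rpc.Dial(endpoint)
}

func getIPCEndpoint(ctx *cli.Context) string {
	cfg := defaultNodeConfig
	utils.SetNodeConfig(ctx, &cfg)

	endpoint := cfg.IPCEndpoint()

	if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		// Backwards compatibility with geth < 1.5 which required
		// these prefixes.
		endpoint = endpoint[4:]
	}
	return endpoint
}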
@@ -20,6 +20,7 @@ package main
import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
@@ -28,20 +29,35 @@ import (
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"
)

func init() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

type testFile struct {
	filePath string
	content  string
}

// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list
// can find and correctly connect to a running Swarm node on the default
// IPCPath.
func TestCLISwarmFsDefaultIPCPath(t *testing.T) {
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()

	handlingNode := cluster.Nodes[0]
	list := runSwarm(t, []string{
		"--datadir", handlingNode.Dir,
		"fs",
		"list",
	}...)

	list.WaitExit()
	if list.Err != nil {
		t.Fatal(list.Err)
	}
}

// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
@@ -65,9 +81,9 @@ func TestCLISwarmFs(t *testing.T) {
	log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	mount := runSwarm(t, []string{
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mhash,
		mountPoint,
	}...)
@@ -80,6 +96,9 @@ func TestCLISwarmFs(t *testing.T) {
		t.Fatal(err)
	}
	dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
	if err != nil {
		t.Fatal(err)
	}

	dummyContent := "somerandomtestcontentthatshouldbeasserted"
	dirs := []string{
@@ -104,9 +123,9 @@ func TestCLISwarmFs(t *testing.T) {
	log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmount := runSwarm(t, []string{
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mountPoint,
	}...)
	_, matches := unmount.ExpectRegexp(hashRegexp)
@@ -139,9 +158,9 @@ func TestCLISwarmFs(t *testing.T) {

	// remount, check files
	newMount := runSwarm(t, []string{
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		hash, // the latest hash
		secondMountPoint,
	}...)
@@ -175,9 +194,9 @@ func TestCLISwarmFs(t *testing.T) {
	log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmountSec := runSwarm(t, []string{
		fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		secondMountPoint,
	}...)

@@ -27,6 +27,15 @@ import (
	"gopkg.in/urfave/cli.v1"
)

var hashCommand = cli.Command{
	Action:             hash,
	CustomHelpTemplate: helpTemplate,
	Name:               "hash",
	Usage:              "print the swarm hash of a file or directory",
	ArgsUsage:          "<file>",
	Description:        "Prints the swarm hash of file or directory",
}

func hash(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) < 1 {

@@ -27,6 +27,15 @@ import (
	"gopkg.in/urfave/cli.v1"
)

var listCommand = cli.Command{
	Action:             list,
	CustomHelpTemplate: helpTemplate,
	Name:               "ls",
	Usage:              "list files and directories contained in a manifest",
	ArgsUsage:          "<manifest> [<prefix>]",
	Description:        "Lists files and directories contained in a manifest",
}

func list(ctx *cli.Context) {
	args := ctx.Args()

@@ -38,7 +38,7 @@ import (
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/swarm"
	bzzapi "github.com/ethereum/go-ethereum/swarm/api"
	swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
@@ -70,151 +70,6 @@ var (
	gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
)

var (
	ChequebookAddrFlag = cli.StringFlag{
		Name:   "chequebook",
		Usage:  "chequebook contract address",
		EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR,
	}
	SwarmAccountFlag = cli.StringFlag{
		Name:   "bzzaccount",
		Usage:  "Swarm account key file",
		EnvVar: SWARM_ENV_ACCOUNT,
	}
	SwarmListenAddrFlag = cli.StringFlag{
		Name:   "httpaddr",
		Usage:  "Swarm HTTP API listening interface",
		EnvVar: SWARM_ENV_LISTEN_ADDR,
	}
	SwarmPortFlag = cli.StringFlag{
		Name:   "bzzport",
		Usage:  "Swarm local http api port",
		EnvVar: SWARM_ENV_PORT,
	}
	SwarmNetworkIdFlag = cli.IntFlag{
		Name:   "bzznetworkid",
		Usage:  "Network identifier (integer, default 3=swarm testnet)",
		EnvVar: SWARM_ENV_NETWORK_ID,
	}
	SwarmSwapEnabledFlag = cli.BoolFlag{
		Name:   "swap",
		Usage:  "Swarm SWAP enabled (default false)",
		EnvVar: SWARM_ENV_SWAP_ENABLE,
	}
	SwarmSwapAPIFlag = cli.StringFlag{
		Name:   "swap-api",
		Usage:  "URL of the Ethereum API provider to use to settle SWAP payments",
		EnvVar: SWARM_ENV_SWAP_API,
	}
	SwarmSyncDisabledFlag = cli.BoolTFlag{
		Name:   "nosync",
		Usage:  "Disable swarm syncing",
		EnvVar: SWARM_ENV_SYNC_DISABLE,
	}
	SwarmSyncUpdateDelay = cli.DurationFlag{
		Name:   "sync-update-delay",
		Usage:  "Duration for sync subscriptions update after no new peers are added (default 15s)",
		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
	}
	SwarmLightNodeEnabled = cli.BoolFlag{
		Name:   "lightnode",
		Usage:  "Enable Swarm LightNode (default false)",
		EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
	}
	SwarmDeliverySkipCheckFlag = cli.BoolFlag{
		Name:   "delivery-skip-check",
		Usage:  "Skip chunk delivery check (default false)",
		EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
	}
	EnsAPIFlag = cli.StringSliceFlag{
		Name:   "ens-api",
		Usage:  "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
		EnvVar: SWARM_ENV_ENS_API,
	}
	SwarmApiFlag = cli.StringFlag{
		Name:  "bzzapi",
		Usage: "Swarm HTTP endpoint",
		Value: "http://127.0.0.1:8500",
	}
	SwarmRecursiveFlag = cli.BoolFlag{
		Name:  "recursive",
		Usage: "Upload directories recursively",
	}
	SwarmWantManifestFlag = cli.BoolTFlag{
		Name:  "manifest",
		Usage: "Automatic manifest upload (default true)",
	}
	SwarmUploadDefaultPath = cli.StringFlag{
		Name:  "defaultpath",
		Usage: "path to file served for empty url path (none)",
	}
	SwarmAccessGrantKeyFlag = cli.StringFlag{
		Name:  "grant-key",
		Usage: "grants a given public key access to an ACT",
	}
	SwarmAccessGrantKeysFlag = cli.StringFlag{
		Name:  "grant-keys",
		Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
	}
	SwarmUpFromStdinFlag = cli.BoolFlag{
		Name:  "stdin",
		Usage: "reads data to be uploaded from stdin",
	}
	SwarmUploadMimeType = cli.StringFlag{
		Name:  "mime",
		Usage: "Manually specify MIME type",
	}
	SwarmEncryptedFlag = cli.BoolFlag{
		Name:  "encrypt",
		Usage: "use encrypted upload",
	}
	SwarmAccessPasswordFlag = cli.StringFlag{
		Name:   "password",
		Usage:  "Password",
		EnvVar: SWARM_ACCESS_PASSWORD,
	}
	SwarmDryRunFlag = cli.BoolFlag{
		Name:  "dry-run",
		Usage: "dry-run",
	}
	CorsStringFlag = cli.StringFlag{
		Name:   "corsdomain",
		Usage:  "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
		EnvVar: SWARM_ENV_CORS,
	}
	SwarmStorePath = cli.StringFlag{
		Name:   "store.path",
		Usage:  "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
		EnvVar: SWARM_ENV_STORE_PATH,
	}
	SwarmStoreCapacity = cli.Uint64Flag{
		Name:   "store.size",
		Usage:  "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
		EnvVar: SWARM_ENV_STORE_CAPACITY,
	}
	SwarmStoreCacheCapacity = cli.UintFlag{
		Name:   "store.cache.size",
		Usage:  "Number of recent chunks cached in memory (default 5000)",
		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
	}
	SwarmResourceMultihashFlag = cli.BoolFlag{
		Name:  "multihash",
		Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
	}
	SwarmResourceNameFlag = cli.StringFlag{
		Name:  "name",
		Usage: "User-defined name for the new resource",
	}
	SwarmResourceDataOnCreateFlag = cli.StringFlag{
		Name:  "data",
		Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
	}
	SwarmCompressedFlag = cli.BoolFlag{
		Name:  "compressed",
		Usage: "Prints encryption keys in compressed form",
	}
)

// declare a few constant error messages, useful for later error check comparisons in test
var (
	SWARM_ERR_NO_BZZACCOUNT = "bzzaccount option is required but not set; check your config file, command line or environment variables"
@@ -242,12 +97,12 @@ func init() {
	utils.ListenPortFlag.Value = 30399
}

var app = utils.NewApp(gitCommit, "Ethereum Swarm")
var app = utils.NewApp("", "Ethereum Swarm")

// This init function creates the cli.App.
func init() {
	app.Action = bzzd
	app.HideVersion = true // we have a command to print the version
	app.Version = sv.ArchiveVersion(gitCommit)
	app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
	app.Commands = []cli.Command{
		{
@@ -265,249 +120,24 @@ func init() {
			Usage:       "Print public key information",
			Description: "The output of this command is supposed to be machine-readable",
		},
		{
			Action:             upload,
			CustomHelpTemplate: helpTemplate,
			Name:               "up",
			Usage:              "uploads a file or directory to swarm using the HTTP API",
			ArgsUsage:          "<file>",
			Flags:              []cli.Flag{SwarmEncryptedFlag},
			Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
		},
		{
			CustomHelpTemplate: helpTemplate,
			Name:               "access",
			Usage:              "encrypts a reference and embeds it into a root manifest",
			ArgsUsage:          "<ref>",
			Description:        "encrypts a reference and embeds it into a root manifest",
			Subcommands: []cli.Command{
				{
					CustomHelpTemplate: helpTemplate,
					Name:               "new",
					Usage:              "encrypts a reference and embeds it into a root manifest",
					ArgsUsage:          "<ref>",
					Description:        "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					Subcommands: []cli.Command{
						{
							Action:             accessNewPass,
							CustomHelpTemplate: helpTemplate,
							Flags: []cli.Flag{
								utils.PasswordFileFlag,
								SwarmDryRunFlag,
							},
							Name:        "pass",
							Usage:       "encrypts a reference with a password and embeds it into a root manifest",
							ArgsUsage:   "<ref>",
							Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
						},
						{
							Action:             accessNewPK,
							CustomHelpTemplate: helpTemplate,
							Flags: []cli.Flag{
								utils.PasswordFileFlag,
								SwarmDryRunFlag,
								SwarmAccessGrantKeyFlag,
							},
							Name:        "pk",
							Usage:       "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
							ArgsUsage:   "<ref>",
							Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
						},
						{
							Action:             accessNewACT,
							CustomHelpTemplate: helpTemplate,
							Flags: []cli.Flag{
								SwarmAccessGrantKeysFlag,
								SwarmDryRunFlag,
								utils.PasswordFileFlag,
							},
							Name:        "act",
							Usage:       "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
							ArgsUsage:   "<ref>",
							Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
						},
					},
				},
			},
		},
		{
			CustomHelpTemplate: helpTemplate,
			Name:               "resource",
			Usage:              "(Advanced) Create and update Mutable Resources",
			ArgsUsage:          "<create|update|info>",
			Description:        "Works with Mutable Resource Updates",
			Subcommands: []cli.Command{
				{
					Action:             resourceCreate,
					CustomHelpTemplate: helpTemplate,
					Name:               "create",
					Usage:              "creates a new Mutable Resource",
					ArgsUsage:          "<frequency>",
					Description:        "creates a new Mutable Resource",
					Flags:              []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
				},
				{
					Action:             resourceUpdate,
					CustomHelpTemplate: helpTemplate,
					Name:               "update",
					Usage:              "updates the content of an existing Mutable Resource",
					ArgsUsage:          "<Manifest Address or ENS domain> <0x Hex data>",
					Description:        "updates the content of an existing Mutable Resource",
					Flags:              []cli.Flag{SwarmResourceMultihashFlag},
				},
				{
					Action:             resourceInfo,
					CustomHelpTemplate: helpTemplate,
					Name:               "info",
					Usage:              "obtains information about an existing Mutable Resource",
					ArgsUsage:          "<Manifest Address or ENS domain>",
					Description:        "obtains information about an existing Mutable Resource",
				},
			},
		},
		{
			Action:             list,
			CustomHelpTemplate: helpTemplate,
			Name:               "ls",
			Usage:              "list files and directories contained in a manifest",
			ArgsUsage:          "<manifest> [<prefix>]",
			Description:        "Lists files and directories contained in a manifest",
		},
		{
			Action:             hash,
			CustomHelpTemplate: helpTemplate,
			Name:               "hash",
			Usage:              "print the swarm hash of a file or directory",
			ArgsUsage:          "<file>",
			Description:        "Prints the swarm hash of file or directory",
		},
		{
			Action:      download,
			Name:        "down",
			Flags:       []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
			Usage:       "downloads a swarm manifest or a file inside a manifest",
			ArgsUsage:   " <uri> [<dir>]",
			Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`,
		},
		{
			Name:               "manifest",
			CustomHelpTemplate: helpTemplate,
			Usage:              "perform operations on swarm manifests",
			ArgsUsage:          "COMMAND",
			Description:        "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
			Subcommands: []cli.Command{
				{
					Action:             manifestAdd,
					CustomHelpTemplate: helpTemplate,
					Name:               "add",
					Usage:              "add a new path to the manifest",
					ArgsUsage:          "<MANIFEST> <path> <hash>",
					Description:        "Adds a new path to the manifest",
				},
				{
					Action:             manifestUpdate,
					CustomHelpTemplate: helpTemplate,
					Name:               "update",
					Usage:              "update the hash for an already existing path in the manifest",
					ArgsUsage:          "<MANIFEST> <path> <newhash>",
					Description:        "Update the hash for an already existing path in the manifest",
				},
				{
					Action:             manifestRemove,
					CustomHelpTemplate: helpTemplate,
					Name:               "remove",
					Usage:              "removes a path from the manifest",
					ArgsUsage:          "<MANIFEST> <path>",
					Description:        "Removes a path from the manifest",
				},
			},
		},
		{
			Name:               "fs",
			CustomHelpTemplate: helpTemplate,
			Usage:              "perform FUSE operations",
			ArgsUsage:          "fs COMMAND",
			Description:        "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
			Subcommands: []cli.Command{
				{
					Action:             mount,
					CustomHelpTemplate: helpTemplate,
					Name:               "mount",
					Flags:              []cli.Flag{utils.IPCPathFlag},
					Usage:              "mount a swarm hash to a mount point",
					ArgsUsage:          "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
					Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
				},
				{
					Action:             unmount,
					CustomHelpTemplate: helpTemplate,
					Name:               "unmount",
					Flags:              []cli.Flag{utils.IPCPathFlag},
					Usage:              "unmount a swarmfs mount",
					ArgsUsage:          "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
					Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
				},
				{
					Action:             listMounts,
					CustomHelpTemplate: helpTemplate,
					Name:               "list",
					Flags:              []cli.Flag{utils.IPCPathFlag},
					Usage:              "list swarmfs mounts",
					ArgsUsage:          "swarm fs list --ipcpath <path to bzzd.ipc>",
					Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
				},
			},
		},
		{
			Name:               "db",
			CustomHelpTemplate: helpTemplate,
			Usage:              "manage the local chunk database",
			ArgsUsage:          "db COMMAND",
			Description:        "Manage the local chunk database",
			Subcommands: []cli.Command{
				{
					Action:             dbExport,
					CustomHelpTemplate: helpTemplate,
					Name:               "export",
					Usage:              "export a local chunk database as a tar archive (use - to send to stdout)",
					ArgsUsage:          "<chunkdb> <file>",
					Description: `
Export a local chunk database as a tar archive (use - to send to stdout).

    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The export may be quite large, consider piping the output through the Unix
pv(1) tool to get a progress bar:

    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
`,
				},
				{
					Action:             dbImport,
					CustomHelpTemplate: helpTemplate,
					Name:               "import",
					Usage:              "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
					ArgsUsage:          "<chunkdb> <file>",
					Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).

    swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:

    pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
				},
				{
					Action:             dbClean,
					CustomHelpTemplate: helpTemplate,
					Name:               "clean",
					Usage:              "remove corrupt entries from a local chunk database",
					ArgsUsage:          "<chunkdb>",
					Description:        "Remove corrupt entries from a local chunk database",
				},
			},
		},

		// See upload.go
		upCommand,
		// See access.go
		accessCommand,
		// See feeds.go
		feedCommand,
		// See list.go
		listCommand,
		// See hash.go
		hashCommand,
		// See download.go
		downloadCommand,
		// See manifest.go
		manifestCommand,
		// See fs.go
		fsCommand,
		// See db.go
		dbCommand,
		// See config.go
		DumpConfigCommand,
	}
@@ -542,6 +172,7 @@ pv(1) tool to get a progress bar:
		SwarmSwapAPIFlag,
		SwarmSyncDisabledFlag,
		SwarmSyncUpdateDelay,
		SwarmMaxStreamPeerServersFlag,
		SwarmLightNodeEnabled,
		SwarmDeliverySkipCheckFlag,
		SwarmListenAddrFlag,
@@ -697,7 +328,7 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
}

// getPrivKey returns the private key of the specified bzzaccount
// Used only by client commands, such as `resource`
// Used only by client commands, such as `feed`
func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
	// booting up the swarm node just as we do in bzzd action
	bzzconfig, err := buildConfig(ctx)
@@ -788,10 +419,10 @@ func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
		return
	}

	cfg.P2P.BootstrapNodes = []*discover.Node{}
	cfg.P2P.BootstrapNodes = []*enode.Node{}

	for _, url := range SwarmBootnodes {
		node, err := discover.ParseNode(url)
		node, err := enode.ParseV4(url)
		if err != nil {
			log.Error("Bootstrap URL invalid", "enode", url, "err", err)
		}
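The discover-to-enode migration above swaps discover.ParseNode for enode.ParseV4. A standalone sketch of the new call (the URL below is the usual documentation example, not a real node):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// enode URLs carry a 64-byte hex node ID followed by host:port.
	url := "enode://ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f@127.0.0.1:30303"
	n, err := enode.ParseV4(url)
	if err != nil {
		fmt.Println("Bootstrap URL invalid:", err)
		return
	}
	fmt.Println("ip:", n.IP(), "tcp:", n.TCP())
}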
|
@@ -28,6 +28,40 @@ import (
	"gopkg.in/urfave/cli.v1"
)

var manifestCommand = cli.Command{
	Name:               "manifest",
	CustomHelpTemplate: helpTemplate,
	Usage:              "perform operations on swarm manifests",
	ArgsUsage:          "COMMAND",
	Description:        "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
	Subcommands: []cli.Command{
		{
			Action:             manifestAdd,
			CustomHelpTemplate: helpTemplate,
			Name:               "add",
			Usage:              "add a new path to the manifest",
			ArgsUsage:          "<MANIFEST> <path> <hash>",
			Description:        "Adds a new path to the manifest",
		},
		{
			Action:             manifestUpdate,
			CustomHelpTemplate: helpTemplate,
			Name:               "update",
			Usage:              "update the hash for an already existing path in the manifest",
			ArgsUsage:          "<MANIFEST> <path> <newhash>",
			Description:        "Update the hash for an already existing path in the manifest",
		},
		{
			Action:             manifestRemove,
			CustomHelpTemplate: helpTemplate,
			Name:               "remove",
			Usage:              "removes a path from the manifest",
			ArgsUsage:          "<MANIFEST> <path>",
			Description:        "Removes a path from the manifest",
		},
	},
}

// manifestAdd adds a new entry to the manifest at the given path.
// The new entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.

@@ -21,21 +21,31 @@ import (
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

// TestManifestChange tests manifest add, update and remove
// cli commands without encryption.
func TestManifestChange(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}

	testManifestChange(t, false)
}

// TestManifestChangeEncrypted tests manifest add, update and remove
// cli commands with encryption enabled.
func TestManifestChangeEncrypted(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}

	testManifestChange(t, true)
}

@@ -48,8 +58,8 @@ func TestManifestChangeEncrypted(t *testing.T) {
// Argument encrypt controls whether to use encryption or not.
func testManifestChange(t *testing.T, encrypt bool) {
	t.Parallel()
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	tmp, err := ioutil.TempDir("", "swarm-manifest-test")
	if err != nil {
@@ -85,7 +95,7 @@ func testManifestChange(t *testing.T, encrypt bool) {

	args := []string{
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"--recursive",
		"--defaultpath",
		indexDataFilename,
@@ -100,7 +110,7 @@ func testManifestChange(t *testing.T, encrypt bool) {

	checkHashLength(t, origManifestHash, encrypt)

	client := swarm.NewClient(cluster.Nodes[0].URL)
	client := swarm.NewClient(srv.URL)

	// upload a new file and use its manifest to add it to the original manifest.
	t.Run("add", func(t *testing.T) {
@@ -113,14 +123,14 @@ func testManifestChange(t *testing.T, encrypt bool) {

	humansManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"up",
		humansDataFilename,
	)

	newManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"manifest",
		"add",
		origManifestHash,
@@ -168,14 +178,14 @@ func testManifestChange(t *testing.T, encrypt bool) {

	robotsManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"up",
		robotsDataFilename,
	)

	newManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"manifest",
		"add",
		origManifestHash,
@@ -228,14 +238,14 @@ func testManifestChange(t *testing.T, encrypt bool) {

	indexManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"up",
		indexDataFilename,
	)

	newManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"manifest",
		"update",
		origManifestHash,
@@ -286,14 +296,14 @@ func testManifestChange(t *testing.T, encrypt bool) {

	humansManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"up",
		robotsDataFilename,
	)

	newManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"manifest",
		"update",
		origManifestHash,
@@ -339,7 +349,7 @@ func testManifestChange(t *testing.T, encrypt bool) {
	t.Run("remove", func(t *testing.T) {
		newManifestHash := runSwarmExpectHash(t,
			"--bzzapi",
			cluster.Nodes[0].URL,
			srv.URL,
			"manifest",
			"remove",
			origManifestHash,
@@ -367,7 +377,7 @@ func testManifestChange(t *testing.T, encrypt bool) {
	t.Run("remove nested", func(t *testing.T) {
		newManifestHash := runSwarmExpectHash(t,
			"--bzzapi",
			cluster.Nodes[0].URL,
			srv.URL,
			"manifest",
			"remove",
			origManifestHash,
@@ -400,6 +410,10 @@ func testManifestChange(t *testing.T, encrypt bool) {
// TestNestedDefaultEntryUpdate tests if the default entry is updated
// if the file in nested manifest used for it is also updated.
func TestNestedDefaultEntryUpdate(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}

	testNestedDefaultEntryUpdate(t, false)
}

@@ -407,13 +421,17 @@ func TestNestedDefaultEntryUpdate(t *testing.T) {
// of encrypted upload is updated if the file in nested manifest
// used for it is also updated.
func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}

	testNestedDefaultEntryUpdate(t, true)
}

func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {
	t.Parallel()
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	tmp, err := ioutil.TempDir("", "swarm-manifest-test")
	if err != nil {
@@ -441,7 +459,7 @@ func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {

	args := []string{
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"--recursive",
		"--defaultpath",
		indexDataFilename,
@@ -456,7 +474,7 @@ func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {

	checkHashLength(t, origManifestHash, encrypt)

	client := swarm.NewClient(cluster.Nodes[0].URL)
	client := swarm.NewClient(srv.URL)

	newIndexData := []byte("<h1>Ethereum Swarm</h1>")
	newIndexDataFilename := filepath.Join(tmp, "index.html")
@@ -467,14 +485,14 @@ func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {

	newIndexManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"up",
		newIndexDataFilename,
	)

	newManifestHash := runSwarmExpectHash(t,
		"--bzzapi",
		cluster.Nodes[0].URL,
		srv.URL,
		"manifest",
		"update",
		origManifestHash,
124
cmd/swarm/mimegen/generator.go
Normal file
@@ -0,0 +1,124 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

// The standard "mime" package relies on system settings, see mime.osInitMime.
// Swarm will run on many OS/platform/Docker combinations and must behave the same on all of them.
// This command generates code to add common mime types based on a mime.types file.
//
// The mime.types file is provided by mailcap, which follows https://www.iana.org/assignments/media-types/media-types.xhtml
//
// Get the latest version of the mime.types file by:
// docker run --rm -v $(pwd):/tmp alpine:edge /bin/sh -c "apk add -U mailcap; mv /etc/mime.types /tmp"

import (
	"bufio"
	"bytes"
	"flag"
	"html/template"
	"io/ioutil"
	"strings"

	"log"
)

var (
	typesFlag   = flag.String("types", "", "Input mime.types file")
	packageFlag = flag.String("package", "", "Golang package in output file")
	outFlag     = flag.String("out", "", "Output file name for the generated mime types")
)

type mime struct {
	Name string
	Exts []string
}

type templateParams struct {
	PackageName string
	Mimes       []mime
}

func main() {
	// Parse and ensure all needed inputs are specified
	flag.Parse()
	if *typesFlag == "" {
		log.Fatalf("--types is required")
	}
	if *packageFlag == "" {
		log.Fatalf("--package is required")
	}
	if *outFlag == "" {
		log.Fatalf("--out is required")
	}

	params := templateParams{
		PackageName: *packageFlag,
	}

	types, err := ioutil.ReadFile(*typesFlag)
	if err != nil {
		log.Fatal(err)
	}

	scanner := bufio.NewScanner(bytes.NewReader(types))
	for scanner.Scan() {
		txt := scanner.Text()
		if strings.HasPrefix(txt, "#") || len(txt) == 0 {
			continue
		}
		parts := strings.Fields(txt)
		if len(parts) == 1 {
			continue
		}
		params.Mimes = append(params.Mimes, mime{parts[0], parts[1:]})
	}

	if err = scanner.Err(); err != nil {
		log.Fatal(err)
	}

	result := bytes.NewBuffer([]byte{})

	if err := template.Must(template.New("_").Parse(tpl)).Execute(result, params); err != nil {
		log.Fatal(err)
	}

	if err := ioutil.WriteFile(*outFlag, result.Bytes(), 0600); err != nil {
		log.Fatal(err)
	}
}

var tpl = `// Code generated by github.com/ethereum/go-ethereum/cmd/swarm/mimegen. DO NOT EDIT.

package {{ .PackageName }}

import "mime"
func init() {
	var mimeTypes = map[string]string{
{{- range .Mimes -}}
{{ $name := .Name -}}
{{- range .Exts }}
		".{{ . }}": "{{ $name | html }}",
{{- end }}
{{- end }}
	}
	for ext, name := range mimeTypes {
		if err := mime.AddExtensionType(ext, name); err != nil {
			panic(err)
		}
	}
}
`
1828
cmd/swarm/mimegen/mime.types
Normal file
169
cmd/swarm/mru.go
@@ -1,169 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Command resource allows the user to create and update signed mutable resource updates
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common/hexutil"

	"github.com/ethereum/go-ethereum/cmd/utils"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"gopkg.in/urfave/cli.v1"
)

func NewGenericSigner(ctx *cli.Context) mru.Signer {
	return mru.NewGenericSigner(getPrivKey(ctx))
}

// swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
// swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
// swarm resource info <Manifest Address or ENS domain>

func resourceCreate(ctx *cli.Context) {
	args := ctx.Args()

	var (
		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		client      = swarm.NewClient(bzzapi)
		multihash   = ctx.Bool(SwarmResourceMultihashFlag.Name)
		initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
		name        = ctx.String(SwarmResourceNameFlag.Name)
	)

	if len(args) < 1 {
		fmt.Println("Incorrect number of arguments")
		cli.ShowCommandHelpAndExit(ctx, "create", 1)
		return
	}
	signer := NewGenericSigner(ctx)
	frequency, err := strconv.ParseUint(args[0], 10, 64)
	if err != nil {
		fmt.Printf("Frequency formatting error: %s\n", err.Error())
		cli.ShowCommandHelpAndExit(ctx, "create", 1)
		return
	}

	metadata := mru.ResourceMetadata{
		Name:      name,
		Frequency: frequency,
		Owner:     signer.Address(),
	}

	var newResourceRequest *mru.Request
	if initialData != "" {
		initialDataBytes, err := hexutil.Decode(initialData)
		if err != nil {
			fmt.Printf("Error parsing data: %s\n", err.Error())
			cli.ShowCommandHelpAndExit(ctx, "create", 1)
			return
		}
		newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
		if err != nil {
			utils.Fatalf("Error creating new resource request: %s", err)
		}
		newResourceRequest.SetData(initialDataBytes, multihash)
		if err = newResourceRequest.Sign(signer); err != nil {
			utils.Fatalf("Error signing resource update: %s", err.Error())
		}
	} else {
		newResourceRequest, err = mru.NewCreateRequest(&metadata)
		if err != nil {
			utils.Fatalf("Error creating new resource request: %s", err)
		}
	}

	manifestAddress, err := client.CreateResource(newResourceRequest)
	if err != nil {
		utils.Fatalf("Error creating resource: %s", err.Error())
		return
	}
	fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)

}

func resourceUpdate(ctx *cli.Context) {
	args := ctx.Args()

	var (
		bzzapi    = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		client    = swarm.NewClient(bzzapi)
		multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
	)

	if len(args) < 2 {
		fmt.Println("Incorrect number of arguments")
		cli.ShowCommandHelpAndExit(ctx, "update", 1)
		return
	}
	signer := NewGenericSigner(ctx)
	manifestAddressOrDomain := args[0]
	data, err := hexutil.Decode(args[1])
	if err != nil {
		utils.Fatalf("Error parsing data: %s", err.Error())
		return
	}

	// Retrieve resource status and metadata out of the manifest
	updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
	if err != nil {
		utils.Fatalf("Error retrieving resource status: %s", err.Error())
	}

	// set the new data
	updateRequest.SetData(data, multihash)

	// sign update
	if err = updateRequest.Sign(signer); err != nil {
		utils.Fatalf("Error signing resource update: %s", err.Error())
	}

	// post update
	err = client.UpdateResource(updateRequest)
	if err != nil {
		utils.Fatalf("Error updating resource: %s", err.Error())
		return
	}
}

func resourceInfo(ctx *cli.Context) {
	var (
		bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		client = swarm.NewClient(bzzapi)
	)
	args := ctx.Args()
	if len(args) < 1 {
		fmt.Println("Incorrect number of arguments.")
		cli.ShowCommandHelpAndExit(ctx, "info", 1)
		return
	}
	manifestAddressOrDomain := args[0]
	metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
	if err != nil {
		utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
		return
	}
	encodedMetadata, err := metadata.MarshalJSON()
	if err != nil {
		utils.Fatalf("Error encoding metadata to JSON for display: %s", err)
	}
	fmt.Println(string(encodedMetadata))
}
@@ -19,6 +19,7 @@ package main
import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
@@ -39,8 +40,12 @@ import (
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
@@ -52,6 +57,20 @@ func init() {
	})
}

const clusterSize = 3

var clusteronce sync.Once
var cluster *testCluster

func initCluster(t *testing.T) {
	clusteronce.Do(func() {
		cluster = newTestCluster(t, clusterSize)
	})
}

func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}
func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
@@ -234,6 +253,7 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--nodiscover",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
@@ -241,7 +261,7 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", "6",
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
@@ -284,8 +304,8 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)

	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

@@ -309,6 +329,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--nodiscover",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
@@ -316,7 +337,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", "6",
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
@@ -359,9 +380,8 @@ func newTestNode(t *testing.T, dir string) *testNode {
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath

	return node
}
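For reference, the new feed subcommands that replace the removed resource ones are driven through the CLI by the smoke test below; these invocations mirror its exec.Command calls (endpoint, topic, key file and data are placeholders):

swarm --bzzapi http://127.0.0.1:8500 feed create --topic <topic hex> --user <0x user address>
swarm --bzzaccount <key file> --bzzapi http://127.0.0.1:8500 feed update --topic <topic hex> <0x hex data>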
318
cmd/swarm/swarm-smoke/feed_upload_and_sync.go
Normal file
@@ -0,0 +1,318 @@
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	colorable "github.com/mattn/go-colorable"
	"github.com/pborman/uuid"
	cli "gopkg.in/urfave/cli.v1"
)

const (
	feedRandomDataLength = 8
)

// TODO: retrieve with manifest + extract repeating code
func cliFeedUploadAndSync(c *cli.Context) error {

	log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))

	defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())

	generateEndpoints(scheme, cluster, from, to)

	log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")

	// create a random private key to sign updates with and derive the address
	pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
	if err != nil {
		return err
	}
	defer pkFile.Close()
	defer os.Remove(pkFile.Name())

	privkeyHex := "0000000000000000000000000000000000000000000000000000000000001976"
	privKey, err := crypto.HexToECDSA(privkeyHex)
	if err != nil {
		return err
	}
	user := crypto.PubkeyToAddress(privKey.PublicKey)
	userHex := hexutil.Encode(user.Bytes())

	// save the private key to a file
	_, err = io.WriteString(pkFile, privkeyHex)
	if err != nil {
		return err
	}

	// keep hex strings for topic and subtopic
	var topicHex string
	var subTopicHex string

	// and create combination hex topics for bzz-feed retrieval
	// xor'ed with topic (zero-value topic if no topic)
	var subTopicOnlyHex string
	var mergedSubTopicHex string

	// generate random topic and subtopic and put a hex on them
	topicBytes, err := generateRandomData(feed.TopicLength)
	topicHex = hexutil.Encode(topicBytes)
	subTopicBytes, err := generateRandomData(8)
	subTopicHex = hexutil.Encode(subTopicBytes)
	if err != nil {
		return err
	}
	mergedSubTopic, err := feed.NewTopic(subTopicHex, topicBytes)
	if err != nil {
		return err
	}
	mergedSubTopicHex = hexutil.Encode(mergedSubTopic[:])
	subTopicOnlyBytes, err := feed.NewTopic(subTopicHex, nil)
	if err != nil {
		return err
	}
	subTopicOnlyHex = hexutil.Encode(subTopicOnlyBytes[:])

	// create feed manifest, topic only
	var out bytes.Buffer
	cmd := exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--user", userHex)
	cmd.Stdout = &out
	log.Debug("create feed manifest topic cmd", "cmd", cmd)
	err = cmd.Run()
	if err != nil {
		return err
	}
	manifestWithTopic := strings.TrimRight(out.String(), string([]byte{0x0a}))
	if len(manifestWithTopic) != 64 {
		return fmt.Errorf("unknown feed create manifest hash format (topic): (%d) %s", len(out.String()), manifestWithTopic)
	}
	log.Debug("create topic feed", "manifest", manifestWithTopic)
	out.Reset()

	// create feed manifest, subtopic only
	cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--name", subTopicHex, "--user", userHex)
	cmd.Stdout = &out
	log.Debug("create feed manifest subtopic cmd", "cmd", cmd)
	err = cmd.Run()
	if err != nil {
		return err
	}
	manifestWithSubTopic := strings.TrimRight(out.String(), string([]byte{0x0a}))
	if len(manifestWithSubTopic) != 64 {
		return fmt.Errorf("unknown feed create manifest hash format (subtopic): (%d) %s", len(out.String()), manifestWithSubTopic)
	}
	log.Debug("create subtopic feed", "manifest", manifestWithTopic)
	out.Reset()

	// create feed manifest, merged topic
	cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--name", subTopicHex, "--user", userHex)
	cmd.Stdout = &out
	log.Debug("create feed manifest mergetopic cmd", "cmd", cmd)
	err = cmd.Run()
	if err != nil {
		log.Error(err.Error())
		return err
	}
	manifestWithMergedTopic := strings.TrimRight(out.String(), string([]byte{0x0a}))
	if len(manifestWithMergedTopic) != 64 {
		return fmt.Errorf("unknown feed create manifest hash format (mergedtopic): (%d) %s", len(out.String()), manifestWithMergedTopic)
	}
	log.Debug("create mergedtopic feed", "manifest", manifestWithMergedTopic)
	out.Reset()

	// create test data
	data, err := generateRandomData(feedRandomDataLength)
	if err != nil {
		return err
	}
	h := md5.New()
	h.Write(data)
	dataHash := h.Sum(nil)
	dataHex := hexutil.Encode(data)

	// update with topic
	cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, dataHex)
	cmd.Stdout = &out
	log.Debug("update feed manifest topic cmd", "cmd", cmd)
	err = cmd.Run()
	if err != nil {
		return err
	}
	log.Debug("feed update topic", "out", out)
	out.Reset()

	// update with subtopic
	cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, dataHex)
	cmd.Stdout = &out
	log.Debug("update feed manifest subtopic cmd", "cmd", cmd)
	err = cmd.Run()
	if err != nil {
		return err
	}
	log.Debug("feed update subtopic", "out", out)
	out.Reset()

	// update with merged topic
	cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, dataHex)
	cmd.Stdout = &out
	log.Debug("update feed manifest merged topic cmd", "cmd", cmd)
	err = cmd.Run()
	if err != nil {
		return err
	}
	log.Debug("feed update mergedtopic", "out", out)
	out.Reset()

	time.Sleep(3 * time.Second)

	// retrieve the data
	wg := sync.WaitGroup{}
	for _, endpoint := range endpoints {
		// raw retrieve, topic only
		for _, hex := range []string{topicHex, subTopicOnlyHex, mergedSubTopicHex} {
			wg.Add(1)
			ruid := uuid.New()[:8]
			go func(hex string, endpoint string, ruid string) {
				for {
					err := fetchFeed(hex, userHex, endpoint, dataHash, ruid)
					if err != nil {
						continue
					}

					wg.Done()
					return
				}
			}(hex, endpoint, ruid)

		}
	}
	wg.Wait()
	log.Info("all endpoints synced random data successfully")

	// upload test file
	log.Info("uploading to " + endpoints[0] + " and syncing")

	f, cleanup := generateRandomFile(filesize * 1000)
	defer cleanup()

	hash, err := upload(f, endpoints[0])
	if err != nil {
		return err
	}
	hashBytes, err := hexutil.Decode("0x" + hash)
	if err != nil {
		return err
	}
	multihashHex := hexutil.Encode(hashBytes)
	fileHash, err := digest(f)
	if err != nil {
		return err
	}

	log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fileHash))

	// update file with topic
	cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, multihashHex)
	cmd.Stdout = &out
	err = cmd.Run()
	if err != nil {
		return err
	}
	log.Debug("feed update topic", "out", out)
	out.Reset()

	// update file with subtopic
	cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, multihashHex)
	cmd.Stdout = &out
	err = cmd.Run()
	if err != nil {
		return err
	}
	log.Debug("feed update subtopic", "out", out)
	out.Reset()
||||
// update file with merged topic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, multihashHex)
|
||||
cmd.Stdout = &out
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("feed update mergedtopic", "out", out)
|
||||
out.Reset()
|
||||
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
|
||||
// manifest retrieve, topic only
|
||||
for _, url := range []string{manifestWithTopic, manifestWithSubTopic, manifestWithMergedTopic} {
|
||||
wg.Add(1)
|
||||
ruid := uuid.New()[:8]
|
||||
go func(url string, endpoint string, ruid string) {
|
||||
for {
|
||||
err := fetch(url, endpoint, fileHash, ruid)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}(url, endpoint, ruid)
|
||||
}
|
||||
|
||||
}
|
||||
wg.Wait()
|
||||
log.Info("all endpoints synced random file successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
|
||||
log.Trace("sleeping", "ruid", ruid)
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)
|
||||
res, err := http.Get(endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return fmt.Errorf("expected status code %d, got %v (ruid %v)", 200, res.StatusCode, ruid)
|
||||
}
|
||||
|
||||
defer res.Body.Close()
|
||||
|
||||
rdigest, err := digest(res.Body)
|
||||
if err != nil {
|
||||
log.Warn(err.Error(), "ruid", ruid)
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(rdigest, original) {
|
||||
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
|
||||
log.Warn(err.Error(), "ruid", ruid)
|
||||
return err
|
||||
}
|
||||
|
||||
log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)
|
||||
|
||||
return nil
|
||||
}
|
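The retrieval goroutines above retry fetchFeed in a tight loop until a node reports the update, which can spin forever against an endpoint that never syncs. A bounded variant with backoff might look like the following sketch; maxAttempts and the doubling schedule are illustrative assumptions, not part of this change.

// fetchFeedWithRetry is a hypothetical bounded wrapper around fetchFeed above.
func fetchFeedWithRetry(topic, user, endpoint string, original []byte, ruid string, maxAttempts int) error {
	backoff := time.Second
	var err error
	for i := 0; i < maxAttempts; i++ {
		if err = fetchFeed(topic, user, endpoint, original, ruid); err == nil {
			return nil
		}
		time.Sleep(backoff)
		backoff *= 2 // double the wait before the next attempt
	}
	return fmt.Errorf("feed not synced after %d attempts: %v", maxAttempts, err)
}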
@@ -21,7 +21,6 @@ import (
	"sort"

	"github.com/ethereum/go-ethereum/log"
-	colorable "github.com/mattn/go-colorable"

	cli "gopkg.in/urfave/cli.v1"
)
@@ -34,11 +33,10 @@ var (
	filesize  int
	from      int
	to        int
+	verbosity int
)

func main() {
-	log.PrintOrigins(true)
-	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))

	app := cli.NewApp()
	app.Name = "smoke-test"
@@ -47,8 +45,8 @@ func main() {
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "cluster-endpoint",
-			Value:       "testing",
-			Usage:       "cluster to point to (local, open or testing)",
+			Value:       "prod",
+			Usage:       "cluster to point to (prod or a given namespace)",
			Destination: &cluster,
		},
		cli.IntFlag{
@@ -80,6 +78,12 @@ func main() {
			Usage:       "file size for generated random file in KB",
			Destination: &filesize,
		},
+		cli.IntFlag{
+			Name:        "verbosity",
+			Value:       1,
+			Usage:       "verbosity",
+			Destination: &verbosity,
+		},
	}

	app.Commands = []cli.Command{
@@ -89,6 +93,12 @@ func main() {
			Usage:   "upload and sync",
			Action:  cliUploadAndSync,
		},
+		{
+			Name:    "feed_sync",
+			Aliases: []string{"f"},
+			Usage:   "feed update generate, upload and sync",
+			Action:  cliFeedUploadAndSync,
+		},
	}

	sort.Sort(cli.FlagsByName(app.Flags))
@@ -19,7 +19,9 @@ package main
import (
	"bytes"
	"crypto/md5"
-	"crypto/rand"
+	crand "crypto/rand"
+	"crypto/tls"
+	"errors"
	"fmt"
	"io"
	"io/ioutil"
@@ -31,6 +33,7 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"
+	"github.com/pborman/uuid"

	cli "gopkg.in/urfave/cli.v1"
@@ -38,18 +41,13 @@ import (

func generateEndpoints(scheme string, cluster string, from int, to int) {
	if cluster == "prod" {
-		cluster = ""
-	} else if cluster == "local" {
		for port := from; port <= to; port++ {
-			endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port))
+			endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
		}
-		return
	} else {
-		cluster = cluster + "."
-	}
-
-	for port := from; port <= to; port++ {
-		endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster))
+		for port := from; port <= to; port++ {
+			endpoints = append(endpoints, fmt.Sprintf("%s://swarm-%v-%s.stg.swarm-gateways.net", scheme, port, cluster))
+		}
	}

	if includeLocalhost {
@@ -58,6 +56,9 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
}

func cliUploadAndSync(c *cli.Context) error {
+	log.PrintOrigins(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+
	defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())

	generateEndpoints(scheme, cluster, from, to)
@@ -85,7 +86,6 @@ func cliUploadAndSync(c *cli.Context) error {

	wg := sync.WaitGroup{}
	for _, endpoint := range endpoints {
-		endpoint := endpoint
		ruid := uuid.New()[:8]
		wg.Add(1)
		go func(endpoint string, ruid string) {
@@ -112,7 +112,10 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error {
	time.Sleep(3 * time.Second)

	log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
-	res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
+	client := &http.Client{Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+	}}
+	res, err := client.Get(endpoint + "/bzz:/" + hash + "/")
	if err != nil {
		log.Warn(err.Error(), "ruid", ruid)
		return err
@@ -166,6 +169,18 @@ func digest(r io.Reader) ([]byte, error) {
	return h.Sum(nil), nil
}

+// generates random data in heap buffer
+func generateRandomData(datasize int) ([]byte, error) {
+	b := make([]byte, datasize)
+	c, err := crand.Read(b)
+	if err != nil {
+		return nil, err
+	} else if c != datasize {
+		return nil, errors.New("short read")
+	}
+	return b, nil
+}

// generateRandomFile creates a temporary file with the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
	// create a tmp file
@@ -181,7 +196,7 @@ func generateRandomFile(size int) (f *os.File, teardown func()) {
	}

	buf := make([]byte, size)
-	_, err = rand.Read(buf)
+	_, err = crand.Read(buf)
	if err != nil {
		panic(err)
	}
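For reference, a sketch of the endpoint shapes the rewritten generateEndpoints yields; the hostnames below are just the two format strings filled in with sample arguments.

// Illustrative outputs of generateEndpoints (sketch):
//   generateEndpoints("https", "prod", 8500, 8502) appends
//     https://8500.swarm-gateways.net ... https://8502.swarm-gateways.net
//   generateEndpoints("https", "myns", 0, 2) appends
//     https://swarm-0-myns.stg.swarm-gateways.net ... https://swarm-2-myns.stg.swarm-gateways.net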
@@ -22,34 +22,51 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"mime"
	"net/http"
	"os"
	"os/user"
	"path"
	"path/filepath"
	"strconv"
	"strings"

+	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"

-	"github.com/ethereum/go-ethereum/cmd/utils"
	"gopkg.in/urfave/cli.v1"
)

-func upload(ctx *cli.Context) {
+var upCommand = cli.Command{
+	Action:             upload,
+	CustomHelpTemplate: helpTemplate,
+	Name:               "up",
+	Usage:              "uploads a file or directory to swarm using the HTTP API",
+	ArgsUsage:          "<file>",
+	Flags:              []cli.Flag{SwarmEncryptedFlag},
+	Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
+}
+
+func upload(ctx *cli.Context) {
	args := ctx.Args()
	var (
-		bzzapi       = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		recursive    = ctx.GlobalBool(SwarmRecursiveFlag.Name)
-		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		defaultPath  = ctx.GlobalString(SwarmUploadDefaultPath.Name)
-		fromStdin    = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
-		mimeType     = ctx.GlobalString(SwarmUploadMimeType.Name)
-		client       = swarm.NewClient(bzzapi)
-		toEncrypt    = ctx.Bool(SwarmEncryptedFlag.Name)
-		file         string
+		bzzapi          = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		recursive       = ctx.GlobalBool(SwarmRecursiveFlag.Name)
+		wantManifest    = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
+		defaultPath     = ctx.GlobalString(SwarmUploadDefaultPath.Name)
+		fromStdin       = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
+		mimeType        = ctx.GlobalString(SwarmUploadMimeType.Name)
+		client          = swarm.NewClient(bzzapi)
+		toEncrypt       = ctx.Bool(SwarmEncryptedFlag.Name)
+		autoDefaultPath = false
+		file            string
	)

+	if autoDefaultPathString := os.Getenv(SWARM_AUTO_DEFAULTPATH); autoDefaultPathString != "" {
+		b, err := strconv.ParseBool(autoDefaultPathString)
+		if err != nil {
+			utils.Fatalf("invalid environment variable %s: %v", SWARM_AUTO_DEFAULTPATH, err)
+		}
+		autoDefaultPath = b
+	}
	if len(args) != 1 {
		if fromStdin {
			tmp, err := ioutil.TempFile("", "swarm-stdin")
@@ -98,6 +115,15 @@ func upload(ctx *cli.Context) {
		if !recursive {
			return "", errors.New("Argument is a directory and recursive upload is disabled")
		}
+		if autoDefaultPath && defaultPath == "" {
+			defaultEntryCandidate := path.Join(file, "index.html")
+			log.Debug("trying to find default path", "path", defaultEntryCandidate)
+			defaultEntryStat, err := os.Stat(defaultEntryCandidate)
+			if err == nil && !defaultEntryStat.IsDir() {
+				log.Debug("setting auto detected default path", "path", defaultEntryCandidate)
+				defaultPath = defaultEntryCandidate
+			}
+		}
		if defaultPath != "" {
			// construct absolute default path
			absDefaultPath, _ := filepath.Abs(defaultPath)
@@ -118,10 +144,9 @@ func upload(ctx *cli.Context) {
			return "", fmt.Errorf("error opening file: %s", err)
		}
		defer f.Close()
-		if mimeType == "" {
-			mimeType = detectMimeType(file)
+		if mimeType != "" {
+			f.ContentType = mimeType
		}
-		f.ContentType = mimeType
		return client.Upload(f, "", toEncrypt)
	}
}
@@ -138,6 +163,12 @@ func upload(ctx *cli.Context) {
// 3. cleans the path, e.g. /a/b/../c -> /a/c
// Note, it has limitations, e.g. ~someuser/tmp will not be expanded
func expandPath(p string) string {
+	if i := strings.Index(p, ":"); i > 0 {
+		return p
+	}
+	if i := strings.Index(p, "@"); i > 0 {
+		return p
+	}
	if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
		if home := homeDir(); home != "" {
			p = home + p[1:]
@@ -155,19 +186,3 @@ func homeDir() string {
	}
	return ""
}
-
-func detectMimeType(file string) string {
-	if ext := filepath.Ext(file); ext != "" {
-		return mime.TypeByExtension(ext)
-	}
-	f, err := os.Open(file)
-	if err != nil {
-		return ""
-	}
-	defer f.Close()
-	buf := make([]byte, 512)
-	if n, _ := f.Read(buf); n > 0 {
-		return http.DetectContentType(buf)
-	}
-	return ""
-}
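The new guards in expandPath return early for anything that looks like a URL or a remote spec, so only plain local paths get tilde expansion. A sketch of the resulting behavior (the home directory shown is illustrative):

// Behavior of expandPath with the new guards (sketch):
//   expandPath("~/keys")         -> "/home/alice/keys" (tilde expanded via homeDir)
//   expandPath("bzz://site")     -> "bzz://site" (contains ":", returned unchanged)
//   expandPath("user@host/path") -> "user@host/path" (contains "@", returned unchanged)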
@@ -18,7 +18,6 @@ package main

import (
	"bytes"
	"flag"
	"fmt"
-	"io"
	"io/ioutil"
@@ -26,72 +25,82 @@ import (
	"os"
	"path"
	"path/filepath"
+	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
-	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
-	colorable "github.com/mattn/go-colorable"
+	swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
+	"github.com/mattn/go-colorable"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

-// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
-// available from all nodes via the HTTP API
-func TestCLISwarmUp(t *testing.T) {
-	testCLISwarmUp(false, t)
-}
-func TestCLISwarmUpRecursive(t *testing.T) {
-	testCLISwarmUpRecursive(false, t)
-}
-
-// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
-// available from all nodes via the HTTP API
-func TestCLISwarmUpEncrypted(t *testing.T) {
-	testCLISwarmUp(true, t)
-}
-func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
-	testCLISwarmUpRecursive(true, t)
-}
-
-func testCLISwarmUp(toEncrypt bool, t *testing.T) {
-	log.Info("starting 3 node cluster")
-	cluster := newTestCluster(t, 3)
-	defer cluster.Shutdown()
-
-	// create a tmp file
-	tmp, err := ioutil.TempFile("", "swarm-test")
-	if err != nil {
-		t.Fatal(err)
+func TestSwarmUp(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip()
	}
-	defer tmp.Close()
-	defer os.Remove(tmp.Name())

+	initCluster(t)
+
+	cases := []struct {
+		name string
+		f    func(t *testing.T)
+	}{
+		{"NoEncryption", testNoEncryption},
+		{"Encrypted", testEncrypted},
+		{"RecursiveNoEncryption", testRecursiveNoEncryption},
+		{"RecursiveEncrypted", testRecursiveEncrypted},
+		{"DefaultPathAll", testDefaultPathAll},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, tc.f)
+	}
+}
+
+// testNoEncryption tests that running 'swarm up' makes the resulting file
+// available from all nodes via the HTTP API
+func testNoEncryption(t *testing.T) {
+	testDefault(false, t)
+}
+
+// testEncrypted tests that running 'swarm up --encrypted' makes the resulting file
+// available from all nodes via the HTTP API
+func testEncrypted(t *testing.T) {
+	testDefault(true, t)
+}
+
+func testRecursiveNoEncryption(t *testing.T) {
+	testRecursive(false, t)
+}
+
+func testRecursiveEncrypted(t *testing.T) {
+	testRecursive(true, t)
+}
+
+func testDefault(toEncrypt bool, t *testing.T) {
+	tmpFileName := testutil.TempFileWithContent(t, data)
+	defer os.Remove(tmpFileName)

-	// write data to file
-	data := "notsorandomdata"
-	_, err = io.WriteString(tmp, data)
-	if err != nil {
-		t.Fatal(err)
-	}
-
	hashRegexp := `[a-f\d]{64}`
	flags := []string{
		"--bzzapi", cluster.Nodes[0].URL,
		"up",
-		tmp.Name()}
+		tmpFileName}
	if toEncrypt {
		hashRegexp = `[a-f\d]{128}`
		flags = []string{
			"--bzzapi", cluster.Nodes[0].URL,
			"up",
			"--encrypt",
-			tmp.Name()}
+			tmpFileName}
	}
	// upload the file with 'swarm up' and expect a hash
	log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
@@ -180,18 +189,13 @@ func testCLISwarmUp(toEncrypt bool, t *testing.T) {
	}
}

-func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
-	fmt.Println("starting 3 node cluster")
-	cluster := newTestCluster(t, 3)
-	defer cluster.Shutdown()
-
+func testRecursive(toEncrypt bool, t *testing.T) {
	tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpUploadDir)
	// create tmp files
	data := "notsorandomdata"
	for _, path := range []string{"tmp1", "tmp2"} {
		if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
			t.Fatal(err)
@@ -231,8 +235,7 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
	}
	defer os.RemoveAll(tmpDownload)
	bzzLocator := "bzz:/" + hash
-	flagss := []string{}
-	flagss = []string{
+	flagss := []string{
		"--bzzapi", cluster.Nodes[0].URL,
		"down",
		"--recursive",
@@ -253,7 +256,7 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {

	switch mode := fi.Mode(); {
	case mode.IsRegular():
-		if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
+		if file, err := swarmapi.Open(path.Join(tmpDownload, v.Name())); err != nil {
			t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
		} else {
			ff := make([]byte, len(data))
@@ -274,19 +277,16 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
	}
}

-// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute
+// testDefaultPathAll tests swarm recursive upload with relative and absolute
// default paths and with encryption.
-func TestCLISwarmUpDefaultPath(t *testing.T) {
-	testCLISwarmUpDefaultPath(false, false, t)
-	testCLISwarmUpDefaultPath(false, true, t)
-	testCLISwarmUpDefaultPath(true, false, t)
-	testCLISwarmUpDefaultPath(true, true, t)
+func testDefaultPathAll(t *testing.T) {
+	testDefaultPath(false, false, t)
+	testDefaultPath(false, true, t)
+	testDefaultPath(true, false, t)
+	testDefaultPath(true, true, t)
}

-func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
-	cluster := newTestCluster(t, 1)
-	defer cluster.Shutdown()
-
+func testDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
	tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
	if err != nil {
		t.Fatal(err)
@@ -326,7 +326,7 @@ func testDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
	up.ExpectExit()
	hash := matches[0]

-	client := swarm.NewClient(cluster.Nodes[0].URL)
+	client := swarmapi.NewClient(cluster.Nodes[0].URL)

	m, isEncrypted, err := client.DownloadManifest(hash)
	if err != nil {
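Because the refactor folds the former top-level Test functions into t.Run subtests under TestSwarmUp, individual cases can now be selected with go test's -run filter. A sketch (the package path is an assumption):

// Running a single subtest (sketch):
//   go test ./cmd/swarm -run "TestSwarmUp/RecursiveEncrypted"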
@@ -272,13 +272,13 @@ func ImportPreimages(db *ethdb.LDBDatabase, fn string) error {
		// Accumulate the preimages and flush when enough was gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
-			rawdb.WritePreimages(db, 0, preimages)
+			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
-		rawdb.WritePreimages(db, 0, preimages)
+		rawdb.WritePreimages(db, preimages)
	}
	return nil
}
@@ -51,8 +51,8 @@ import (
	"github.com/ethereum/go-ethereum/metrics/influxdb"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
-	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/discv5"
+	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nat"
	"github.com/ethereum/go-ethereum/p2p/netutil"
	"github.com/ethereum/go-ethereum/params"
@@ -295,7 +295,12 @@ var (
	CacheDatabaseFlag = cli.IntFlag{
		Name:  "cache.database",
		Usage: "Percentage of cache memory allowance to use for database io",
-		Value: 75,
+		Value: 50,
	}
+	CacheTrieFlag = cli.IntFlag{
+		Name:  "cache.trie",
+		Usage: "Percentage of cache memory allowance to use for trie caching",
+		Value: 25,
+	}
	CacheGCFlag = cli.IntFlag{
		Name: "cache.gc",
@@ -692,9 +697,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
		return // already set, don't apply defaults.
	}

-	cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))
+	cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
	for _, url := range urls {
-		node, err := discover.ParseNode(url)
+		node, err := enode.ParseV4(url)
		if err != nil {
			log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
		}
@@ -973,16 +978,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
	setWS(ctx, cfg)
	setNodeUserIdent(ctx, cfg)

-	switch {
-	case ctx.GlobalIsSet(DataDirFlag.Name):
-		cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
-	case ctx.GlobalBool(DeveloperFlag.Name):
-		cfg.DataDir = "" // unless explicitly requested, use memory databases
-	case ctx.GlobalBool(TestnetFlag.Name):
-		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
-	case ctx.GlobalBool(RinkebyFlag.Name):
-		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
-	}
+	setDataDir(ctx, cfg)

	if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
		cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@@ -995,6 +991,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
	}
}

+func setDataDir(ctx *cli.Context, cfg *node.Config) {
+	switch {
+	case ctx.GlobalIsSet(DataDirFlag.Name):
+		cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
+	case ctx.GlobalBool(DeveloperFlag.Name):
+		cfg.DataDir = "" // unless explicitly requested, use memory databases
+	case ctx.GlobalBool(TestnetFlag.Name):
+		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
+	case ctx.GlobalBool(RinkebyFlag.Name):
+		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
+	}
+}

func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
	if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
		cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
@@ -1085,11 +1094,14 @@ func checkExclusive(ctx *cli.Context, args ...interface{}) {
		if i+1 < len(args) {
			switch option := args[i+1].(type) {
			case string:
-				// Extended flag, expand the name and shift the arguments
+				// Extended flag check, make sure value set doesn't conflict with passed in option
				if ctx.GlobalString(flag.GetName()) == option {
					name += "=" + option
+					set = append(set, "--"+name)
				}
+				// shift arguments and continue
				i++
				continue

			case cli.Flag:
			default:
@@ -1154,8 +1166,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
	}
	cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"

+	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
+		cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
+	}
	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
-		cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
+		cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
	}
	if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
		cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
@@ -1390,12 +1405,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
		Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
	}
	cache := &core.CacheConfig{
-		Disabled:      ctx.GlobalString(GCModeFlag.Name) == "archive",
-		TrieNodeLimit: eth.DefaultConfig.TrieCache,
-		TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
+		Disabled:       ctx.GlobalString(GCModeFlag.Name) == "archive",
+		TrieCleanLimit: eth.DefaultConfig.TrieCleanCache,
+		TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache,
+		TrieTimeLimit:  eth.DefaultConfig.TrieTimeout,
	}
+	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
+		cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
+	}
	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
-		cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
+		cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
	}
	vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
	chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)
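With the new split, the total --cache allowance is divided percentage-wise across database IO, the clean trie cache, and the dirty trie (GC) cache. A sketch of the arithmetic, assuming cache.gc keeps a 25% default alongside the values shown above:

// Illustrative split for `geth --cache 1024` with defaults
// (cache.database 50%, cache.trie 25%, cache.gc 25%):
//   database IO: 1024 * 50 / 100 = 512 MB
//   trie clean:  1024 * 25 / 100 = 256 MB
//   trie dirty:  1024 * 25 / 100 = 256 MB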
@@ -41,7 +41,7 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
-	"github.com/ethereum/go-ethereum/p2p/discover"
+	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nat"
	"github.com/ethereum/go-ethereum/whisper/mailserver"
	whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
@@ -175,7 +175,7 @@ func initialize() {
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat(false))))

	done = make(chan struct{})
-	var peers []*discover.Node
+	var peers []*enode.Node
	var err error

	if *generateKey {
@@ -203,7 +203,7 @@ func initialize() {
	if len(*argEnode) == 0 {
		argEnode = scanLineA("Please enter the peer's enode: ")
	}
-	peer := discover.MustParseNode(*argEnode)
+	peer := enode.MustParseV4(*argEnode)
	peers = append(peers, peer)
}

@@ -747,11 +747,11 @@ func requestExpiredMessagesLoop() {
}

func extractIDFromEnode(s string) []byte {
-	n, err := discover.ParseNode(s)
+	n, err := enode.ParseV4(s)
	if err != nil {
		utils.Fatalf("Failed to parse enode: %s", err)
	}
-	return n.ID[:]
+	return n.ID().Bytes()
}

// obfuscateBloom adds 16 random bits to the bloom
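The migration from p2p/discover to p2p/enode swaps both the parsing entry point and the node-ID accessor. A side-by-side sketch of the pattern applied above:

// Old vs new node parsing and ID access (sketch):
//   old: n, err := discover.ParseNode(url);  id := n.ID[:]
//   new: n, err := enode.ParseV4(url);       id := n.ID().Bytes()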
@@ -31,6 +31,15 @@ func ToHex(b []byte) string {
	return "0x" + hex
}

+// ToHexArray creates an array of hex-strings from a slice of byte slices
+func ToHexArray(b [][]byte) []string {
+	r := make([]string, len(b))
+	for i := range b {
+		r[i] = ToHex(b[i])
+	}
+	return r
+}

// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
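A quick usage sketch of the new helper, with illustrative inputs:

// ToHexArray hex-encodes each element with a "0x" prefix:
//   ToHexArray([][]byte{{0x01}, {0xab, 0xcd}}) -> []string{"0x01", "0xabcd"}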
@@ -31,14 +31,15 @@ import (

var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)

-// Contract contains information about a compiled contract, alongside its code.
+// Contract contains information about a compiled contract, alongside its code and runtime code.
type Contract struct {
-	Code string       `json:"code"`
-	Info ContractInfo `json:"info"`
+	Code        string       `json:"code"`
+	RuntimeCode string       `json:"runtime-code"`
+	Info        ContractInfo `json:"info"`
}

// ContractInfo contains information about a compiled contract, including access
-// to the ABI definition, user and developer docs, and metadata.
+// to the ABI definition, source mapping, user and developer docs, and metadata.
//
// Depending on the source, language version, compiler version, and compiler
// options will provide information about how the contract was compiled.
@@ -48,6 +49,8 @@ type ContractInfo struct {
	LanguageVersion string      `json:"languageVersion"`
	CompilerVersion string      `json:"compilerVersion"`
	CompilerOptions string      `json:"compilerOptions"`
+	SrcMap          string      `json:"srcMap"`
+	SrcMapRuntime   string      `json:"srcMapRuntime"`
	AbiDefinition   interface{} `json:"abiDefinition"`
	UserDoc         interface{} `json:"userDoc"`
	DeveloperDoc    interface{} `json:"developerDoc"`
@@ -63,14 +66,16 @@ type Solidity struct {
// --combined-output format
type solcOutput struct {
	Contracts map[string]struct {
-		Bin, Abi, Devdoc, Userdoc, Metadata string
+		BinRuntime                                  string `json:"bin-runtime"`
+		SrcMapRuntime                               string `json:"srcmap-runtime"`
+		Bin, SrcMap, Abi, Devdoc, Userdoc, Metadata string
	}
	Version string
}

func (s *Solidity) makeArgs() []string {
	p := []string{
-		"--combined-json", "bin,abi,userdoc,devdoc",
+		"--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc",
		"--optimize", // code optimizer switched on
	}
	if s.Major > 0 || s.Minor > 4 || s.Patch > 6 {
@@ -157,7 +162,7 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro
// provided source, language and compiler version, and compiler options are all
// passed through into the Contract structs.
//
-// The solc output is expected to contain ABI, user docs, and dev docs.
+// The solc output is expected to contain ABI, source mapping, user docs, and dev docs.
//
// Returns an error if the JSON is malformed or missing data, or if the JSON
// embedded within the JSON is malformed.
@@ -184,13 +189,16 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin
			return nil, fmt.Errorf("solc: error reading dev doc: %v", err)
		}
		contracts[name] = &Contract{
-			Code: "0x" + info.Bin,
+			Code:        "0x" + info.Bin,
+			RuntimeCode: "0x" + info.BinRuntime,
			Info: ContractInfo{
				Source:          source,
				Language:        "Solidity",
				LanguageVersion: languageVersion,
				CompilerVersion: compilerVersion,
				CompilerOptions: compilerOptions,
+				SrcMap:          info.SrcMap,
+				SrcMapRuntime:   info.SrcMapRuntime,
				AbiDefinition:   abi,
				UserDoc:         userdoc,
				DeveloperDoc:    devdoc,
|
||||
return sigHash(header)
|
||||
}
|
||||
|
||||
// Close implements consensus.Engine. It's a noop for clique as there is are no background threads.
|
||||
// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
|
||||
func (c *Clique) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
@@ -37,27 +37,28 @@ type API struct {
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
-func (api *API) GetWork() ([3]string, error) {
+// result[3] - hex encoded block number
+func (api *API) GetWork() ([4]string, error) {
	if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
-		return [3]string{}, errors.New("not supported")
+		return [4]string{}, errors.New("not supported")
	}

	var (
-		workCh = make(chan [3]string, 1)
+		workCh = make(chan [4]string, 1)
		errc   = make(chan error, 1)
	)

	select {
	case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
	case <-api.ethash.exitCh:
-		return [3]string{}, errEthashStopped
+		return [4]string{}, errEthashStopped
	}

	select {
	case work := <-workCh:
		return work, nil
	case err := <-errc:
-		return [3]string{}, err
+		return [4]string{}, err
	}
}
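The fourth element lets an external miner see which block height a work package belongs to. A sketch of consuming the widened result, with the field order per the comments above:

// work, err := api.GetWork() // or the eth_getWork RPC
// powHash, seedHash, target, number := work[0], work[1], work[2], work[3]
// number is a hex quantity (e.g. "0x42"); the first three are 32-byte hex values.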
@@ -432,7 +432,7 @@ type hashrate struct {
// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
	errc chan error
-	res  chan [3]string
+	res  chan [4]string
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
@@ -107,7 +107,7 @@ func TestRemoteSealer(t *testing.T) {
	ethash.Seal(nil, block, results, nil)

	var (
-		work [3]string
+		work [4]string
		err  error
	)
	if work, err = api.GetWork(); err != nil || work[0] != sealhash.Hex() {
@@ -30,6 +30,7 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
@@ -193,7 +194,7 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {

		results      chan<- *types.Block
		currentBlock *types.Block
-		currentWork  [3]string
+		currentWork  [4]string

		notifyTransport = &http.Transport{}
		notifyClient    = &http.Client{
@@ -234,12 +235,14 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {
	// result[0], 32 bytes hex encoded current block header pow-hash
	// result[1], 32 bytes hex encoded seed hash used for DAG
	// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+	// result[3], hex encoded block number
	makeWork := func(block *types.Block) {
		hash := ethash.SealHash(block.Header())

		currentWork[0] = hash.Hex()
		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
+		currentWork[3] = hexutil.EncodeBig(block.Number())

		// Trace the seal work fetched by remote sealer.
		currentBlock = block
@@ -151,6 +151,38 @@ func (self *ENS) Resolve(name string) (common.Hash, error) {
	return common.BytesToHash(ret[:]), nil
}

+// Addr is a non-transactional call that returns the address associated with a name.
+func (self *ENS) Addr(name string) (common.Address, error) {
+	node := EnsNode(name)
+
+	resolver, err := self.getResolver(node)
+	if err != nil {
+		return common.Address{}, err
+	}
+
+	ret, err := resolver.Addr(node)
+	if err != nil {
+		return common.Address{}, err
+	}
+
+	return common.BytesToAddress(ret[:]), nil
+}
+
+// SetAddr sets the address associated with a name. Only works if the caller
+// owns the name, and the associated resolver implements a `setAddress` function.
+func (self *ENS) SetAddr(name string, addr common.Address) (*types.Transaction, error) {
+	node := EnsNode(name)
+
+	resolver, err := self.getResolver(node)
+	if err != nil {
+		return nil, err
+	}
+
+	opts := self.TransactOpts
+	opts.GasLimit = 200000
+	return resolver.Contract.SetAddr(&opts, node, addr)
+}
+
// Register registers a new domain name for the caller, making them the owner of the new name.
// Only works if the registrar for the parent domain implements the FIFS registrar protocol.
func (self *ENS) Register(name string) (*types.Transaction, error) {
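A fragment sketching how a caller might use the new resolution call; the ens value is assumed to be an *ENS wired to a backend (as in the test below), and the name is illustrative:

addr, err := ens.Addr("mysite.eth")
if err != nil {
	return err // name unregistered or no resolver set
}
fmt.Println(addr.Hex())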
@@ -22,16 +22,18 @@ import (

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/ens/contract"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

var (
-	key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-	name   = "my name on ENS"
-	hash   = crypto.Keccak256Hash([]byte("my content"))
-	addr   = crypto.PubkeyToAddress(key.PublicKey)
+	key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	name     = "my name on ENS"
+	hash     = crypto.Keccak256Hash([]byte("my content"))
+	addr     = crypto.PubkeyToAddress(key.PublicKey)
+	testAddr = common.HexToAddress("0x1234123412341234123412341234123412341234")
)

func TestENS(t *testing.T) {
@@ -74,4 +76,19 @@ func TestENS(t *testing.T) {
	if vhost != hash {
		t.Fatalf("resolve error, expected %v, got %v", hash.Hex(), vhost.Hex())
	}
+
+	// set the address for the name
+	if _, err = ens.SetAddr(name, testAddr); err != nil {
+		t.Fatalf("can't set address: %v", err)
+	}
+	contractBackend.Commit()
+
+	// Try to resolve the name to an address
+	recoveredAddr, err := ens.Addr(name)
+	if err != nil {
+		t.Fatalf("expected no error, got %v", err)
+	}
+	if recoveredAddr != testAddr {
+		t.Fatalf("resolve error, expected %v, got %v", testAddr.Hex(), recoveredAddr.Hex())
+	}
}
@@ -109,9 +109,9 @@ func PrintDisassembled(code string) error {
	it := NewInstructionIterator(script)
	for it.Next() {
		if it.Arg() != nil && 0 < len(it.Arg()) {
-			fmt.Printf("%06v: %v 0x%x\n", it.PC(), it.Op(), it.Arg())
+			fmt.Printf("%05x: %v 0x%x\n", it.PC(), it.Op(), it.Arg())
		} else {
-			fmt.Printf("%06v: %v\n", it.PC(), it.Op())
+			fmt.Printf("%05x: %v\n", it.PC(), it.Op())
		}
	}
	return it.Error()
@@ -124,9 +124,9 @@ func Disassemble(script []byte) ([]string, error) {
	it := NewInstructionIterator(script)
	for it.Next() {
		if it.Arg() != nil && 0 < len(it.Arg()) {
-			instrs = append(instrs, fmt.Sprintf("%06v: %v 0x%x\n", it.PC(), it.Op(), it.Arg()))
+			instrs = append(instrs, fmt.Sprintf("%05x: %v 0x%x\n", it.PC(), it.Op(), it.Arg()))
		} else {
-			instrs = append(instrs, fmt.Sprintf("%06v: %v\n", it.PC(), it.Op()))
+			instrs = append(instrs, fmt.Sprintf("%05x: %v\n", it.PC(), it.Op()))
		}
	}
	if err := it.Error(); err != nil {
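The program counter is now printed as fixed-width hexadecimal instead of zero-padded decimal. A sketch of the effect with an illustrative PC and opcode:

// Effect of the format-string change:
//   old: fmt.Printf("%06v: %v\n", 255, "PUSH1") -> "000255: PUSH1"
//   new: fmt.Printf("%05x: %v\n", 255, "PUSH1") -> "000ff: PUSH1"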
@@ -53,12 +53,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
	if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
		return ErrKnownBlock
	}
-	if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
-		if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
-			return consensus.ErrUnknownAncestor
-		}
-		return consensus.ErrPrunedAncestor
-	}
	// Header validity is known at this point, check the uncles and transactions
	header := block.Header()
	if err := v.engine.VerifyUncles(v.bc, block); err != nil {
@@ -70,6 +64,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
	if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
		return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
	}
+	if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
+		if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
+			return consensus.ErrUnknownAncestor
+		}
+		return consensus.ErrPrunedAncestor
+	}
	return nil
}
@@ -47,7 +47,10 @@ import (
)

var (
-	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
+	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
+	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)
@@ -55,6 +58,7 @@ var (
const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
+	receiptsCacheLimit  = 32
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
@@ -67,9 +71,10 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
-	Disabled      bool          // Whether to disable trie write caching (archive node)
-	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
-	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+	Disabled       bool          // Whether to disable trie write caching (archive node)
+	TrieCleanLimit int           // Memory allowance (MB) to use for caching trie nodes in memory
+	TrieDirtyLimit int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+	TrieTimeLimit  time.Duration // Time limit after which to flush the current in-memory trie to disk
}
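A sketch of constructing the new configuration explicitly; the values are illustrative, and the trailing arguments to NewBlockChain mirror the signature shown further below:

// Assumes db, chainConfig and engine are already set up.
cache := &core.CacheConfig{
	Disabled:       false,
	TrieCleanLimit: 256,             // MB of clean trie nodes kept in memory
	TrieDirtyLimit: 256,             // MB of dirty trie nodes before flushing to disk
	TrieTimeLimit:  5 * time.Minute, // max interval between trie flushes
}
chain, err := core.NewBlockChain(db, cache, chainConfig, engine, vm.Config{}, nil)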
// BlockChain represents the canonical chain given a database with a genesis
@@ -111,11 +116,12 @@ type BlockChain struct {
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

-	stateCache   state.Database // State database to reuse between imports (contains state cache)
-	bodyCache    *lru.Cache     // Cache for the most recent block bodies
-	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
-	blockCache   *lru.Cache     // Cache for the most recent entire blocks
-	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
+	stateCache    state.Database // State database to reuse between imports (contains state cache)
+	bodyCache     *lru.Cache     // Cache for the most recent block bodies
+	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
+	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
+	blockCache    *lru.Cache     // Cache for the most recent entire blocks
+	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
@@ -138,12 +144,14 @@ type BlockChain struct {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
-			TrieNodeLimit: 256 * 1024 * 1024,
-			TrieTimeLimit: 5 * time.Minute,
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
+	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)
@@ -153,11 +161,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
-		stateCache:     state.NewDatabase(db),
+		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
+		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
@@ -280,6 +289,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
+	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

@@ -388,6 +398,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}

+// StateCache returns the caching database underpinning the blockchain instance.
+func (bc *BlockChain) StateCache() state.Database {
+	return bc.stateCache
+}
+
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
@@ -433,7 +448,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
-		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+		if block == nil {
+			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
+		}
+		(*head) = block
	}
}

@@ -549,6 +568,17 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	return rawdb.HasBody(bc.db, hash, number)
}

+// HasFastBlock checks if a fast block is fully present in the database or not.
+func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
+	if !bc.HasBlock(hash, number) {
+		return false
+	}
+	if bc.receiptsCache.Contains(hash) {
+		return true
+	}
+	return rawdb.HasReceipts(bc.db, hash, number)
+}
+
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
@@ -603,11 +633,16 @@ func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
+	if receipts, ok := bc.receiptsCache.Get(hash); ok {
+		return receipts.(types.Receipts)
+	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
-	return rawdb.ReadReceipts(bc.db, hash, *number)
+	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
+	bc.receiptsCache.Add(hash, receipts)
+	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
@@ -926,7 +961,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
	// If we exceeded our memory allowance, flush matured singleton nodes to disk
	var (
		nodes, imgs = triedb.Size()
-		limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
+		limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
	)
	if nodes > limit || imgs > 4*1024*1024 {
		triedb.Cap(limit - ethdb.IdealBatchSize)
@@ -990,7 +1025,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
	}
	// Write the positional metadata for transaction/receipt lookups and preimages
	rawdb.WriteTxLookupEntries(batch, block)
-	rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
+	rawdb.WritePreimages(batch, state.Preimages())

	status = CanonStatTy
} else {
@@ -1008,6 +1043,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
	return status, nil
}

+// addFutureBlock checks if the block is within the max allowed window to get
+// accepted for future processing, and returns an error if the block is too far
+// ahead and was not added.
+func (bc *BlockChain) addFutureBlock(block *types.Block) error {
+	max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
+	if block.Time().Cmp(max) > 0 {
+		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
+	}
+	bc.futureBlocks.Add(block.Hash(), block)
+	return nil
+}
// InsertChain attempts to insert the given batch of blocks in to the canonical
|
||||
// chain or, otherwise, create a fork. If an error is returned it will return
|
||||
// the index number of the failing block as well an error describing what went
|
||||
@@ -1015,18 +1062,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
|
||||
//
|
||||
// After insertion is done, all accumulated events will be fired.
|
||||
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||
n, events, logs, err := bc.insertChain(chain)
|
||||
bc.PostChainEvents(events, logs)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// insertChain will execute the actual chain insertion and event aggregation. The
|
||||
// only reason this method exists as a separate one is to make locking cleaner
|
||||
// with deferred statements.
|
||||
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
|
||||
// Sanity check that we have something meaningful to import
|
||||
if len(chain) == 0 {
|
||||
return 0, nil, nil, nil
|
||||
return 0, nil
|
||||
}
|
||||
// Do a sanity check that the provided chain is actually ordered and linked
|
||||
for i := 1; i < len(chain); i++ {
|
||||
@@ -1035,16 +1073,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
|
||||
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
|
||||
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
|
||||
|
||||
return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
|
||||
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
|
||||
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
|
||||
}
|
||||
}
|
||||
// Pre-checks passed, start the full block imports
|
||||
bc.wg.Add(1)
|
||||
defer bc.wg.Done()
|
||||
|
||||
bc.chainmu.Lock()
|
||||
defer bc.chainmu.Unlock()
|
||||
n, events, logs, err := bc.insertChain(chain, true)
|
||||
bc.chainmu.Unlock()
|
||||
bc.wg.Done()
|
||||
|
||||
bc.PostChainEvents(events, logs)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// insertChain is the internal implementation of insertChain, which assumes that
|
||||
// 1) chains are contiguous, and 2) The chain mutex is held.
|
||||
//
|
||||
// This method is split out so that import batches that require re-injecting
|
||||
// historical blocks can do so without releasing the lock, which could lead to
|
||||
// racey behaviour. If a sidechain import is in progress, and the historic state
|
||||
// is imported, but then new canon-head is added before the actual sidechain
|
||||
// completes, then the historic state could be pruned again
|
||||
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
|
||||
// If the chain is terminating, don't even bother starting u
|
||||
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil, nil, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@@ -1061,16 +1119,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty

for i, block := range chain {
headers[i] = block.Header()
seals[i] = true
seals[i] = verifySeals
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)

// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// Peek the error for the first block to decide the directing import logic
it := newInsertIterator(chain, results, bc.Validator())

// Iterate over the blocks and insert when the verifier permits
for i, block := range chain {
block, err := it.next()
switch {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
case err == consensus.ErrPrunedAncestor:
return bc.insertSidechain(it)
// First block is future, shove it (and all children) to the future queue (unknown ancestor)
case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
block, err = it.next()
}
stats.queued += it.processed()
stats.ignored += it.remaining()

// If there are any still remaining, mark as ignored
return it.index, events, coalescedLogs, err

// First block (and state) is known
// 1. We did a roll-back, and should now do a re-import
// 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
// from the canonical chain, which has not been verified.
case err == ErrKnownBlock:
// Skip all known blocks that are behind us
current := bc.CurrentBlock().NumberU64()
for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
stats.ignored++
block, err = it.next()
}
// Falls through to the block import

// Some other error occurred, abort
case err != nil:
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil; block, err = it.next() {
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
@@ -1079,115 +1177,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return i, events, coalescedLogs, ErrBlacklistedHash
return it.index, events, coalescedLogs, ErrBlacklistedHash
}
// Wait for the block's verification to complete
bstart := time.Now()
// Retrieve the parent block and its state to execute on top
start := time.Now()
|
||||
err := <-results
|
||||
if err == nil {
|
||||
err = bc.Validator().ValidateBody(block)
|
||||
}
|
||||
switch {
|
||||
case err == ErrKnownBlock:
|
||||
// Block and state both already known. However if the current block is below
|
||||
// this number we did a rollback and we should reimport it nonetheless.
|
||||
if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
|
||||
stats.ignored++
|
||||
continue
|
||||
}
|
||||
|
||||
case err == consensus.ErrFutureBlock:
|
||||
// Allow up to MaxFuture seconds in the future blocks. If this limit is exceeded
// the chain is discarded and processed at a later time if given.
|
||||
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
|
||||
if block.Time().Cmp(max) > 0 {
|
||||
return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
|
||||
}
|
||||
bc.futureBlocks.Add(block.Hash(), block)
|
||||
stats.queued++
|
||||
continue
|
||||
|
||||
case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
|
||||
bc.futureBlocks.Add(block.Hash(), block)
|
||||
stats.queued++
|
||||
continue
|
||||
|
||||
case err == consensus.ErrPrunedAncestor:
|
||||
// Block competing with the canonical chain, store in the db, but don't process
|
||||
// until the competitor TD goes above the canonical TD
|
||||
currentBlock := bc.CurrentBlock()
|
||||
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
|
||||
externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
|
||||
if localTd.Cmp(externTd) > 0 {
|
||||
if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
|
||||
return i, events, coalescedLogs, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Competitor chain beat canonical, gather all blocks from the common ancestor
|
||||
var winner []*types.Block
|
||||
|
||||
parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
|
||||
for !bc.HasState(parent.Root()) {
|
||||
winner = append(winner, parent)
|
||||
parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
|
||||
}
|
||||
for j := 0; j < len(winner)/2; j++ {
|
||||
winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
|
||||
}
|
||||
// Import all the pruned blocks to make the state available
|
||||
bc.chainmu.Unlock()
|
||||
_, evs, logs, err := bc.insertChain(winner)
|
||||
bc.chainmu.Lock()
|
||||
events, coalescedLogs = evs, logs
|
||||
|
||||
if err != nil {
|
||||
return i, events, coalescedLogs, err
|
||||
}
|
||||
|
||||
case err != nil:
|
||||
bc.reportBlock(block, nil, err)
|
||||
return i, events, coalescedLogs, err
|
||||
}
|
||||
// Create a new statedb using the parent block and report an
|
||||
// error if it fails.
|
||||
var parent *types.Block
|
||||
if i == 0 {
|
||||
parent := it.previous()
|
||||
if parent == nil {
|
||||
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
|
||||
} else {
|
||||
parent = chain[i-1]
|
||||
}
|
||||
state, err := state.New(parent.Root(), bc.stateCache)
|
||||
if err != nil {
|
||||
return i, events, coalescedLogs, err
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
// Process block using the parent state as reference point.
|
||||
t0 := time.Now()
|
||||
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
|
||||
t1 := time.Now()
|
||||
if err != nil {
|
||||
bc.reportBlock(block, receipts, err)
|
||||
return i, events, coalescedLogs, err
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
// Validate the state using the default validator
|
||||
err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
|
||||
if err != nil {
|
||||
if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
|
||||
bc.reportBlock(block, receipts, err)
|
||||
return i, events, coalescedLogs, err
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
proctime := time.Since(bstart)
|
||||
t2 := time.Now()
|
||||
proctime := time.Since(start)
|
||||
|
||||
// Write the block to the chain and get the status.
|
||||
status, err := bc.WriteBlockWithState(block, receipts, state)
|
||||
t3 := time.Now()
|
||||
if err != nil {
|
||||
return i, events, coalescedLogs, err
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
blockInsertTimer.UpdateSince(start)
|
||||
blockExecutionTimer.Update(t1.Sub(t0))
|
||||
blockValidationTimer.Update(t2.Sub(t1))
|
||||
blockWriteTimer.Update(t3.Sub(t2))
|
||||
switch status {
|
||||
case CanonStatTy:
|
||||
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
|
||||
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
|
||||
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
|
||||
"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
|
||||
"elapsed", common.PrettyDuration(time.Since(start)),
|
||||
"root", block.Root())
|
||||
|
||||
coalescedLogs = append(coalescedLogs, logs...)
|
||||
blockInsertTimer.UpdateSince(bstart)
|
||||
events = append(events, ChainEvent{block, block.Hash(), logs})
|
||||
lastCanon = block
|
||||
|
||||
@@ -1195,78 +1231,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
|
||||
bc.gcproc += proctime
|
||||
|
||||
case SideStatTy:
|
||||
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
|
||||
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
|
||||
|
||||
blockInsertTimer.UpdateSince(bstart)
|
||||
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
|
||||
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
|
||||
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
|
||||
"root", block.Root())
|
||||
events = append(events, ChainSideEvent{block})
|
||||
}
|
||||
blockInsertTimer.UpdateSince(start)
|
||||
stats.processed++
|
||||
stats.usedGas += usedGas
|
||||
|
||||
cache, _ := bc.stateCache.TrieDB().Size()
|
||||
stats.report(chain, i, cache)
|
||||
stats.report(chain, it.index, cache)
|
||||
}
|
||||
// Any blocks remaining here? The only ones we care about are the future ones
|
||||
if block != nil && err == consensus.ErrFutureBlock {
|
||||
if err := bc.addFutureBlock(block); err != nil {
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
block, err = it.next()
|
||||
|
||||
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
|
||||
if err := bc.addFutureBlock(block); err != nil {
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
stats.queued++
|
||||
}
|
||||
}
|
||||
stats.ignored += it.remaining()
|
||||
|
||||
// Append a single chain head event if we've progressed the chain
|
||||
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
|
||||
events = append(events, ChainHeadEvent{lastCanon})
|
||||
}
|
||||
return 0, events, coalescedLogs, nil
|
||||
return it.index, events, coalescedLogs, err
|
||||
}
|
||||
|
||||
// insertStats tracks and reports on block insertion.
|
||||
type insertStats struct {
|
||||
queued, processed, ignored int
|
||||
usedGas uint64
|
||||
lastIndex int
|
||||
startTime mclock.AbsTime
|
||||
}
|
||||
|
||||
// statsReportLimit is the time limit during import and export after which we
|
||||
// always print out progress. This avoids the user wondering what's going on.
|
||||
const statsReportLimit = 8 * time.Second
|
||||
|
||||
// report prints statistics if some number of blocks have been processed
|
||||
// or more than a few seconds have passed since the last message.
|
||||
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
|
||||
// Fetch the timings for the batch
|
||||
// insertSidechain is called when an import batch hits upon a pruned ancestor
|
||||
// error, which happens when a sidechain with a sufficiently old fork-block is
|
||||
// found.
|
||||
//
|
||||
// The method writes all (header-and-body-valid) blocks to disk, then tries to
|
||||
// switch over to the new chain if the TD exceeded the current chain.
|
||||
func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
|
||||
var (
|
||||
now = mclock.Now()
|
||||
elapsed = time.Duration(now) - time.Duration(st.startTime)
|
||||
externTd *big.Int
|
||||
current = bc.CurrentBlock().NumberU64()
|
||||
)
|
||||
// If we're at the last block of the batch or report period reached, log
|
||||
if index == len(chain)-1 || elapsed >= statsReportLimit {
|
||||
var (
|
||||
end = chain[index]
|
||||
txs = countTransactions(chain[st.lastIndex : index+1])
|
||||
)
|
||||
context := []interface{}{
|
||||
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
|
||||
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
|
||||
"number", end.Number(), "hash", end.Hash(),
|
||||
}
|
||||
if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
|
||||
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
|
||||
}
|
||||
context = append(context, []interface{}{"cache", cache}...)
|
||||
// The first sidechain block error is already verified to be ErrPrunedAncestor.
|
||||
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
|
||||
// ones. Any other error means that the block is invalid, and should not be written
// to disk.
|
||||
block, err := it.current(), consensus.ErrPrunedAncestor
|
||||
for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
|
||||
// Check the canonical state root for that number
|
||||
if number := block.NumberU64(); current >= number {
|
||||
if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
|
||||
// This is most likely a shadow-state attack. When a fork is imported into the
// database, and it eventually reaches a block height which is not pruned, we
// just found that the state already exists! This means that the sidechain block
// refers to a state which already exists in our canon chain.
//
|
||||
// If left unchecked, we would now proceed importing the blocks, without actually
|
||||
// having verified the state of the previous blocks.
|
||||
log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
|
||||
|
||||
if st.queued > 0 {
|
||||
context = append(context, []interface{}{"queued", st.queued}...)
|
||||
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
|
||||
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
|
||||
// mechanism.
|
||||
return it.index, nil, nil, errors.New("sidechain ghost-state attack")
|
||||
}
|
||||
}
|
||||
if st.ignored > 0 {
|
||||
context = append(context, []interface{}{"ignored", st.ignored}...)
|
||||
if externTd == nil {
|
||||
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
|
||||
}
|
||||
log.Info("Imported new chain segment", context...)
|
||||
externTd = new(big.Int).Add(externTd, block.Difficulty())
|
||||
|
||||
*st = insertStats{startTime: now, lastIndex: index + 1}
|
||||
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
|
||||
start := time.Now()
|
||||
if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
|
||||
return it.index, nil, nil, err
|
||||
}
|
||||
log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
|
||||
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
|
||||
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
|
||||
"root", block.Root())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func countTransactions(chain []*types.Block) (c int) {
|
||||
for _, b := range chain {
|
||||
c += len(b.Transactions())
|
||||
// At this point, we've written all sidechain blocks to the database. Loop ended
// either on some other error or all were processed. If there was some other
// error, we can ignore the rest of those blocks.
//
|
||||
// If the externTd was larger than our local TD, we now need to reimport the previous
|
||||
// blocks to regenerate the required state
|
||||
localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
|
||||
if localTd.Cmp(externTd) > 0 {
|
||||
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
|
||||
return it.index, nil, nil, err
|
||||
}
|
||||
return c
|
||||
// Gather all the sidechain hashes (full blocks may be memory heavy)
|
||||
var (
|
||||
hashes []common.Hash
|
||||
numbers []uint64
|
||||
)
|
||||
parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
|
||||
for parent != nil && !bc.HasState(parent.Root) {
|
||||
hashes = append(hashes, parent.Hash())
|
||||
numbers = append(numbers, parent.Number.Uint64())
|
||||
|
||||
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
|
||||
}
|
||||
if parent == nil {
|
||||
return it.index, nil, nil, errors.New("missing parent")
|
||||
}
|
||||
// Import all the pruned blocks to make the state available
|
||||
var (
|
||||
blocks []*types.Block
|
||||
memory common.StorageSize
|
||||
)
|
||||
for i := len(hashes) - 1; i >= 0; i-- {
|
||||
// Append the next block to our batch
|
||||
block := bc.GetBlock(hashes[i], numbers[i])
|
||||
|
||||
blocks = append(blocks, block)
|
||||
memory += block.Size()
|
||||
|
||||
// If memory use grew too large, import and continue. Sadly we need to discard
|
||||
// all raised events and logs from notifications since we're too heavy on the
|
||||
// memory here.
|
||||
if len(blocks) >= 2048 || memory > 64*1024*1024 {
|
||||
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
|
||||
if _, _, _, err := bc.insertChain(blocks, false); err != nil {
|
||||
return 0, nil, nil, err
|
||||
}
|
||||
blocks, memory = blocks[:0], 0
|
||||
|
||||
// If the chain is terminating, stop processing blocks
|
||||
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
|
||||
log.Debug("Premature abort during blocks processing")
|
||||
return 0, nil, nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(blocks) > 0 {
|
||||
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
|
||||
return bc.insertChain(blocks, false)
|
||||
}
|
||||
return 0, nil, nil, nil
|
||||
}
|
||||
|
||||
// reorg takes two blocks, an old chain and a new chain and will reconstruct the blocks and insert them
@@ -1441,8 +1552,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
|
||||
bc.addBadBlock(block)
|
||||
|
||||
var receiptString string
|
||||
for _, receipt := range receipts {
|
||||
receiptString += fmt.Sprintf("\t%v\n", receipt)
|
||||
for i, receipt := range receipts {
|
||||
receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
|
||||
i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
|
||||
receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
|
||||
}
|
||||
log.Error(fmt.Sprintf(`
|
||||
########## BAD BLOCK #########
|
||||
|
core/blockchain_insert.go (new file, 143 lines)
@@ -0,0 +1,143 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
usedGas uint64
lastIndex int
startTime mclock.AbsTime
}

// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
// Fetch the timings for the batch
var (
now = mclock.Now()
elapsed = time.Duration(now) - time.Duration(st.startTime)
)
// If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
// Count the number of transactions in this segment
var txs int
for _, block := range chain[st.lastIndex : index+1] {
txs += len(block.Transactions())
}
end := chain[index]

// Assemble the log context and send it to the logger
context := []interface{}{
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
}
if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)

if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
log.Info("Imported new chain segment", context...)

// Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1}
}
}
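The mgasps figure above folds two unit conversions into one expression: usedGas is in plain gas units and elapsed is in nanoseconds, so multiplying by 1000 gives millions of gas per second. A minimal standalone check of that arithmetic (the `mgasps` helper name is ours, not part of the diff):

```go
package main

import (
	"fmt"
	"time"
)

// mgasps mirrors the expression used in report(): gas * 1000 / nanoseconds
// equals millions-of-gas per second, since 1e9 ns/s cancels against 1e6 gas/mgas.
func mgasps(usedGas uint64, elapsed time.Duration) float64 {
	return float64(usedGas) * 1000 / float64(elapsed)
}

func main() {
	// 8,000,000 gas processed in one second should print 8.
	fmt.Println(mgasps(8000000, time.Second))
}
```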
// insertIterator is a helper to assist during chain import.
type insertIterator struct {
chain types.Blocks
results <-chan error
index int
validator Validator
}

// newInsertIterator creates a new iterator based on the given blocks, which are
// assumed to be a contiguous chain.
func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
return &insertIterator{
chain: chain,
results: results,
index: -1,
validator: validator,
}
}

// next returns the next block in the iterator, along with any potential validation
// error for that block. When the end is reached, it will return (nil, nil).
func (it *insertIterator) next() (*types.Block, error) {
if it.index+1 >= len(it.chain) {
it.index = len(it.chain)
return nil, nil
}
it.index++
if err := <-it.results; err != nil {
return it.chain[it.index], err
}
return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
}
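For intuition, the iterator walks the block slice in lockstep with the verification channel: VerifyHeaders pushes one error per header, in order, and each next() call drains exactly one result before running body validation. A toy, self-contained analogue of that pattern (none of these names appear in the diff):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	items := []string{"block-1", "block-2", "block-3"}

	// Stand-in for the VerifyHeaders result channel: one error per item, in order.
	results := make(chan error, len(items))
	results <- nil
	results <- nil
	results <- errors.New("bad seal")

	// Stand-in for next(): advance through the slice, draining one result each step.
	for i := 0; i < len(items); i++ {
		if err := <-results; err != nil {
			fmt.Println("aborting at", items[i], "->", err)
			return
		}
		fmt.Println("imported", items[i])
	}
}
```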
// current returns the current block that's being processed.
func (it *insertIterator) current() *types.Block {
if it.index < 0 || it.index+1 >= len(it.chain) {
return nil
}
return it.chain[it.index]
}

// previous returns the previous block that was being processed, or nil.
func (it *insertIterator) previous() *types.Block {
if it.index < 1 {
return nil
}
return it.chain[it.index-1]
}

// first returns the first block in the iterator.
func (it *insertIterator) first() *types.Block {
return it.chain[0]
}

// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
return len(it.chain) - it.index
}

// processed returns the number of processed blocks.
func (it *insertIterator) processed() int {
return it.index + 1
}
@@ -579,11 +579,11 @@ func testInsertNonceError(t *testing.T, full bool) {
|
||||
blockchain.hc.engine = blockchain.engine
|
||||
failRes, err = blockchain.InsertHeaderChain(headers, 1)
|
||||
}
|
||||
// Check that the returned error indicates the failure.
|
||||
// Check that the returned error indicates the failure
|
||||
if failRes != failAt {
|
||||
t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
|
||||
t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
|
||||
}
|
||||
// Check that all no blocks after the failing block have been inserted.
|
||||
// Check that all blocks after the failing block have been inserted
|
||||
for j := 0; j < i-failAt; j++ {
|
||||
if full {
|
||||
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
|
||||
@@ -1345,7 +1345,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
|
||||
t.Fatalf("failed to insert shared chain: %v", err)
|
||||
}
|
||||
if _, err := chain.InsertChain(original); err != nil {
|
||||
t.Fatalf("failed to insert shared chain: %v", err)
|
||||
t.Fatalf("failed to insert original chain: %v", err)
|
||||
}
|
||||
// Ensure that the state associated with the forking point is pruned away
|
||||
if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
|
||||
|
@@ -53,14 +53,14 @@ type ChainIndexerChain interface {
|
||||
// CurrentHeader retrieves the latest locally known header.
|
||||
CurrentHeader() *types.Header
|
||||
|
||||
// SubscribeChainEvent subscribes to new head header notifications.
|
||||
SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription
|
||||
// SubscribeChainHeadEvent subscribes to new head header notifications.
|
||||
SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
|
||||
}
|
||||
|
||||
// ChainIndexer does a post-processing job for equally sized sections of the
|
||||
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
|
||||
// ChainEventLoop in a goroutine.
|
||||
// ChainHeadEventLoop in a goroutine.
|
||||
//
|
||||
// Further child ChainIndexers can be added which use the output of the parent
|
||||
// section indexer. These child indexers receive new head notifications only
|
||||
@@ -142,8 +142,8 @@ func (c *ChainIndexer) AddCheckpoint(section uint64, shead common.Hash) {
|
||||
// cascading background processing. Children do not need to be started, they
|
||||
// are notified about new events by their parents.
|
||||
func (c *ChainIndexer) Start(chain ChainIndexerChain) {
|
||||
events := make(chan ChainEvent, 10)
|
||||
sub := chain.SubscribeChainEvent(events)
|
||||
events := make(chan ChainHeadEvent, 10)
|
||||
sub := chain.SubscribeChainHeadEvent(events)
|
||||
|
||||
go c.eventLoop(chain.CurrentHeader(), events, sub)
|
||||
}
|
||||
@@ -190,7 +190,7 @@ func (c *ChainIndexer) Close() error {
|
||||
// eventLoop is a secondary - optional - event loop of the indexer which is only
|
||||
// started for the outermost indexer to push chain head events into a processing
|
||||
// queue.
|
||||
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainEvent, sub event.Subscription) {
|
||||
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
|
||||
// Mark the chain indexer as active, requiring an additional teardown
|
||||
atomic.StoreUint32(&c.active, 1)
|
||||
|
||||
@@ -219,13 +219,13 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainE
|
||||
}
|
||||
header := ev.Block.Header()
|
||||
if header.ParentHash != prevHash {
|
||||
// Reorg to the common ancestor (might not exist in light sync mode, skip reorg then)
|
||||
// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
|
||||
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
|
||||
|
||||
// TODO(karalabe): This operation is expensive and might block, causing the event system to
|
||||
// potentially also lock up. We need to do this on a different thread somehow.
if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
|
||||
c.newHead(h.Number.Uint64(), true)
|
||||
if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
|
||||
if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
|
||||
c.newHead(h.Number.Uint64(), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.newHead(header.Number.Uint64(), false)
|
||||
|
@@ -33,12 +33,11 @@ import (
|
||||
// BlockGen creates blocks for testing.
|
||||
// See GenerateChain for a detailed explanation.
|
||||
type BlockGen struct {
|
||||
i int
|
||||
parent *types.Block
|
||||
chain []*types.Block
|
||||
chainReader consensus.ChainReader
|
||||
header *types.Header
|
||||
statedb *state.StateDB
|
||||
i int
|
||||
parent *types.Block
|
||||
chain []*types.Block
|
||||
header *types.Header
|
||||
statedb *state.StateDB
|
||||
|
||||
gasPool *GasPool
|
||||
txs []*types.Transaction
|
||||
@@ -138,7 +137,7 @@ func (b *BlockGen) AddUncle(h *types.Header) {
|
||||
// For index -1, PrevBlock returns the parent block given to GenerateChain.
|
||||
func (b *BlockGen) PrevBlock(index int) *types.Block {
|
||||
if index >= b.i {
|
||||
panic("block index out of range")
|
||||
panic(fmt.Errorf("block index %d out of range (%d,%d)", index, -1, b.i))
|
||||
}
|
||||
if index == -1 {
|
||||
return b.parent
|
||||
@@ -154,7 +153,8 @@ func (b *BlockGen) OffsetTime(seconds int64) {
|
||||
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
|
||||
panic("block time out of range")
|
||||
}
|
||||
b.header.Difficulty = b.engine.CalcDifficulty(b.chainReader, b.header.Time.Uint64(), b.parent.Header())
|
||||
chainreader := &fakeChainReader{config: b.config}
|
||||
b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header())
|
||||
}
|
||||
|
||||
// GenerateChain creates a chain of n blocks. The first block's
|
||||
@@ -174,14 +174,10 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
||||
config = params.TestChainConfig
|
||||
}
|
||||
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
|
||||
chainreader := &fakeChainReader{config: config}
|
||||
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
|
||||
// TODO(karalabe): This is needed for clique, which depends on multiple blocks.
|
||||
// It's nonetheless ugly to spin up a blockchain here. Get rid of this somehow.
|
||||
blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
|
||||
defer blockchain.Stop()
|
||||
|
||||
b := &BlockGen{i: i, parent: parent, chain: blocks, chainReader: blockchain, statedb: statedb, config: config, engine: engine}
|
||||
b.header = makeHeader(b.chainReader, parent, statedb, b.engine)
|
||||
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
|
||||
b.header = makeHeader(chainreader, parent, statedb, b.engine)
|
||||
|
||||
// Mutate the state and block according to any hard-fork specs
|
||||
if daoBlock := config.DAOForkBlock; daoBlock != nil {
|
||||
@@ -201,7 +197,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
||||
}
|
||||
if b.engine != nil {
|
||||
// Finalize and seal the block
|
||||
block, _ := b.engine.Finalize(b.chainReader, b.header, statedb, b.txs, b.uncles, b.receipts)
|
||||
block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
|
||||
|
||||
// Write state changes to db
|
||||
root, err := statedb.Commit(config.IsEIP158(b.header.Number))
|
||||
@@ -269,3 +265,19 @@ func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethd
|
||||
})
|
||||
return blocks
|
||||
}
|
||||
|
||||
type fakeChainReader struct {
|
||||
config *params.ChainConfig
|
||||
genesis *types.Block
|
||||
}
|
||||
|
||||
// Config returns the chain configuration.
|
||||
func (cr *fakeChainReader) Config() *params.ChainConfig {
|
||||
return cr.config
|
||||
}
|
||||
|
||||
func (cr *fakeChainReader) CurrentHeader() *types.Header { return nil }
|
||||
func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil }
|
||||
func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil }
|
||||
func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil }
|
||||
func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil }
|
||||
|
@@ -271,6 +271,15 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
|
||||
}
|
||||
}
|
||||
|
||||
// HasReceipts verifies the existence of all the transaction receipts belonging
|
||||
// to a block.
|
||||
func HasReceipts(db DatabaseReader, hash common.Hash, number uint64) bool {
|
||||
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadReceipts retrieves all the transaction receipts belonging to a block.
|
||||
func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
|
||||
// Retrieve the flattened receipt slice
|
||||
@@ -278,7 +287,7 @@ func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Rece
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Convert the revceipts from their storage form to their internal representation
|
||||
// Convert the receipts from their storage form to their internal representation
|
||||
storageReceipts := []*types.ReceiptForStorage{}
|
||||
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
||||
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
|
||||
|
@@ -77,9 +77,8 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
|
||||
return data
|
||||
}
|
||||
|
||||
// WritePreimages writes the provided set of preimages to the database. `number` is the
|
||||
// current block number, and is used for debug messages only.
|
||||
func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) {
|
||||
// WritePreimages writes the provided set of preimages to the database.
|
||||
func WritePreimages(db DatabaseWriter, preimages map[common.Hash][]byte) {
|
||||
for hash, preimage := range preimages {
|
||||
if err := db.Put(preimageKey(hash), preimage); err != nil {
|
||||
log.Crit("Failed to store trie preimage", "err", err)
|
||||
|
@@ -35,7 +35,7 @@ var (
|
||||
// headBlockKey tracks the latest known full block's hash.
headBlockKey = []byte("LastBlock")
|
||||
|
||||
// headFastBlockKey tracks the latest known incomplete block's hash duirng fast sync.
|
||||
// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
|
||||
headFastBlockKey = []byte("LastFast")
|
||||
|
||||
// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
|
||||
|
@@ -72,13 +72,19 @@ type Trie interface {
|
||||
}
|
||||
|
||||
// NewDatabase creates a backing store for state. The returned database is safe for
|
||||
// concurrent use and retains cached trie nodes in memory. The pool is an optional
|
||||
// intermediate trie-node memory pool between the low level storage layer and the
|
||||
// high level trie abstraction.
|
||||
// concurrent use and retains a few recent expanded trie nodes in memory. To keep
|
||||
// more historical state in memory, use the NewDatabaseWithCache constructor.
|
||||
func NewDatabase(db ethdb.Database) Database {
|
||||
return NewDatabaseWithCache(db, 0)
|
||||
}
|
||||
|
||||
// NewDatabaseWithCache creates a backing store for state. The returned database is safe for
// concurrent use and retains both a few recent expanded trie nodes in memory, as
// well as a lot of collapsed RLP trie nodes in a large memory cache.
func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
|
||||
csc, _ := lru.New(codeSizeCacheSize)
|
||||
return &cachingDB{
|
||||
db: trie.NewDatabase(db),
|
||||
db: trie.NewDatabaseWithCache(db, cache),
|
||||
codeSizeCache: csc,
|
||||
}
|
||||
}
|
||||
|
@@ -18,10 +18,10 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@@ -44,6 +44,13 @@ var (
|
||||
emptyCode = crypto.Keccak256Hash(nil)
|
||||
)
|
||||
|
||||
type proofList [][]byte
|
||||
|
||||
func (n *proofList) Put(key []byte, value []byte) error {
|
||||
*n = append(*n, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// StateDBs within the ethereum protocol are used to store anything
|
||||
// within the merkle trie. StateDBs take care of caching and storing
|
||||
// nested states. It's the general query interface to retrieve:
|
||||
@@ -79,8 +86,6 @@ type StateDB struct {
|
||||
journal *journal
|
||||
validRevisions []revision
|
||||
nextRevisionId int
|
||||
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// Create a new state from a given trie.
|
||||
@@ -256,6 +261,24 @@ func (self *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
// GetProof returns the MerkleProof for a given Account
|
||||
func (self *StateDB) GetProof(a common.Address) ([][]byte, error) {
|
||||
var proof proofList
|
||||
err := self.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
|
||||
return [][]byte(proof), err
|
||||
}
|
||||
|
||||
// GetProof returns the StorageProof for given key
|
||||
func (self *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
|
||||
var proof proofList
|
||||
trie := self.StorageTrie(a)
|
||||
if trie == nil {
|
||||
return proof, errors.New("storage trie for requested address does not exist")
|
||||
}
|
||||
err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
|
||||
return [][]byte(proof), err
|
||||
}
|
||||
|
||||
// GetCommittedState retrieves a value from the given account's committed storage trie.
|
||||
func (self *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
|
||||
stateObject := self.getStateObject(addr)
|
||||
@@ -470,9 +493,6 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
|
||||
// Copy creates a deep, independent copy of the state.
|
||||
// Snapshots of the copied state cannot be applied to the copy.
|
||||
func (self *StateDB) Copy() *StateDB {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
|
||||
// Copy all the basic fields, initialize the memory ones
|
||||
state := &StateDB{
|
||||
db: self.db,
|
||||
|
@@ -825,7 +825,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
|
||||
// addTxsLocked attempts to queue a batch of transactions if they are valid,
|
||||
// whilst assuming the transaction pool lock is already held.
|
||||
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
|
||||
// Add the batch of transaction, tracking the accepted ones
|
||||
// Add the batch of transactions, tracking the accepted ones
|
||||
dirty := make(map[common.Address]struct{})
|
||||
errs := make([]error, len(txs))
|
||||
|
||||
|
@@ -81,8 +81,8 @@ type Header struct {
|
||||
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
|
||||
Time *big.Int `json:"timestamp" gencodec:"required"`
|
||||
Extra []byte `json:"extraData" gencodec:"required"`
|
||||
MixDigest common.Hash `json:"mixHash" gencodec:"required"`
|
||||
Nonce BlockNonce `json:"nonce" gencodec:"required"`
|
||||
MixDigest common.Hash `json:"mixHash"`
|
||||
Nonce BlockNonce `json:"nonce"`
|
||||
}
|
||||
|
||||
// field type overrides for gencodec
|
||||
|
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
var _ = (*headerMarshaling)(nil)
|
||||
|
||||
// MarshalJSON marshals as JSON.
|
||||
func (h Header) MarshalJSON() ([]byte, error) {
|
||||
type Header struct {
|
||||
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
|
||||
@@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
|
||||
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
|
||||
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
|
||||
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
|
||||
MixDigest common.Hash `json:"mixHash" gencodec:"required"`
|
||||
Nonce BlockNonce `json:"nonce" gencodec:"required"`
|
||||
MixDigest common.Hash `json:"mixHash"`
|
||||
Nonce BlockNonce `json:"nonce"`
|
||||
Hash common.Hash `json:"hash"`
|
||||
}
|
||||
var enc Header
|
||||
@@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(&enc)
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals from JSON.
|
||||
func (h *Header) UnmarshalJSON(input []byte) error {
|
||||
type Header struct {
|
||||
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
|
||||
@@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
|
||||
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
|
||||
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
|
||||
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
|
||||
MixDigest *common.Hash `json:"mixHash" gencodec:"required"`
|
||||
Nonce *BlockNonce `json:"nonce" gencodec:"required"`
|
||||
MixDigest *common.Hash `json:"mixHash"`
|
||||
Nonce *BlockNonce `json:"nonce"`
|
||||
}
|
||||
var dec Header
|
||||
if err := json.Unmarshal(input, &dec); err != nil {
|
||||
@@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
|
||||
return errors.New("missing required field 'extraData' for Header")
|
||||
}
|
||||
h.Extra = *dec.Extra
|
||||
if dec.MixDigest == nil {
|
||||
return errors.New("missing required field 'mixHash' for Header")
|
||||
if dec.MixDigest != nil {
|
||||
h.MixDigest = *dec.MixDigest
|
||||
}
|
||||
h.MixDigest = *dec.MixDigest
|
||||
if dec.Nonce == nil {
|
||||
return errors.New("missing required field 'nonce' for Header")
|
||||
if dec.Nonce != nil {
|
||||
h.Nonce = *dec.Nonce
|
||||
}
|
||||
h.Nonce = *dec.Nonce
|
||||
return nil
|
||||
}
|
||||
|
@@ -47,7 +47,7 @@ type Log struct {
|
||||
TxIndex uint `json:"transactionIndex" gencodec:"required"`
|
||||
// hash of the block in which the transaction was included
|
||||
BlockHash common.Hash `json:"blockHash"`
|
||||
// index of the log in the receipt
|
||||
// index of the log in the block
|
||||
Index uint `json:"logIndex" gencodec:"required"`
|
||||
|
||||
// The Removed field is true if this log was reverted due to a chain reorganisation.
|
||||
|
@@ -153,16 +153,21 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
|
||||
if err := dec.UnmarshalJSON(input); err != nil {
|
||||
return err
|
||||
}
|
||||
var V byte
|
||||
if isProtectedV(dec.V) {
|
||||
chainID := deriveChainId(dec.V).Uint64()
|
||||
V = byte(dec.V.Uint64() - 35 - 2*chainID)
|
||||
} else {
|
||||
V = byte(dec.V.Uint64() - 27)
|
||||
}
|
||||
if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) {
|
||||
return ErrInvalidSig
|
||||
|
||||
withSignature := dec.V.Sign() != 0 || dec.R.Sign() != 0 || dec.S.Sign() != 0
if withSignature {
var V byte
if isProtectedV(dec.V) {
chainID := deriveChainId(dec.V).Uint64()
V = byte(dec.V.Uint64() - 35 - 2*chainID)
} else {
V = byte(dec.V.Uint64() - 27)
}
if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) {
return ErrInvalidSig
}
}

*tx = Transaction{data: dec}
return nil
}
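The V arithmetic above is the EIP-155 encoding: a replay-protected transaction stores v = chainID*2 + 35 + recoveryID, while a legacy one stores v = 27 + recoveryID. A small standalone sketch of that relationship (the helper name is ours):

```go
package main

import "fmt"

// splitProtectedV inverts v = chainID*2 + 35 + recoveryID for EIP-155 values.
func splitProtectedV(v uint64) (chainID uint64, recoveryID byte) {
	return (v - 35) / 2, byte((v - 35) % 2)
}

func main() {
	// Mainnet (chain id 1) encodes recovery ids 0 and 1 as v = 37 and 38.
	fmt.Println(splitProtectedV(37)) // 1 0
	fmt.Println(splitProtectedV(38)) // 1 1
}
```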
@@ -136,7 +136,7 @@ func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) {
|
||||
return recoverPlain(s.Hash(tx), tx.data.R, tx.data.S, V, true)
|
||||
}
|
||||
|
||||
// WithSignature returns a new transaction with the given signature. This signature
|
||||
// SignatureValues returns signature values. This signature
|
||||
// needs to be in the [R || S || V] format where V is 0 or 1.
|
||||
func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) {
|
||||
R, S, V, err = HomesteadSigner{}.SignatureValues(tx, sig)
|
||||
@@ -227,13 +227,13 @@ func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (commo
|
||||
if !crypto.ValidateSignatureValues(V, R, S, homestead) {
|
||||
return common.Address{}, ErrInvalidSig
|
||||
}
|
||||
// encode the snature in uncompressed format
|
||||
// encode the signature in uncompressed format
|
||||
r, s := R.Bytes(), S.Bytes()
|
||||
sig := make([]byte, 65)
|
||||
copy(sig[32-len(r):32], r)
|
||||
copy(sig[64-len(s):64], s)
|
||||
sig[64] = V
|
||||
// recover the public key from the snature
|
||||
// recover the public key from the signature
|
||||
pub, err := crypto.Ecrecover(sighash[:], sig)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
|
@@ -185,6 +185,7 @@ func TestTransactionJSON(t *testing.T) {
|
||||
}
|
||||
signer := NewEIP155Signer(common.Big1)
|
||||
|
||||
transactions := make([]*Transaction, 0, 50)
|
||||
for i := uint64(0); i < 25; i++ {
|
||||
var tx *Transaction
|
||||
switch i % 2 {
|
||||
@@ -193,20 +194,25 @@ func TestTransactionJSON(t *testing.T) {
|
||||
case 1:
|
||||
tx = NewContractCreation(i, common.Big0, 1, common.Big2, []byte("abcdef"))
|
||||
}
|
||||
transactions = append(transactions, tx)
|
||||
|
||||
tx, err := SignTx(tx, signer, key)
|
||||
signedTx, err := SignTx(tx, signer, key)
|
||||
if err != nil {
|
||||
t.Fatalf("could not sign transaction: %v", err)
|
||||
}
|
||||
|
||||
transactions = append(transactions, signedTx)
|
||||
}
|
||||
|
||||
for _, tx := range transactions {
|
||||
data, err := json.Marshal(tx)
|
||||
if err != nil {
|
||||
t.Errorf("json.Marshal failed: %v", err)
|
||||
t.Fatalf("json.Marshal failed: %v", err)
|
||||
}
|
||||
|
||||
var parsedTx *Transaction
|
||||
if err := json.Unmarshal(data, &parsedTx); err != nil {
|
||||
t.Errorf("json.Unmarshal failed: %v", err)
|
||||
t.Fatalf("json.Unmarshal failed: %v", err)
|
||||
}
|
||||
|
||||
// compare nonce, price, gaslimit, recipient, amount, payload, V, R, S
|
||||
|
@@ -16,34 +16,6 @@
|
||||
|
||||
package vm
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// destinations stores one map per contract (keyed by hash of code).
|
||||
// The maps contain an entry for each location of a JUMPDEST
|
||||
// instruction.
|
||||
type destinations map[common.Hash]bitvec
|
||||
|
||||
// has checks whether code has a JUMPDEST at dest.
|
||||
func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool {
|
||||
// PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
|
||||
// Don't bother checking for JUMPDEST in that case.
|
||||
udest := dest.Uint64()
|
||||
if dest.BitLen() >= 63 || udest >= uint64(len(code)) {
|
||||
return false
|
||||
}
|
||||
|
||||
m, analysed := d[codehash]
|
||||
if !analysed {
|
||||
m = codeBitmap(code)
|
||||
d[codehash] = m
|
||||
}
|
||||
return OpCode(code[udest]) == JUMPDEST && m.codeSegment(udest)
|
||||
}
|
||||
|
||||
// bitvec is a bit vector which maps bytes in a program.
|
||||
// An unset bit means the byte is an opcode, a set bit means
|
||||
// it's data (i.e. argument of PUSHxx).
|
||||
|
@@ -16,7 +16,11 @@
|
||||
|
||||
package vm
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
func TestJumpDestAnalysis(t *testing.T) {
|
||||
tests := []struct {
|
||||
@@ -49,5 +53,23 @@ func TestJumpDestAnalysis(t *testing.T) {
|
||||
t.Fatalf("expected %x, got %02x", test.exp, ret[test.which])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func BenchmarkJumpdestAnalysis_1200k(bench *testing.B) {
|
||||
// 1.4 ms
|
||||
code := make([]byte, 1200000)
|
||||
bench.ResetTimer()
|
||||
for i := 0; i < bench.N; i++ {
|
||||
codeBitmap(code)
|
||||
}
|
||||
bench.StopTimer()
|
||||
}
|
||||
func BenchmarkJumpdestHashing_1200k(bench *testing.B) {
|
||||
// 4 ms
|
||||
code := make([]byte, 1200000)
|
||||
bench.ResetTimer()
|
||||
for i := 0; i < bench.N; i++ {
|
||||
crypto.Keccak256Hash(code)
|
||||
}
|
||||
bench.StopTimer()
|
||||
}
|
||||
|
@@ -49,7 +49,8 @@ type Contract struct {
|
||||
caller ContractRef
|
||||
self ContractRef
|
||||
|
||||
jumpdests destinations // result of JUMPDEST analysis.
|
||||
jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis.
|
||||
analysis bitvec // Locally cached result of JUMPDEST analysis
|
||||
|
||||
Code []byte
|
||||
CodeHash common.Hash
|
||||
@@ -58,21 +59,17 @@ type Contract struct {
|
||||
|
||||
Gas uint64
|
||||
value *big.Int
|
||||
|
||||
Args []byte
|
||||
|
||||
DelegateCall bool
|
||||
}
|
||||
|
||||
// NewContract returns a new contract environment for the execution of EVM.
|
||||
func NewContract(caller ContractRef, object ContractRef, value *big.Int, gas uint64) *Contract {
|
||||
c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object, Args: nil}
|
||||
c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object}
|
||||
|
||||
if parent, ok := caller.(*Contract); ok {
|
||||
// Reuse JUMPDEST analysis from parent context if available.
|
||||
c.jumpdests = parent.jumpdests
|
||||
} else {
|
||||
c.jumpdests = make(destinations)
|
||||
c.jumpdests = make(map[common.Hash]bitvec)
|
||||
}
|
||||
|
||||
// Gas should be a pointer so it can safely be reduced through the run
|
||||
@@ -84,10 +81,42 @@ func NewContract(caller ContractRef, object ContractRef, value *big.Int, gas uin
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Contract) validJumpdest(dest *big.Int) bool {
udest := dest.Uint64()
// PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
// Don't bother checking for JUMPDEST in that case.
if dest.BitLen() >= 63 || udest >= uint64(len(c.Code)) {
return false
}
// Only JUMPDESTs allowed for destinations
if OpCode(c.Code[udest]) != JUMPDEST {
return false
}
// Do we have a contract hash already?
if c.CodeHash != (common.Hash{}) {
// Does parent context have the analysis?
analysis, exist := c.jumpdests[c.CodeHash]
if !exist {
// Do the analysis and save in parent context.
// We do not need to store it in c.analysis.
analysis = codeBitmap(c.Code)
c.jumpdests[c.CodeHash] = analysis
}
return analysis.codeSegment(udest)
}
// We don't have the code hash, most likely a piece of initcode not already
// in the state trie. In that case, we do an analysis and save it locally, so
// we don't have to recalculate it for every JUMP instruction in the execution.
// However, we don't save it within the parent context.
if c.analysis == nil {
c.analysis = codeBitmap(c.Code)
}
return c.analysis.codeSegment(udest)
}

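The bitmap consulted by codeSegment marks which bytes of the code are PUSH immediates rather than opcodes, so a 0x5b byte sitting inside push data is rejected as a jump target. A simplified, standalone restatement of that analysis (codeBitmap itself packs the bits; this sketch uses a plain bool slice):

```go
package main

import "fmt"

// dataBitmap marks every byte that is the immediate argument of a
// PUSH1..PUSH32 opcode (0x60..0x7f) as data rather than code.
func dataBitmap(code []byte) []bool {
	isData := make([]bool, len(code))
	for pc := 0; pc < len(code); pc++ {
		op := code[pc]
		if op >= 0x60 && op <= 0x7f { // PUSHn carries n = op-0x5f immediate bytes
			n := int(op) - 0x5f
			for j := pc + 1; j <= pc+n && j < len(code); j++ {
				isData[j] = true
			}
			pc += n
		}
	}
	return isData
}

func main() {
	// PUSH1 0x5b, JUMPDEST: the first 0x5b is data, the second is real code.
	fmt.Println(dataBitmap([]byte{0x60, 0x5b, 0x5b})) // [false true false]
}
```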
// AsDelegate sets the contract to be a delegate call and returns the current
|
||||
// contract (for chaining calls)
|
||||
func (c *Contract) AsDelegate() *Contract {
|
||||
c.DelegateCall = true
|
||||
// NOTE: caller must, at all times be a contract. It should never happen
|
||||
// that caller is something other than a Contract.
|
||||
parent := c.caller.(*Contract)
|
||||
@@ -138,12 +167,6 @@ func (c *Contract) Value() *big.Int {
|
||||
return c.value
|
||||
}
|
||||
|
||||
// SetCode sets the code to the contract
|
||||
func (c *Contract) SetCode(hash common.Hash, code []byte) {
|
||||
c.Code = code
|
||||
c.CodeHash = hash
|
||||
}
|
||||
|
||||
// SetCallCode sets the code of the contract and address of the backing data
|
||||
// object
|
||||
func (c *Contract) SetCallCode(addr *common.Address, hash common.Hash, code []byte) {
|
||||
@@ -151,3 +174,11 @@ func (c *Contract) SetCallCode(addr *common.Address, hash common.Hash, code []by
|
||||
c.CodeHash = hash
|
||||
c.CodeAddr = addr
|
||||
}
|
||||
|
||||
// SetCodeOptionalHash can be used to provide code, but it's optional to provide hash.
|
||||
// In case hash is not provided, the jumpdest analysis will not be saved to the parent context
|
||||
func (c *Contract) SetCodeOptionalHash(addr *common.Address, codeAndHash *codeAndHash) {
|
||||
c.Code = codeAndHash.code
|
||||
c.CodeHash = codeAndHash.hash
|
||||
c.CodeAddr = addr
|
||||
}
|
||||
|
@@ -212,12 +212,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
||||
evm.StateDB.CreateAccount(addr)
|
||||
}
|
||||
evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value)
|
||||
|
||||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
contract := NewContract(caller, to, value, gas)
|
||||
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
|
||||
|
||||
// Even if the account has no code, we need to continue because it might be a precompile
|
||||
start := time.Now()
|
||||
|
||||
// Capture the tracer start/end events in debug mode
|
||||
@@ -352,8 +352,20 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
|
||||
return ret, contract.Gas, err
|
||||
}
|
||||
|
||||
type codeAndHash struct {
|
||||
code []byte
|
||||
hash common.Hash
|
||||
}
|
||||
|
||||
func (c *codeAndHash) Hash() common.Hash {
|
||||
if c.hash == (common.Hash{}) {
|
||||
c.hash = crypto.Keccak256Hash(c.code)
|
||||
}
|
||||
return c.hash
|
||||
}
|
||||
|
||||
// create creates a new contract using code as deployment code.
|
||||
func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
|
||||
func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
|
||||
// Depth check execution. Fail if we're trying to execute above the
|
||||
// limit.
|
||||
if evm.depth > int(params.CallCreateDepth) {
|
||||
@@ -382,14 +394,14 @@ func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.I
|
||||
// EVM. The contract is a scoped environment for this execution context
|
||||
// only.
|
||||
contract := NewContract(caller, AccountRef(address), value, gas)
|
||||
contract.SetCallCode(&address, crypto.Keccak256Hash(code), code)
|
||||
contract.SetCodeOptionalHash(&address, codeAndHash)
|
||||
|
||||
if evm.vmConfig.NoRecursion && evm.depth > 0 {
|
||||
return nil, address, gas, nil
|
||||
}
|
||||
|
||||
if evm.vmConfig.Debug && evm.depth == 0 {
|
||||
evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, code, gas, value)
|
||||
evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, codeAndHash.code, gas, value)
|
||||
}
|
||||
start := time.Now()
|
||||
|
||||
@@ -433,7 +445,7 @@ func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.I
|
||||
// Create creates a new contract using code as deployment code.
|
||||
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
|
||||
contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address()))
|
||||
return evm.create(caller, code, gas, value, contractAddr)
|
||||
return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr)
|
||||
}
|
||||
|
||||
// Create2 creates a new contract using code as deployment code.
|
||||
@@ -441,8 +453,9 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
// The difference between Create2 and Create is that Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code)
return evm.create(caller, code, gas, endowment, contractAddr)
codeAndHash := &codeAndHash{code: code}
contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), codeAndHash.Hash().Bytes())
return evm.create(caller, codeAndHash, gas, endowment, contractAddr)
}
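The address derivation referenced in the comment (EIP-1014) is keccak256(0xff ++ sender ++ salt ++ keccak256(init_code))[12:]. A self-contained sketch of that computation, assuming golang.org/x/crypto/sha3 for the Keccak-256 primitive:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// keccak256 hashes the concatenation of its arguments with legacy Keccak-256,
// the same hash Ethereum uses for addresses.
func keccak256(chunks ...[]byte) []byte {
	h := sha3.NewLegacyKeccak256()
	for _, c := range chunks {
		h.Write(c)
	}
	return h.Sum(nil)
}

// create2Address computes keccak256(0xff ++ sender ++ salt ++ keccak256(initCode))[12:].
func create2Address(sender [20]byte, salt [32]byte, initCode []byte) [20]byte {
	var addr [20]byte
	copy(addr[:], keccak256([]byte{0xff}, sender[:], salt[:], keccak256(initCode))[12:])
	return addr
}

func main() {
	var sender [20]byte
	var salt [32]byte
	fmt.Printf("0x%x\n", create2Address(sender, salt, nil))
}
```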
// ChainConfig returns the environment's chain configuration
|
||||
|
@@ -347,6 +347,17 @@ func gasCreate2(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack,
if gas, overflow = math.SafeAdd(gas, params.Create2Gas); overflow {
return 0, errGasUintOverflow
}
wordGas, overflow := bigUint64(stack.Back(2))
if overflow {
return 0, errGasUintOverflow
}
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow {
return 0, errGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
return 0, errGasUintOverflow
}

return gas, nil
}