Compare commits

322 Commits

225171a4bf 702b8a7aec 5d7e18539e c4a1d4fecf fb9f7261ec d927cbb638
2fbc454355 d6efa69187 3ea8ac6a9a 8a9c31a307 6380c06c65 54d1111965
f00d0daf33 2cffd4ff3c 7b1aa64220 8f4c4fea20 d42ce0f2c1 273c7a9dc4
a5d5609e38 93c0f1715d d9575e92fc 11a402f747 021d6fbbbb feed8069a6
514022bde6 ff22ec31b6 0598707129 a511f6b515 8b1e14b7f8 6c412e313c
6b232ce325 7abedf9bbb 27a278e6e3 a0bcb16875 bc0a43191e 1064b9e691
10780e8a00 2433349c80 58243b4d3e cab1cff11c 2909f6d7a2 d96ba77113
62467e4405 d0082bb7ec 21c059b67e 9e24491c65 49f63deb24 b536460f8e
afd8b84706 f6206efe5b ae674a3660 894022a3d5 68da9aa716 14bdcdeab4
fe6a9473dc 427316a707 0647c4de7b 7ddc2c9e95 dcaaa3c804 f5b128a5b3
fd982d3f3b 526abe2736 8997efe31f 4ea2d707f9 ee8877509a 763e64cad8
040dd5bd5d dcdd57df62 323428865f 65c91ad5e7 5d30be412b eb7f901289
db5e403afe 96116758d2 65cebb7730 2e0391ea84 d483da766f 7c9314f231
e1f1d3085c 96339daf40 f7d3678c28 facf1bc9d6 e8824f6e74 a9835c1816
2eedbe799f b3711af051 30bdf817a0 fbeb4f20f9 0b20b1a050 4dbefc1f25
dbae1dc7b3 3b07451564 37685930d9 51df1c1f20 f524ec4326 eb794af833
67a7857124 c73b654fd1 9da128db70 4e5d1f1c39 d57e85ecc9 1990c9e621
319098cc1c 223d943481 8974e2e5e0 a4a2343cdc fdfd6d3c39 b5537c5601
e916f9786d 909e968ebb 598f786aab 461291882e 1f0f6f0272 0a22ae5572
b0cfd9c786 6d8a1bfb08 4895665670 eaff89291c e187711c65 d926bf2c7e
8db8d074e2 1a70338734 61a5976368 88c42ab4e7 4210dd1500 c971ab617d
f1986f86f2 28aca90716 9b1536b26a 3e57c33147 baa7eb901e 574378edb5
c95e4a80d1 897ea01d5f ec192f18b4 aa34173f13 c343f75c26 52b1d09457
9402f96597 d0fd8d6fc2 cfde0b5f52 de06185fc3 3fb5f3ae11 e75d0a6e4c
947e0afeb3 1836366ac1 591cef17d4 e33a5de454 f04c0e341e ea89f40f0d
1fc54d92ec 8c4a7fa8d3 423d4254f5 dea1ce052a 25982375a8 049f5b3572
0255951587 85cd64df0e 9608ccf106 3f06da7b5f 546d42179e 90829a04bf
f991995918 aab7ab04b0 43b940ec5a b487bdf0ba a3267ed929 9f7592c802
99483e85b9 1d666cf27e eac16f9824 69c52bde3f 2977538ac0 7f0726f706
13af276418 ea06da0892 feb6620c34 90b22773e9 9e4f96a1a6 01a7e267dc
e8ea5aa0d5 5bee5d69d7 cbfb40b0aa 4cf2b4110e 0029a869f0 2ab24a2a8f
400332b99d a5237a27ea 7a22e89080 e3a993d774 ed40767355 a20cc75b4a
b659718fd0 be2aec092d 17f80cc2e2 143c4341d8 3f33a7c8ce c8dcb9584e
af28d12847 0ad32d3be7 68b0d30d4a eae63c511c ca34e8230e 342ec83d67
38c7eb0f26 d51faee240 426f62f1a8 7677ec1f34 d258eee211 84f8c0cc1f
998f6564b2 40a2c52397 a9c6ef6905 ccc0debb63 ff9b14617e d6ed2f67a8
54294b45b1 d31802312a 55b579e02c be22ee8dda 56de337e57 c934c06cc1
fbf57d53e2 6ce21a4744 9af364e42b 09d44247f7 0fe47e98c4 415969f534
d9cee2c172 ab6bdbd9b0 953b5ac015 f2fdb75dd9 f9c456e02d 579bd0f9fb
49719e21bc a2e43d28d0 6286c255f1 f6bc65fc68 ff8a033f18 247b5f0369
49ec4f0cd1 2688dab48c 595b47e535 784aa83942 fcc18f4c80 53a18d2e27
7beccb29be 5dbd8b42a9 4e7dc34ff1 4747aad160 4ea493e7eb c60f6f6214
ba975dc093 eab6e5a317 c4a4613d95 fedae95015 864e80a48f a42be3b78d
6cf0ab38bd 5463ed9996 d7be5c6619 d2fe83dc5c 16f3c31773 5b3af4c3d1
60b433ab84 fd3da7c69d cd9a1d5b37 2ad511ce09 541f299fbb 7c02933275
f2447bd4c3 ea1724de1a 577d375a0d 66432f3821 5d4d79ae26 6a01363d1d
58c4e033f4 5449139ca2 579ac6287b a7720b5926 670bae4cd3 4a8d5d2b1e
d76c5ca532 c1ea527573 8dfa4f46a9 0afd767537 448d17b8f7 9922943b42
a1949d0788 9f6af6f812 ea171d5bd9 1da33028ce 7a7428a027 cfe8f5fd94
852aa143ac b724d1aada 86be91b3e2 e7067be94f 9586f2acc7 12683feca7
49371bf255 16a78b095e 96a6c8ba0a 7d2c730acb abd881f6d4 4f91831aec
3f2583d6d1 26a4dbb467 50aa1dcfda cbdaa0ca2a 7cf83cee52 744428cb03
b15eb665ee a16f12ba86 8feb31825e 8f8774cf6d c514fbccc0 52b046c9b6
661f5f3dac 49e38c970e ba1030b6b8 7605e63cb9
.github/CODEOWNERS (vendored): 38 changes

@@ -1,12 +1,32 @@
  # Lines starting with '#' are comments.
  # Each line is a file pattern followed by one or more owners.

- accounts/usbwallet @karalabe
- consensus          @karalabe
- core/              @karalabe @holiman
- eth/               @karalabe
- les/               @zsfelfoldi
- light/             @zsfelfoldi
- mobile/            @karalabe
- p2p/               @fjl @zsfelfoldi
- whisper/           @gballet @gluk256
+ accounts/usbwallet              @karalabe
+ consensus                       @karalabe
+ core/                           @karalabe @holiman
+ eth/                            @karalabe
+ les/                            @zsfelfoldi
+ light/                          @zsfelfoldi
+ mobile/                         @karalabe
+ p2p/                            @fjl @zsfelfoldi
+ swarm/bmt                       @zelig
+ swarm/dev                       @lmars
+ swarm/fuse                      @jmozah @holisticode
+ swarm/grafana_dashboards        @nonsense
+ swarm/metrics                   @nonsense @holisticode
+ swarm/multihash                 @nolash
+ swarm/network/bitvector         @zelig @janos @gbalint
+ swarm/network/priorityqueue     @zelig @janos @gbalint
+ swarm/network/simulations       @zelig
+ swarm/network/stream            @janos @zelig @gbalint @holisticode @justelad
+ swarm/network/stream/intervals  @janos
+ swarm/network/stream/testing    @zelig
+ swarm/pot                       @zelig
+ swarm/pss                       @nolash @zelig @nonsense
+ swarm/services                  @zelig
+ swarm/state                     @justelad
+ swarm/storage/encryption        @gbalint @zelig @nagydani
+ swarm/storage/mock              @janos
+ swarm/storage/mru               @nolash
+ swarm/testutil                  @lmars
+ whisper/                        @gballet @gluk256
.travis.yml: 13 changes

@@ -31,7 +31,6 @@ matrix:
      script:
        - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
        - brew update
-       - brew install caskroom/cask/brew-cask
        - brew cask install osxfuse
        - go run build/ci.go install
        - go run build/ci.go test -coverage $TEST_PACKAGES

@@ -127,7 +126,7 @@ matrix:
    # This builder does the Android Maven and Azure uploads
    - os: linux
-     dist: precise # Needed for the android tools
+     dist: trusty
      addons:
        apt:
          packages:

@@ -147,16 +146,16 @@ matrix:
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
-       - curl https://storage.googleapis.com/golang/go1.10.1.linux-amd64.tar.gz | tar -xz
+       - curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
      script:
        # Build the Android archive and upload it to Maven Central and Azure
-       - curl https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip -o android-ndk-r15c.zip
-       - unzip -q android-ndk-r15c.zip && rm android-ndk-r15c.zip
-       - mv android-ndk-r15c $HOME
-       - export ANDROID_NDK=$HOME/android-ndk-r15c
+       - curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip
+       - unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip
+       - mv android-ndk-r17b $HOME
+       - export ANDROID_NDK=$HOME/android-ndk-r17b

        - mkdir -p $GOPATH/src/github.com/ethereum
        - ln -s `pwd` $GOPATH/src/github.com/ethereum
AUTHORS: 1 change

@@ -171,3 +171,4 @@ xiekeyang <xiekeyang@users.noreply.github.com>
  yoza <yoza.is12s@gmail.com>
  ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
  Максим Чусовлянов <mchusovlianov@gmail.com>
+ Ralph Caraveo <deckarep@gmail.com>
@@ -12,11 +12,5 @@ FROM alpine:latest
  RUN apk add --no-cache ca-certificates
  COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

- RUN addgroup -g 1000 geth && \
-     adduser -h /root -D -u 1000 -G geth geth && \
-     chown geth:geth /root
-
- USER geth
-
- EXPOSE 8545 8546 30303 30303/udp 30304/udp
+ EXPOSE 8545 8546 30303 30303/udp
  ENTRYPOINT ["geth"]

@@ -12,10 +12,4 @@ FROM alpine:latest
  RUN apk add --no-cache ca-certificates
  COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/

- RUN addgroup -g 1000 geth && \
-     adduser -h /root -D -u 1000 -G geth geth && \
-     chown geth:geth /root
-
- USER geth
-
- EXPOSE 8545 8546 30303 30303/udp 30304/udp
+ EXPOSE 8545 8546 30303 30303/udp
Makefile: 4 changes

@@ -37,7 +37,11 @@ ios:
  test: all
  	build/env.sh go run build/ci.go test

+ lint: ## Run linters.
+ 	build/env.sh go run build/ci.go lint
+
  clean:
+ 	./build/clean_go_build_cache.sh
  	rm -fr build/_workspace/pkg/ $(GOBIN)/*

  # The devtools target installs tools required for 'go generate'.
@@ -40,7 +40,7 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
  | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
  | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
  | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
- | `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
+ | `swarm` | Swarm daemon and tools. This is the entrypoint for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/ethereum/go-ethereum/tree/master/swarm) for more information. |
  | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

  ## Running geth
@@ -111,9 +111,14 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
  	if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
  		return err
  	}
- 	// If the output interface is a struct, make sure names don't collide
+
+ 	// If the interface is a struct, get of abi->struct_field mapping
+ 	var abi2struct map[string]string
  	if kind == reflect.Struct {
- 		if err := requireUniqueStructFieldNames(arguments); err != nil {
+ 		var err error
+ 		abi2struct, err = mapAbiToStructFields(arguments, value)
+ 		if err != nil {
  			return err
  		}
  	}

@@ -123,9 +128,10 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
  	switch kind {
  	case reflect.Struct:
- 		err := unpackStruct(value, reflectValue, arg)
- 		if err != nil {
- 			return err
+ 		if structField, ok := abi2struct[arg.Name]; ok {
+ 			if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
+ 				return err
+ 			}
  		}
  	case reflect.Slice, reflect.Array:
  		if value.Len() < i {

@@ -151,17 +157,22 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interf
  	if len(marshalledValues) != 1 {
  		return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
  	}
+
  	elem := reflect.ValueOf(v).Elem()
  	kind := elem.Kind()
  	reflectValue := reflect.ValueOf(marshalledValues[0])
+
+ 	var abi2struct map[string]string
  	if kind == reflect.Struct {
- 		//make sure names don't collide
- 		if err := requireUniqueStructFieldNames(arguments); err != nil {
+ 		var err error
+ 		if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
  			return err
  		}
- 		return unpackStruct(elem, reflectValue, arguments[0])
+ 		arg := arguments.NonIndexed()[0]
+ 		if structField, ok := abi2struct[arg.Name]; ok {
+ 			return set(elem.FieldByName(structField), reflectValue, arg)
+ 		}
+ 		return nil
  	}

  	return set(elem, reflectValue, arguments.NonIndexed()[0])

@@ -277,18 +288,3 @@ func capitalise(input string) string {
  	}
  	return strings.ToUpper(input[:1]) + input[1:]
  }
-
- //unpackStruct extracts each argument into its corresponding struct field
- func unpackStruct(value, reflectValue reflect.Value, arg Argument) error {
- 	name := capitalise(arg.Name)
- 	typ := value.Type()
- 	for j := 0; j < typ.NumField(); j++ {
- 		// TODO read tags: `abi:"fieldName"`
- 		if typ.Field(j).Name == name {
- 			if err := set(value.Field(j), reflectValue, arg); err != nil {
- 				return err
- 			}
- 		}
- 	}
- 	return nil
- }
@@ -31,6 +31,7 @@ import (
  	"github.com/ethereum/go-ethereum/consensus/ethash"
  	"github.com/ethereum/go-ethereum/core"
  	"github.com/ethereum/go-ethereum/core/bloombits"
+ 	"github.com/ethereum/go-ethereum/core/rawdb"
  	"github.com/ethereum/go-ethereum/core/state"
  	"github.com/ethereum/go-ethereum/core/types"
  	"github.com/ethereum/go-ethereum/core/vm"

@@ -65,7 +66,7 @@ type SimulatedBackend struct {
  // NewSimulatedBackend creates a new binding backend using a simulated blockchain
  // for testing purposes.
  func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
- 	database, _ := ethdb.NewMemDatabase()
+ 	database := ethdb.NewMemDatabase()
  	genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
  	genesis.MustCommit(database)
  	blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})

@@ -159,7 +160,7 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
  // TransactionReceipt returns the receipt of a transaction.
  func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
- 	receipt, _, _, _ := core.GetReceipt(b.database, txHash)
+ 	receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash)
  	return receipt, nil
  }

@@ -323,18 +324,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
  //
  // TODO(karalabe): Deprecate when the subscription one can return past data too.
  func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
- 	// Initialize unset filter boundaried to run from genesis to chain head
- 	from := int64(0)
- 	if query.FromBlock != nil {
- 		from = query.FromBlock.Int64()
- 	}
- 	to := int64(-1)
- 	if query.ToBlock != nil {
- 		to = query.ToBlock.Int64()
- 	}
- 	// Construct and execute the filter
- 	filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
-
+ 	var filter *filters.Filter
+ 	if query.BlockHash != nil {
+ 		// Block filter requested, construct a single-shot filter
+ 		filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
+ 	} else {
+ 		// Initialize unset filter boundaried to run from genesis to chain head
+ 		from := int64(0)
+ 		if query.FromBlock != nil {
+ 			from = query.FromBlock.Int64()
+ 		}
+ 		to := int64(-1)
+ 		if query.ToBlock != nil {
+ 			to = query.ToBlock.Int64()
+ 		}
+ 		// Construct the range filter
+ 		filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
+ 	}
  	// Run the filter and return all the logs
  	logs, err := filter.Logs(ctx)
  	if err != nil {
  		return nil, err

@@ -429,12 +436,24 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
  	return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
  }

+ func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+ 	return fb.bc.GetHeaderByHash(hash), nil
+ }
+
  func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
- 	return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
+ 	number := rawdb.ReadHeaderNumber(fb.db, hash)
+ 	if number == nil {
+ 		return nil, nil
+ 	}
+ 	return rawdb.ReadReceipts(fb.db, hash, *number), nil
  }

  func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
- 	receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash))
+ 	number := rawdb.ReadHeaderNumber(fb.db, hash)
+ 	if number == nil {
+ 		return nil, nil
+ 	}
+ 	receipts := rawdb.ReadReceipts(fb.db, hash, *number)
  	if receipts == nil {
  		return nil, nil
  	}

@@ -445,7 +464,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
  	return logs, nil
  }

- func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+ func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
  	return event.NewSubscription(func(quit <-chan struct{}) error {
  		<-quit
  		return nil
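The FilterLogs change above is the interesting one: a query may now carry a BlockHash, in which case a single-shot block filter is built instead of a range filter. A minimal sketch driving both paths against a simulated backend, using this release's constructor; addr and blockHash are placeholders:

// Sketch only: exercises both filter paths of the updated FilterLogs.
package main

import (
	"context"
	"fmt"
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	backend := backends.NewSimulatedBackend(core.GenesisAlloc{})
	addr := common.Address{}   // placeholder contract address
	blockHash := common.Hash{} // placeholder block hash

	// Pre-existing path: a range filter from genesis to chain head.
	rangeQuery := ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		Addresses: []common.Address{addr},
	}
	logs, err := backend.FilterLogs(context.Background(), rangeQuery)
	fmt.Println(len(logs), err)

	// New path: BlockHash set, so a single-shot block filter is constructed.
	hashQuery := ethereum.FilterQuery{
		BlockHash: &blockHash,
		Addresses: []common.Address{addr},
	}
	logs, err = backend.FilterLogs(context.Background(), hashQuery)
	fmt.Println(len(logs), err)
}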
@@ -33,15 +33,15 @@ type Event struct {
  	Inputs Arguments
  }

- func (event Event) String() string {
- 	inputs := make([]string, len(event.Inputs))
- 	for i, input := range event.Inputs {
+ func (e Event) String() string {
+ 	inputs := make([]string, len(e.Inputs))
+ 	for i, input := range e.Inputs {
  		inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
  		if input.Indexed {
  			inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
  		}
  	}
- 	return fmt.Sprintf("event %v(%v)", event.Name, strings.Join(inputs, ", "))
+ 	return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
  }

  // Id returns the canonical representation of the event's signature used by the
@@ -58,12 +58,28 @@ var jsonEventPledge = []byte(`{
  	"type": "event"
  }`)

+ var jsonEventMixedCase = []byte(`{
+ 	"anonymous": false,
+ 	"inputs": [{
+ 		"indexed": false, "name": "value", "type": "uint256"
+ 	}, {
+ 		"indexed": false, "name": "_value", "type": "uint256"
+ 	}, {
+ 		"indexed": false, "name": "Value", "type": "uint256"
+ 	}],
+ 	"name": "MixedCase",
+ 	"type": "event"
+ }`)
+
  // 1000000
  var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"

  // "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
  var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"

+ // 1000000,2218516807680,1000001
+ var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
+
  func TestEventId(t *testing.T) {
  	var table = []struct {
  		definition string

@@ -121,6 +137,27 @@ func TestEventTupleUnpack(t *testing.T) {
  		Value *big.Int
  	}

+ 	type EventTransferWithTag struct {
+ 		// this is valid because `value` is not exportable,
+ 		// so value is only unmarshalled into `Value1`.
+ 		value  *big.Int
+ 		Value1 *big.Int `abi:"value"`
+ 	}
+
+ 	type BadEventTransferWithSameFieldAndTag struct {
+ 		Value  *big.Int
+ 		Value1 *big.Int `abi:"value"`
+ 	}
+
+ 	type BadEventTransferWithDuplicatedTag struct {
+ 		Value1 *big.Int `abi:"value"`
+ 		Value2 *big.Int `abi:"value"`
+ 	}
+
+ 	type BadEventTransferWithEmptyTag struct {
+ 		Value *big.Int `abi:""`
+ 	}
+
  	type EventPledge struct {
  		Who common.Address
  		Wad *big.Int

@@ -133,9 +170,16 @@ func TestEventTupleUnpack(t *testing.T) {
  		Currency [3]byte
  	}

+ 	type EventMixedCase struct {
+ 		Value1 *big.Int `abi:"value"`
+ 		Value2 *big.Int `abi:"_value"`
+ 		Value3 *big.Int `abi:"Value"`
+ 	}
+
  	bigint := new(big.Int)
  	bigintExpected := big.NewInt(1000000)
  	bigintExpected2 := big.NewInt(2218516807680)
+ 	bigintExpected3 := big.NewInt(1000001)
  	addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
  	var testCases = []struct {
  		data string

@@ -158,6 +202,34 @@ func TestEventTupleUnpack(t *testing.T) {
  		jsonEventTransfer,
  		"",
  		"Can unpack ERC20 Transfer event into slice",
+ 	}, {
+ 		transferData1,
+ 		&EventTransferWithTag{},
+ 		&EventTransferWithTag{Value1: bigintExpected},
+ 		jsonEventTransfer,
+ 		"",
+ 		"Can unpack ERC20 Transfer event into structure with abi: tag",
+ 	}, {
+ 		transferData1,
+ 		&BadEventTransferWithDuplicatedTag{},
+ 		&BadEventTransferWithDuplicatedTag{},
+ 		jsonEventTransfer,
+ 		"struct: abi tag in 'Value2' already mapped",
+ 		"Can not unpack ERC20 Transfer event with duplicated abi tag",
+ 	}, {
+ 		transferData1,
+ 		&BadEventTransferWithSameFieldAndTag{},
+ 		&BadEventTransferWithSameFieldAndTag{},
+ 		jsonEventTransfer,
+ 		"abi: multiple variables maps to the same abi field 'value'",
+ 		"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
+ 	}, {
+ 		transferData1,
+ 		&BadEventTransferWithEmptyTag{},
+ 		&BadEventTransferWithEmptyTag{},
+ 		jsonEventTransfer,
+ 		"struct: abi tag in 'Value' is empty",
+ 		"Can not unpack ERC20 Transfer event with an empty tag",
  	}, {
  		pledgeData1,
  		&EventPledge{},

@@ -216,6 +288,13 @@ func TestEventTupleUnpack(t *testing.T) {
  		jsonEventPledge,
  		"abi: cannot unmarshal tuple into map[string]interface {}",
  		"Can not unpack Pledge event into map",
+ 	}, {
+ 		mixedCaseData1,
+ 		&EventMixedCase{},
+ 		&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
+ 		jsonEventMixedCase,
+ 		"",
+ 		"Can unpack abi variables with mixed case",
  	}}

  	for _, tc := range testCases {

@@ -227,7 +306,7 @@ func TestEventTupleUnpack(t *testing.T) {
  			assert.Nil(err, "Should be able to unpack event data.")
  			assert.Equal(tc.expected, tc.dest, tc.name)
  		} else {
- 			assert.EqualError(err, tc.error)
+ 			assert.EqualError(err, tc.error, tc.name)
  		}
  	})
  }
@@ -47,10 +47,8 @@ type Method struct {
  // Please note that "int" is substitute for its canonical representation "int256"
  func (method Method) Sig() string {
  	types := make([]string, len(method.Inputs))
- 	i := 0
- 	for _, input := range method.Inputs {
+ 	for i, input := range method.Inputs {
  		types[i] = input.Type.String()
- 		i++
  	}
  	return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
  }
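The Sig refactor keeps behavior identical: it still produces the canonical signature string (with "int" normalized to "int256") from which the 4-byte selector is hashed. A small illustration against a hypothetical ERC20-style definition, using this release's Sig()/Id() methods:

// Sketch only: canonical method signature and its selector.
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

const def = `[{"type":"function","name":"transfer",
	"inputs":[{"name":"to","type":"address"},{"name":"amount","type":"uint256"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	m := parsed.Methods["transfer"]
	fmt.Println(m.Sig())       // transfer(address,uint256)
	fmt.Printf("%x\n", m.Id()) // a9059cbb, the 4-byte selector
}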
|
@@ -31,29 +31,14 @@ var (
|
||||
uint16T = reflect.TypeOf(uint16(0))
|
||||
uint32T = reflect.TypeOf(uint32(0))
|
||||
uint64T = reflect.TypeOf(uint64(0))
|
||||
intT = reflect.TypeOf(int(0))
|
||||
int8T = reflect.TypeOf(int8(0))
|
||||
int16T = reflect.TypeOf(int16(0))
|
||||
int32T = reflect.TypeOf(int32(0))
|
||||
int64T = reflect.TypeOf(int64(0))
|
||||
addressT = reflect.TypeOf(common.Address{})
|
||||
intTS = reflect.TypeOf([]int(nil))
|
||||
int8TS = reflect.TypeOf([]int8(nil))
|
||||
int16TS = reflect.TypeOf([]int16(nil))
|
||||
int32TS = reflect.TypeOf([]int32(nil))
|
||||
int64TS = reflect.TypeOf([]int64(nil))
|
||||
)
|
||||
|
||||
// U256 converts a big Int into a 256bit EVM number.
|
||||
func U256(n *big.Int) []byte {
|
||||
return math.PaddedBigBytes(math.U256(n), 32)
|
||||
}
|
||||
|
||||
// checks whether the given reflect value is signed. This also works for slices with a number type
|
||||
func isSigned(v reflect.Value) bool {
|
||||
switch v.Type() {
|
||||
case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
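U256, the survivor of this cleanup, truncates a big.Int modulo 2^256 and left-pads it to a 32-byte EVM word, so negative inputs come out in two's complement. For instance (note that math.U256 modifies its argument in place):

// Sketch only: 32-byte EVM word encoding via U256.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	n := big.NewInt(1000000)
	fmt.Printf("%x\n", math.PaddedBigBytes(math.U256(n), 32))
	// 00000000000000000000000000000000000000000000000000000000000f4240

	neg := big.NewInt(-1) // wraps to 2^256 - 1
	fmt.Printf("%x\n", math.PaddedBigBytes(math.U256(neg), 32))
	// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
}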
@@ -19,7 +19,6 @@ package abi
  import (
  	"bytes"
  	"math/big"
- 	"reflect"
  	"testing"
  )

@@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) {
  		t.Errorf("expected %x got %x", ubytes, unsigned)
  	}
  }
-
- func TestSigned(t *testing.T) {
- 	if isSigned(reflect.ValueOf(uint(10))) {
- 		t.Error("signed")
- 	}
-
- 	if !isSigned(reflect.ValueOf(int(10))) {
- 		t.Error("not signed")
- 	}
- }
@@ -19,6 +19,7 @@ package abi
  import (
  	"fmt"
  	"reflect"
+ 	"strings"
  )

  // indirect recursively dereferences the value until it either gets the value

@@ -111,18 +112,101 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
  	return nil
  }

- // requireUniqueStructFieldNames makes sure field names don't collide
- func requireUniqueStructFieldNames(args Arguments) error {
- 	exists := make(map[string]bool)
- 	for _, arg := range args {
- 		field := capitalise(arg.Name)
- 		if field == "" {
- 			return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
- 		}
- 		if exists[field] {
- 			return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
- 		}
- 		exists[field] = true
- 	}
- 	return nil
- }
+ // mapAbiToStringField maps abi to struct fields.
+ // first round: for each Exportable field that contains a `abi:""` tag
+ //   and this field name exists in the arguments, pair them together.
+ // second round: for each argument field that has not been already linked,
+ //   find what variable is expected to be mapped into, if it exists and has not been
+ //   used, pair them.
+ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
+ 	typ := value.Type()
+
+ 	abi2struct := make(map[string]string)
+ 	struct2abi := make(map[string]string)
+
+ 	// first round ~~~
+ 	for i := 0; i < typ.NumField(); i++ {
+ 		structFieldName := typ.Field(i).Name
+
+ 		// skip private struct fields.
+ 		if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
+ 			continue
+ 		}
+
+ 		// skip fields that have no abi:"" tag.
+ 		var ok bool
+ 		var tagName string
+ 		if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
+ 			continue
+ 		}
+
+ 		// check if tag is empty.
+ 		if tagName == "" {
+ 			return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
+ 		}
+
+ 		// check which argument field matches with the abi tag.
+ 		found := false
+ 		for _, abiField := range args.NonIndexed() {
+ 			if abiField.Name == tagName {
+ 				if abi2struct[abiField.Name] != "" {
+ 					return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
+ 				}
+ 				// pair them
+ 				abi2struct[abiField.Name] = structFieldName
+ 				struct2abi[structFieldName] = abiField.Name
+ 				found = true
+ 			}
+ 		}
+
+ 		// check if this tag has been mapped.
+ 		if !found {
+ 			return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
+ 		}
+ 	}
+
+ 	// second round ~~~
+ 	for _, arg := range args {
+ 		abiFieldName := arg.Name
+ 		structFieldName := capitalise(abiFieldName)
+
+ 		if structFieldName == "" {
+ 			return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
+ 		}
+
+ 		// this abi has already been paired, skip it... unless there exists another, yet unassigned
+ 		// struct field with the same field name. If so, raise an error:
+ 		//    abi: [ { "name": "value" } ]
+ 		//    struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
+ 		if abi2struct[abiFieldName] != "" {
+ 			if abi2struct[abiFieldName] != structFieldName &&
+ 				struct2abi[structFieldName] == "" &&
+ 				value.FieldByName(structFieldName).IsValid() {
+ 				return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
+ 			}
+ 			continue
+ 		}
+
+ 		// return an error if this struct field has already been paired.
+ 		if struct2abi[structFieldName] != "" {
+ 			return nil, fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", structFieldName)
+ 		}
+
+ 		if value.FieldByName(structFieldName).IsValid() {
+ 			// pair them
+ 			abi2struct[abiFieldName] = structFieldName
+ 			struct2abi[structFieldName] = abiFieldName
+ 		} else {
+ 			// not paired, but annotate as used, to detect cases like
+ 			//    abi : [ { "name": "value" }, { "name": "_value" } ]
+ 			//    struct { Value *big.Int }
+ 			struct2abi[structFieldName] = abiFieldName
+ 		}
+ 	}
+
+ 	return abi2struct, nil
+ }
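Taken together with the argument.go changes, mapAbiToStructFields is what makes `abi:"..."` tags work when unpacking into structs. A minimal sketch using this release's Unpack(v, name, data) API, with a field name that deliberately differs from the ABI output name (the "Transfer"/"Amount" names are illustrative):

// Sketch only: binding an ABI output to a differently named struct field.
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

const eventDef = `[{"type":"event","name":"Transfer","anonymous":false,
	"inputs":[{"indexed":false,"name":"value","type":"uint256"}]}]`

type transferRecord struct {
	Amount *big.Int `abi:"value"` // tag binds ABI output "value" to Amount
}

func main() {
	parsed, err := abi.JSON(strings.NewReader(eventDef))
	if err != nil {
		panic(err)
	}
	// 1000000 encoded as one 32-byte word (same payload as transferData1 above).
	data := common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000f4240")

	var rec transferRecord
	if err := parsed.Unpack(&rec, "Transfer", data); err != nil {
		panic(err)
	}
	fmt.Println(rec.Amount) // 1000000
}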
@@ -27,10 +27,10 @@ import (
  	"sync"
  	"time"

+ 	mapset "github.com/deckarep/golang-set"
  	"github.com/ethereum/go-ethereum/accounts"
  	"github.com/ethereum/go-ethereum/common"
  	"github.com/ethereum/go-ethereum/log"
- 	"gopkg.in/fatih/set.v0"
  )

  // Minimum amount of time between cache reloads. This limit applies if the platform does

@@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
  		keydir: keydir,
  		byAddr: make(map[common.Address][]accounts.Account),
  		notify: make(chan struct{}, 1),
- 		fileC:  fileCache{all: set.NewNonTS()},
+ 		fileC:  fileCache{all: mapset.NewThreadUnsafeSet()},
  	}
  	ac.watcher = newWatcher(ac)
  	return ac, ac.notify

@@ -237,7 +237,7 @@ func (ac *accountCache) scanAccounts() error {
  		log.Debug("Failed to reload keystore contents", "err", err)
  		return err
  	}
- 	if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
+ 	if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
  		return nil
  	}
  	// Create a helper method to scan the contents of the key files

@@ -272,15 +272,15 @@ func (ac *accountCache) scanAccounts() error {
  	// Process all the file diffs
  	start := time.Now()

- 	for _, p := range creates.List() {
+ 	for _, p := range creates.ToSlice() {
  		if a := readAccount(p.(string)); a != nil {
  			ac.add(*a)
  		}
  	}
- 	for _, p := range deletes.List() {
+ 	for _, p := range deletes.ToSlice() {
  		ac.deleteByFile(p.(string))
  	}
- 	for _, p := range updates.List() {
+ 	for _, p := range updates.ToSlice() {
  		path := p.(string)
  		ac.deleteByFile(path)
  		if a := readAccount(path); a != nil {
@@ -24,20 +24,20 @@ import (
  	"sync"
  	"time"

+ 	mapset "github.com/deckarep/golang-set"
  	"github.com/ethereum/go-ethereum/log"
- 	set "gopkg.in/fatih/set.v0"
  )

  // fileCache is a cache of files seen during scan of keystore.
  type fileCache struct {
- 	all     *set.SetNonTS // Set of all files from the keystore folder
- 	lastMod time.Time     // Last time instance when a file was modified
+ 	all     mapset.Set // Set of all files from the keystore folder
+ 	lastMod time.Time  // Last time instance when a file was modified
  	mu      sync.RWMutex
  }

  // scan performs a new scan on the given directory, compares against the already
  // cached filenames, and returns file sets: creates, deletes, updates.
- func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
+ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
  	t0 := time.Now()

  	// List all the failes from the keystore folder

@@ -51,8 +51,8 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
  	defer fc.mu.Unlock()

  	// Iterate all the files and gather their metadata
- 	all := set.NewNonTS()
- 	mods := set.NewNonTS()
+ 	all := mapset.NewThreadUnsafeSet()
+ 	mods := mapset.NewThreadUnsafeSet()

  	var newLastMod time.Time
  	for _, fi := range files {

@@ -76,9 +76,9 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
  	t2 := time.Now()

  	// Update the tracked files and return the three sets
- 	deletes := set.Difference(fc.all, all)   // Deletes = previous - current
- 	creates := set.Difference(all, fc.all)   // Creates = current - previous
- 	updates := set.Difference(mods, creates) // Updates = modified - creates
+ 	deletes := fc.all.Difference(all)   // Deletes = previous - current
+ 	creates := all.Difference(fc.all)   // Creates = current - previous
+ 	updates := mods.Difference(creates) // Updates = modified - creates

  	fc.all, fc.lastMod = all, newLastMod
  	t3 := time.Now()
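The migration swaps gopkg.in/fatih/set.v0 for github.com/deckarep/golang-set, whose operations are methods on the set rather than package-level functions. A small sketch of exactly the calls the scan logic now relies on:

// Sketch only: the mapset operations used by the keystore scan.
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	previous := mapset.NewThreadUnsafeSet()
	previous.Add("a.key")
	previous.Add("b.key")

	current := mapset.NewThreadUnsafeSet()
	current.Add("b.key")
	current.Add("c.key")

	deletes := previous.Difference(current) // {a.key}: gone from disk
	creates := current.Difference(previous) // {c.key}: newly appeared

	fmt.Println(deletes.Cardinality(), creates.Cardinality()) // 1 1
	for _, p := range creates.ToSlice() {
		fmt.Println(p.(string)) // c.key
	}
}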
@@ -50,7 +50,7 @@ var (
  var KeyStoreType = reflect.TypeOf(&KeyStore{})

  // KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
- var KeyStoreScheme = "keystore"
+ const KeyStoreScheme = "keystore"

  // Maximum time between wallet refreshes (if filesystem notifications don't work).
  const walletRefreshCycle = 3 * time.Second
@@ -28,18 +28,18 @@ package keystore
  import (
  	"bytes"
  	"crypto/aes"
- 	crand "crypto/rand"
+ 	"crypto/rand"
  	"crypto/sha256"
  	"encoding/hex"
  	"encoding/json"
  	"fmt"
+ 	"io"
  	"io/ioutil"
  	"path/filepath"

  	"github.com/ethereum/go-ethereum/common"
  	"github.com/ethereum/go-ethereum/common/math"
  	"github.com/ethereum/go-ethereum/crypto"
- 	"github.com/ethereum/go-ethereum/crypto/randentropy"
  	"github.com/pborman/uuid"
  	"golang.org/x/crypto/pbkdf2"
  	"golang.org/x/crypto/scrypt"

@@ -93,7 +93,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
  // StoreKey generates a key, encrypts with 'auth' and stores in the given directory
  func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
- 	_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
+ 	_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth)
  	return a.Address, err
  }

@@ -108,16 +108,19 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
  func (ks keyStorePassphrase) JoinPath(filename string) string {
  	if filepath.IsAbs(filename) {
  		return filename
- 	} else {
- 		return filepath.Join(ks.keysDirPath, filename)
- 	}
+ 	}
+ 	return filepath.Join(ks.keysDirPath, filename)
  }

  // EncryptKey encrypts a key using the specified scrypt parameters into a json
  // blob that can be decrypted later on.
  func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
  	authArray := []byte(auth)
- 	salt := randentropy.GetEntropyCSPRNG(32)
+
+ 	salt := make([]byte, 32)
+ 	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
+ 		panic("reading from crypto/rand failed: " + err.Error())
+ 	}
  	derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
  	if err != nil {
  		return nil, err

@@ -125,7 +128,10 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
  	encryptKey := derivedKey[:16]
  	keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)

- 	iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
+ 	iv := make([]byte, aes.BlockSize) // 16
+ 	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+ 		panic("reading from crypto/rand failed: " + err.Error())
+ 	}
  	cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
  	if err != nil {
  		return nil, err
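The randentropy package is gone; salts and IVs are now read straight from crypto/rand, panicking if the system randomness source fails. The recurring pattern, extracted into a standalone sketch:

// Sketch only: the crypto/rand pattern replacing randentropy.GetEntropyCSPRNG.
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"
	"io"
)

// mustRandom fills a fresh n-byte buffer from the OS CSPRNG or panics.
func mustRandom(n int) []byte {
	buf := make([]byte, n)
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}
	return buf
}

func main() {
	salt := mustRandom(32)          // scrypt salt
	iv := mustRandom(aes.BlockSize) // AES-CTR IV, 16 bytes
	fmt.Printf("%x %x\n", salt, iv)
}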
@@ -56,7 +56,6 @@ func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error {
  func (ks keyStorePlain) JoinPath(filename string) string {
  	if filepath.IsAbs(filename) {
  		return filename
- 	} else {
- 		return filepath.Join(ks.keysDirPath, filename)
- 	}
+ 	}
+ 	return filepath.Join(ks.keysDirPath, filename)
  }
@@ -76,12 +76,12 @@ func (u URL) MarshalJSON() ([]byte, error) {
  // UnmarshalJSON parses url.
  func (u *URL) UnmarshalJSON(input []byte) error {
- 	var textUrl string
- 	err := json.Unmarshal(input, &textUrl)
+ 	var textURL string
+ 	err := json.Unmarshal(input, &textURL)
  	if err != nil {
  		return err
  	}
- 	url, err := parseURL(textUrl)
+ 	url, err := parseURL(textURL)
  	if err != nil {
  		return err
  	}
accounts/url_test.go: 96 lines (new file)

@@ -0,0 +1,96 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package accounts

import (
	"testing"
)

func TestURLParsing(t *testing.T) {
	url, err := parseURL("https://ethereum.org")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if url.Scheme != "https" {
		t.Errorf("expected: %v, got: %v", "https", url.Scheme)
	}
	if url.Path != "ethereum.org" {
		t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
	}

	_, err = parseURL("ethereum.org")
	if err == nil {
		t.Error("expected err, got: nil")
	}
}

func TestURLString(t *testing.T) {
	url := URL{Scheme: "https", Path: "ethereum.org"}
	if url.String() != "https://ethereum.org" {
		t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
	}

	url = URL{Scheme: "", Path: "ethereum.org"}
	if url.String() != "ethereum.org" {
		t.Errorf("expected: %v, got: %v", "ethereum.org", url.String())
	}
}

func TestURLMarshalJSON(t *testing.T) {
	url := URL{Scheme: "https", Path: "ethereum.org"}
	json, err := url.MarshalJSON()
	if err != nil {
		t.Errorf("unexpcted error: %v", err)
	}
	if string(json) != "\"https://ethereum.org\"" {
		t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
	}
}

func TestURLUnmarshalJSON(t *testing.T) {
	url := &URL{}
	err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
	if err != nil {
		t.Errorf("unexpcted error: %v", err)
	}
	if url.Scheme != "https" {
		t.Errorf("expected: %v, got: %v", "https", url.Scheme)
	}
	if url.Path != "ethereum.org" {
		t.Errorf("expected: %v, got: %v", "https", url.Path)
	}
}

func TestURLComparison(t *testing.T) {
	tests := []struct {
		urlA   URL
		urlB   URL
		expect int
	}{
		{URL{"https", "ethereum.org"}, URL{"https", "ethereum.org"}, 0},
		{URL{"http", "ethereum.org"}, URL{"https", "ethereum.org"}, -1},
		{URL{"https", "ethereum.org/a"}, URL{"https", "ethereum.org"}, 1},
		{URL{"https", "abc.org"}, URL{"https", "ethereum.org"}, -1},
	}

	for i, tt := range tests {
		result := tt.urlA.Cmp(tt.urlB)
		if result != tt.expect {
			t.Errorf("test %d: cmp mismatch: expected: %d, got: %d", i, tt.expect, result)
		}
	}
}
@@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 {
  }

  // Name returns the friendly message type name of a specific protocol buffer
- // type numbers.
+ // type number.
  func Name(kind uint16) string {
  	name := MessageType_name[int32(kind)]
  	if len(name) < 12 {
@@ -53,11 +53,9 @@ const (
  	ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration

  	ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
- 	ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
  	ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
  	ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
  	ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
- 	ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Require a user confirmation before returning the address
  )

  // errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange

@@ -304,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
  	for i, component := range derivationPath {
  		binary.BigEndian.PutUint32(path[1+4*i:], component)
  	}
- 	// Create the transaction RLP based on whether legacy or EIP155 signing was requeste
+ 	// Create the transaction RLP based on whether legacy or EIP155 signing was requested
  	var (
  		txrlp []byte
  		err   error
@@ -23,8 +23,8 @@ environment:
  install:
    - git submodule update --init
    - rmdir C:\go /s /q
-   - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GETH_ARCH%.zip
-   - 7z x go1.10.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+   - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
+   - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
    - go version
    - gcc --version
bmt/bmt.go: 560 lines (deleted)

@@ -1,560 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt provides a binary merkle tree implementation
package bmt

import (
	"fmt"
	"hash"
	"io"
	"strings"
	"sync"
	"sync/atomic"
)

/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g keccak 256 SHA3)

It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash

The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.

Two implementations are provided:

* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
  control structure to coordinate the concurrent routines
  It implements the ChunkHash interface as well as the go standard hash.Hash interface
*/
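// Editor's illustration, not part of the deleted file: a minimal reference of
// the root computation the comment above describes, assuming segments are
// already split into fixed-size (e.g. 32-byte) pieces and the leaf count is a
// power of two; the base hash (e.g. SHA3-256) is passed in as a constructor.
func refBMTRoot(base func() hash.Hash, segments [][]byte) []byte {
	level := segments
	for len(level) > 1 {
		next := make([][]byte, len(level)/2)
		for i := 0; i < len(next); i++ {
			h := base()
			h.Write(level[2*i])   // left child
			h.Write(level[2*i+1]) // right child
			next[i] = h.Sum(nil)
		}
		level = next
	}
	return level[0] // the BMT root
}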
||||
|
||||
const (
|
||||
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
|
||||
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
|
||||
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
|
||||
// the maximum number of concurrent BMT hashing operations performed by the same hasher
|
||||
DefaultPoolSize = 8
|
||||
)
|
||||
|
||||
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
|
||||
type BaseHasher func() hash.Hash
|
||||
|
||||
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
|
||||
// implements the hash.Hash interface
|
||||
// reuse pool of Tree-s for amortised memory allocation and resource control
|
||||
// supports order-agnostic concurrent segment writes
|
||||
// as well as sequential read and write
|
||||
// can not be called concurrently on more than one chunk
|
||||
// can be further appended after Sum
|
||||
// Reset gives back the Tree to the pool and guaranteed to leave
|
||||
// the tree and itself in a state reusable for hashing a new chunk
|
||||
type Hasher struct {
|
||||
pool *TreePool // BMT resource pool
|
||||
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
|
||||
blocksize int // segment size (size of hash) also for hash.Hash
|
||||
count int // segment count
|
||||
size int // for hash.Hash same as hashsize
|
||||
cur int // cursor position for rightmost currently open chunk
|
||||
segment []byte // the rightmost open segment (not complete)
|
||||
depth int // index of last level
|
||||
result chan []byte // result channel
|
||||
hash []byte // to record the result
|
||||
max int32 // max segments for SegmentWriter interface
|
||||
blockLength []byte // The block length that needes to be added in Sum
|
||||
}
|
||||
|
||||
// New creates a reusable Hasher
|
||||
// implements the hash.Hash interface
|
||||
// pulls a new Tree from a resource pool for hashing each chunk
|
||||
func New(p *TreePool) *Hasher {
|
||||
return &Hasher{
|
||||
pool: p,
|
||||
depth: depth(p.SegmentCount),
|
||||
size: p.SegmentSize,
|
||||
blocksize: p.SegmentSize,
|
||||
count: p.SegmentCount,
|
||||
result: make(chan []byte),
|
||||
}
|
||||
}
|
||||
|
||||
// Node is a reuseable segment hasher representing a node in a BMT
|
||||
// it allows for continued writes after a Sum
|
||||
// and is left in completely reusable state after Reset
|
||||
type Node struct {
|
||||
level, index int // position of node for information/logging only
|
||||
initial bool // first and last node
|
||||
root bool // whether the node is root to a smaller BMT
|
||||
isLeft bool // whether it is left side of the parent double segment
|
||||
unbalanced bool // indicates if a node has only the left segment
|
||||
parent *Node // BMT connections
|
||||
state int32 // atomic increment impl concurrent boolean toggle
|
||||
left, right []byte
|
||||
}
|
||||
|
||||
// NewNode constructor for segment hasher nodes in the BMT
|
||||
func NewNode(level, index int, parent *Node) *Node {
|
||||
return &Node{
|
||||
parent: parent,
|
||||
level: level,
|
||||
index: index,
|
||||
initial: index == 0,
|
||||
isLeft: index%2 == 0,
|
||||
}
|
||||
}
|
||||
|
||||
// TreePool provides a pool of Trees used as resources by Hasher
|
||||
// a Tree popped from the pool is guaranteed to have clean state
|
||||
// for hashing a new chunk
|
||||
// Hasher Reset releases the Tree to the pool
|
||||
type TreePool struct {
|
||||
lock sync.Mutex
|
||||
c chan *Tree
|
||||
hasher BaseHasher
|
||||
SegmentSize int
|
||||
SegmentCount int
|
||||
Capacity int
|
||||
count int
|
||||
}
|
||||
|
||||
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
|
||||
// on GetTree it reuses free Trees or creates a new one if size is not reached
|
||||
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
|
||||
return &TreePool{
|
||||
c: make(chan *Tree, capacity),
|
||||
hasher: hasher,
|
||||
SegmentSize: hasher().Size(),
|
||||
SegmentCount: segmentCount,
|
||||
Capacity: capacity,
|
||||
}
|
||||
}
|
||||
|
||||
// Drain drains the pool until it has no more than n resources
|
||||
func (self *TreePool) Drain(n int) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
for len(self.c) > n {
|
||||
<-self.c
|
||||
self.count--
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve is blocking until it returns an available Tree
|
||||
// it reuses free Trees or creates a new one if size is not reached
|
||||
func (self *TreePool) Reserve() *Tree {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
var t *Tree
|
||||
if self.count == self.Capacity {
|
||||
return <-self.c
|
||||
}
|
||||
select {
|
||||
case t = <-self.c:
|
||||
default:
|
||||
t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
|
||||
self.count++
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Release gives back a Tree to the pool.
|
||||
// This Tree is guaranteed to be in reusable state
|
||||
// does not need locking
|
||||
func (self *TreePool) Release(t *Tree) {
|
||||
self.c <- t // can never fail but...
|
||||
}
|
||||
|
||||
// Tree is a reusable control structure representing a BMT
|
||||
// organised in a binary tree
|
||||
// Hasher uses a TreePool to pick one for each chunk hash
|
||||
// the Tree is 'locked' while not in the pool
|
||||
type Tree struct {
|
||||
leaves []*Node
|
||||
}
|
||||
|
||||
// Draw draws the BMT (badly)
|
||||
func (self *Tree) Draw(hash []byte, d int) string {
|
||||
var left, right []string
|
||||
var anc []*Node
|
||||
for i, n := range self.leaves {
|
||||
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
|
||||
if i%2 == 0 {
|
||||
anc = append(anc, n.parent)
|
||||
}
|
||||
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
|
||||
}
|
||||
anc = self.leaves
|
||||
var hashes [][]string
|
||||
for l := 0; len(anc) > 0; l++ {
|
||||
var nodes []*Node
|
||||
hash := []string{""}
|
||||
for i, n := range anc {
|
||||
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
|
||||
if i%2 == 0 && n.parent != nil {
|
||||
nodes = append(nodes, n.parent)
|
||||
}
|
||||
}
|
||||
hash = append(hash, "")
|
||||
hashes = append(hashes, hash)
|
||||
anc = nodes
|
||||
}
|
||||
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
|
||||
total := 60
|
||||
del := " "
|
||||
var rows []string
|
||||
for i := len(hashes) - 1; i >= 0; i-- {
|
||||
var textlen int
|
||||
hash := hashes[i]
|
||||
for _, s := range hash {
|
||||
textlen += len(s)
|
||||
}
|
||||
if total < textlen {
|
||||
total = textlen + len(hash)
|
||||
}
|
||||
delsize := (total - textlen) / (len(hash) - 1)
|
||||
if delsize > len(del) {
|
||||
delsize = len(del)
|
||||
}
|
||||
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
|
||||
rows = append(rows, row)
|
||||
|
||||
}
|
||||
rows = append(rows, strings.Join(left, " "))
|
||||
rows = append(rows, strings.Join(right, " "))
|
||||
return strings.Join(rows, "\n") + "\n"
|
||||
}
|
||||
|
||||
// NewTree initialises the Tree by building up the nodes of a BMT
|
||||
// segment size is stipulated to be the size of the hash
|
||||
// segmentCount needs to be positive integer and does not need to be
|
||||
// a power of two and can even be an odd number
|
||||
// segmentSize * segmentCount determines the maximum chunk size
|
||||
// hashed using the tree
|
||||
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
|
||||
n := NewNode(0, 0, nil)
|
||||
n.root = true
|
||||
prevlevel := []*Node{n}
|
||||
// iterate over levels and creates 2^level nodes
|
||||
level := 1
|
||||
count := 2
|
||||
for d := 1; d <= depth(segmentCount); d++ {
|
||||
nodes := make([]*Node, count)
|
||||
for i := 0; i < len(nodes); i++ {
|
||||
parent := prevlevel[i/2]
|
||||
t := NewNode(level, i, parent)
|
||||
nodes[i] = t
|
||||
}
|
||||
prevlevel = nodes
|
||||
level++
|
||||
count *= 2
|
||||
}
|
||||
// the datanode level is the nodes on the last level where
|
||||
return &Tree{
|
||||
leaves: prevlevel,
|
||||
}
|
||||
}

// methods needed by hash.Hash

// Size returns the size
func (self *Hasher) Size() int {
	return self.size
}

// BlockSize returns the block size
func (self *Hasher) BlockSize() int {
	return self.blocksize
}

// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (self *Hasher) Sum(b []byte) (r []byte) {
	t := self.bmt
	i := self.cur
	n := t.leaves[i]
	j := i
	// must run strictly before all nodes calculate
	// datanodes are guaranteed to have a parent
	if len(self.segment) > self.size && i > 0 && n.parent != nil {
		n = n.parent
	} else {
		i *= 2
	}
	d := self.finalise(n, i)
	self.writeSegment(j, self.segment, d)
	c := <-self.result
	self.releaseTree()

	// sha3(length + BMT(pure_chunk))
	if self.blockLength == nil {
		return c
	}
	res := self.pool.hasher()
	res.Reset()
	res.Write(self.blockLength)
	res.Write(c)
	return res.Sum(nil)
}
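
// Sketch of the composition Sum implements: primed with ResetWithLength
// (defined below) the final digest is sha3(length || BMT(chunk)); after a
// plain Reset it is the raw BMT root. The byte encoding of the length is
// the caller's convention and is assumed, not enforced, here.
//
//	h.ResetWithLength(lengthBytes)
//	h.Write(chunkData)
//	swarmHash := h.Sum(nil)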

// Hasher implements the SwarmHash interface

// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (self *Hasher) Hash() []byte {
	return <-self.result
}

// Hasher implements the io.Writer interface

// Write fills the buffer to hash
// with every completed full segment it launches a hasher goroutine
// that propagates the hash up the BMT
func (self *Hasher) Write(b []byte) (int, error) {
	l := len(b)
	if l <= 0 {
		return 0, nil
	}
	s := self.segment
	i := self.cur
	count := (self.count + 1) / 2
	need := self.count*self.size - self.cur*2*self.size
	size := self.size
	if need > size {
		size *= 2
	}
	if l < need {
		need = l
	}
	// calculate the missing bit to complete the current open segment
	rest := size - len(s)
	if need < rest {
		rest = need
	}
	s = append(s, b[:rest]...)
	need -= rest
	// read full segments and the last possibly partial segment
	for need > 0 && i < count-1 {
		// push all finished chunks we read
		self.writeSegment(i, s, self.depth)
		need -= size
		if need < 0 {
			size += need
		}
		s = b[rest : rest+size]
		rest += size
		i++
	}
	self.segment = s
	self.cur = i
	// otherwise, we can assume len(s) == 0, so the whole buffer is read and the chunk is not yet full
	return l, nil
}

// Hasher implements the io.ReaderFrom interface

// ReadFrom reads from io.Reader and appends to the data to hash using Write
// it reads until the chunk to hash reaches maximum length or the reader reaches EOF
// caller must Reset the hasher prior to the call
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
	bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
	buf := make([]byte, bufsize)
	var read int
	for {
		var n int
		n, err = r.Read(buf)
		read += n
		if err == io.EOF || read == len(buf) {
			hash := self.Sum(buf[:n])
			if read == len(buf) {
				err = NewEOC(hash)
			}
			break
		}
		if err != nil {
			break
		}
		n, err = self.Write(buf[:n])
		if err != nil {
			break
		}
	}
	return int64(read), err
}

// Reset needs to be called before writing to the hasher
func (self *Hasher) Reset() {
	self.getTree()
	self.blockLength = nil
}

// Hasher implements the SwarmHash interface

// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (self *Hasher) ResetWithLength(l []byte) {
	self.Reset()
	self.blockLength = l
}

// releaseTree gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (self *Hasher) releaseTree() {
	if self.bmt != nil {
		n := self.bmt.leaves[self.cur]
		for ; n != nil; n = n.parent {
			n.unbalanced = false
			if n.parent != nil {
				n.root = false
			}
		}
		self.pool.Release(self.bmt)
		self.bmt = nil
	}
	self.cur = 0
	self.segment = nil
}

func (self *Hasher) writeSegment(i int, s []byte, d int) {
	h := self.pool.hasher()
	n := self.bmt.leaves[i]

	if len(s) > self.size && n.parent != nil {
		go func() {
			h.Reset()
			h.Write(s)
			s = h.Sum(nil)

			if n.root {
				self.result <- s
				return
			}
			self.run(n.parent, h, d, n.index, s)
		}()
		return
	}
	go self.run(n, h, d, i*2, s)
}

func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
	isLeft := i%2 == 0
	for {
		if isLeft {
			n.left = s
		} else {
			n.right = s
		}
		if !n.unbalanced && n.toggle() {
			return
		}
		if !n.unbalanced || !isLeft || i == 0 && d == 0 {
			h.Reset()
			h.Write(n.left)
			h.Write(n.right)
			s = h.Sum(nil)
		} else {
			s = append(n.left, n.right...)
		}

		self.hash = s
		if n.root {
			self.result <- s
			return
		}

		isLeft = n.isLeft
		n = n.parent
		i++
	}
}

// getTree obtains a BMT resource by reserving one from the pool
func (self *Hasher) getTree() *Tree {
	if self.bmt != nil {
		return self.bmt
	}
	t := self.pool.Reserve()
	self.bmt = t
	return t
}

// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic addint with %2 implements atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (self *Node) toggle() bool {
	return atomic.AddInt32(&self.state, 1)%2 == 1
}
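
// Sketch of the toggle contract relied on by run() above: when two writers
// race on a node, the first toggle returns true and that goroutine parks,
// the second returns false and proceeds to hash the completed pair.
//
//	n := NewNode(0, 0, nil)
//	first := n.toggle()  // true: state 0 -> 1, this writer waits
//	second := n.toggle() // false: state 1 -> 2, this writer continues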

func hashstr(b []byte) string {
	end := len(b)
	if end > 4 {
		end = 4
	}
	return fmt.Sprintf("%x", b[:end])
}

func depth(n int) (d int) {
	for l := (n - 1) / 2; l > 0; l /= 2 {
		d++
	}
	return d
}
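
// Worked values (sketch): depth counts the halvings of (n-1)/2 down to
// zero, i.e. the number of node levels below the root for n segments:
//
//	depth(2) == 0   depth(4) == 1   depth(8) == 2   depth(128) == 6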

// finalise follows the zigzag path on the tree belonging
// to the final data segment
func (self *Hasher) finalise(n *Node, i int) (d int) {
	isLeft := i%2 == 0
	for {
		// when the final segment's path is going via left segments
		// the incoming data is pushed to the parent upon pulling the left
		// we do not need to toggle the state since this condition is
		// detectable
		n.unbalanced = isLeft
		n.right = nil
		if n.initial {
			n.root = true
			return d
		}
		isLeft = n.isLeft
		n = n.parent
		d++
	}
}

// EOC (end of chunk) implements the error interface
type EOC struct {
	Hash []byte // read the hash of the chunk off the error
}

// Error returns the error string
func (self *EOC) Error() string {
	return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
}

// NewEOC creates a new end-of-chunk error carrying the hash
func NewEOC(hash []byte) *EOC {
	return &EOC{hash}
}
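
// Sketch of consuming ReadFrom together with EOC: a completely filled
// chunk surfaces as *EOC carrying the finished hash, while a short read
// ends with io.EOF after the hash has been computed internally.
//
//	n, err := h.ReadFrom(r)
//	if eoc, ok := err.(*EOC); ok {
//		// chunk full after n bytes; eoc.Hash is the chunk hash
//	}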

bmt/bmt_r.go
@@ -1,85 +0,0 @@

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt

import (
	"hash"
)

// RefHasher is the non-optimized, easy-to-read reference implementation of BMT
type RefHasher struct {
	span    int
	section int
	cap     int
	h       hash.Hash
}

// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
	h := hasher()
	hashsize := h.Size()
	maxsize := hashsize * count
	c := 2
	for ; c < count; c *= 2 {
	}
	if c > 2 {
		c /= 2
	}
	return &RefHasher{
		section: 2 * hashsize,
		span:    c * hashsize,
		cap:     maxsize,
		h:       h,
	}
}
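
// Worked example (sketch): for count = 128 and a 32-byte hash, c grows
// 2, 4, ..., 128 and is halved back to 64, so span = 64*32 = 2048 bytes,
// half of the 4096-byte cap; this is the split size hash() below recurses
// on first.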

// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
	if len(d) > rh.cap {
		d = d[:rh.cap]
	}

	return rh.hash(d, rh.span)
}

func (rh *RefHasher) hash(d []byte, s int) []byte {
	l := len(d)
	left := d
	var right []byte
	if l > rh.section {
		for ; s >= l; s /= 2 {
		}
		left = rh.hash(d[:s], s)
		right = d[s:]
		if l-s > rh.section/2 {
			right = rh.hash(right, s)
		}
	}
	defer rh.h.Reset()
	rh.h.Write(left)
	rh.h.Write(right)
	h := rh.h.Sum(nil)
	return h
}

bmt/bmt_test.go
@@ -1,481 +0,0 @@

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bmt

import (
	"bytes"
	crand "crypto/rand"
	"fmt"
	"hash"
	"io"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto/sha3"
)

const (
	maxproccnt = 8
)

// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
	hashFunc := sha3.NewKeccak256

	sha3 := func(data ...[]byte) []byte {
		h := hashFunc()
		for _, v := range data {
			h.Write(v)
		}
		return h.Sum(nil)
	}

	// the test struct is used to specify the expected BMT hash for data
	// lengths between "from" and "to"
	type test struct {
		from     int64
		to       int64
		expected func([]byte) []byte
	}

	var tests []*test

	// all lengths in [0,64] should be:
	//
	//   sha3(data)
	//
	tests = append(tests, &test{
		from: 0,
		to:   64,
		expected: func(data []byte) []byte {
			return sha3(data)
		},
	})

	// all lengths in [65,96] should be:
	//
	//   sha3(
	//     sha3(data[:64])
	//     data[64:]
	//   )
	//
	tests = append(tests, &test{
		from: 65,
		to:   96,
		expected: func(data []byte) []byte {
			return sha3(sha3(data[:64]), data[64:])
		},
	})

	// all lengths in [97,128] should be:
	//
	//   sha3(
	//     sha3(data[:64])
	//     sha3(data[64:])
	//   )
	//
	tests = append(tests, &test{
		from: 97,
		to:   128,
		expected: func(data []byte) []byte {
			return sha3(sha3(data[:64]), sha3(data[64:]))
		},
	})

	// all lengths in [129,160] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     data[128:]
	//   )
	//
	tests = append(tests, &test{
		from: 129,
		to:   160,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
		},
	})

	// all lengths in [161,192] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(data[128:])
	//   )
	//
	tests = append(tests, &test{
		from: 161,
		to:   192,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
		},
	})

	// all lengths in [193,224] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(
	//       sha3(data[128:192])
	//       data[192:]
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 193,
		to:   224,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
		},
	})

	// all lengths in [225,256] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(
	//       sha3(data[128:192])
	//       sha3(data[192:])
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 225,
		to:   256,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
		},
	})

	// run the tests
	for _, x := range tests {
		for length := x.from; length <= x.to; length++ {
			t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
				data := make([]byte, length)
				if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
					t.Fatal(err)
				}
				expected := x.expected(data)
				actual := NewRefHasher(hashFunc, 128).Hash(data)
				if !bytes.Equal(actual, expected) {
					t.Fatalf("expected %x, got %x", expected, actual)
				}
			})
		}
	}
}

func testDataReader(l int) (r io.Reader) {
	return io.LimitReader(crand.Reader, int64(l))
}

func TestHasherCorrectness(t *testing.T) {
	err := testHasher(testBaseHasher)
	if err != nil {
		t.Fatal(err)
	}
}

func testHasher(f func(BaseHasher, []byte, int, int) error) error {
	tdata := testDataReader(4128)
	data := make([]byte, 4128)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	size := hasher().Size()
	counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}

	var err error
	for _, count := range counts {
		max := count * size
		incr := 1
		for n := 0; n <= max+incr; n += incr {
			err = f(hasher, data, n, count)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func TestHasherReuseWithoutRelease(t *testing.T) {
	testHasherReuse(1, t)
}

func TestHasherReuseWithRelease(t *testing.T) {
	testHasherReuse(maxproccnt, t)
}

func testHasherReuse(i int, t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, i)
	defer pool.Drain(0)
	bmt := New(pool)

	for i := 0; i < 500; i++ {
		n := rand.Intn(4096)
		tdata := testDataReader(n)
		data := make([]byte, n)
		tdata.Read(data)

		err := testHasherCorrectness(bmt, hasher, data, n, 128)
		if err != nil {
			t.Fatal(err)
		}
	}
}

func TestHasherConcurrency(t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, maxproccnt)
	defer pool.Drain(0)
	wg := sync.WaitGroup{}
	cycles := 100
	wg.Add(maxproccnt * cycles)
	errc := make(chan error)

	for p := 0; p < maxproccnt; p++ {
		for i := 0; i < cycles; i++ {
			go func() {
				bmt := New(pool)
				n := rand.Intn(4096)
				tdata := testDataReader(n)
				data := make([]byte, n)
				tdata.Read(data)
				err := testHasherCorrectness(bmt, hasher, data, n, 128)
				wg.Done()
				if err != nil {
					errc <- err
				}
			}()
		}
	}
	go func() {
		wg.Wait()
		close(errc)
	}()
	var err error
	select {
	case <-time.NewTimer(5 * time.Second).C:
		err = fmt.Errorf("timed out")
	case err = <-errc:
	}
	if err != nil {
		t.Fatal(err)
	}
}

func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
	pool := NewTreePool(hasher, count, 1)
	defer pool.Drain(0)
	bmt := New(pool)
	return testHasherCorrectness(bmt, hasher, d, n, count)
}

func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
	data := d[:n]
	rbmt := NewRefHasher(hasher, count)
	exp := rbmt.Hash(data)
	timeout := time.NewTimer(time.Second)
	c := make(chan error)

	go func() {
		bmt.Reset()
		bmt.Write(data)
		got := bmt.Sum(nil)
		if !bytes.Equal(got, exp) {
			c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
		}
		close(c)
	}()
	select {
	case <-timeout.C:
		err = fmt.Errorf("BMT hash calculation timed out")
	case err = <-c:
	}
	return err
}

func BenchmarkSHA3_4k(t *testing.B)   { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B)   { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B)   { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }

func BenchmarkBMTBaseline_4k(t *testing.B)   { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B)   { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B)   { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }

func BenchmarkRefHasher_4k(t *testing.B)   { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B)   { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B)   { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }

func BenchmarkHasher_4k(t *testing.B)   { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B)   { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B)   { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }

func BenchmarkHasherNoReuse_4k(t *testing.B)   { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B)   { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B)   { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }

func BenchmarkHasherReuse_4k(t *testing.B)   { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B)   { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B)   { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }

// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes,
// spread over maxproccnt goroutines, each reusing its base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
	tdata := testDataReader(64)
	data := make([]byte, 64)
	tdata.Read(data)
	hasher := sha3.NewKeccak256

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		count := int32((n-1)/hasher().Size() + 1)
		wg := sync.WaitGroup{}
		wg.Add(maxproccnt)
		var i int32
		for j := 0; j < maxproccnt; j++ {
			go func() {
				defer wg.Done()
				h := hasher()
				for atomic.AddInt32(&i, 1) < count {
					h.Reset()
					h.Write(data)
					h.Sum(nil)
				}
			}()
		}
		wg.Wait()
	}
}

func benchmarkHasher(n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	size := 1
	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, size)
	bmt := New(pool)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		bmt.Reset()
		bmt.Write(data)
		bmt.Sum(nil)
	}
}

func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, poolsize)
	cycles := 200

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		wg := sync.WaitGroup{}
		wg.Add(cycles)
		for j := 0; j < cycles; j++ {
			bmt := New(pool)
			go func() {
				defer wg.Done()
				bmt.Reset()
				bmt.Write(data)
				bmt.Sum(nil)
			}()
		}
		wg.Wait()
	}
}

func benchmarkSHA3(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	h := hasher()

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		h.Reset()
		h.Write(data)
		h.Sum(nil)
	}
}

func benchmarkRefHasher(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	rbmt := NewRefHasher(hasher, 128)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		rbmt.Hash(data)
	}
}

build/ci.go
@@ -26,7 +26,7 @@ Available commands are:

install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
lint -- runs certain pre-selected linters
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
nsis -- creates a Windows NSIS installer
@@ -59,6 +59,8 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/internal/build"
	"github.com/ethereum/go-ethereum/params"
	sv "github.com/ethereum/go-ethereum/swarm/version"
)

var (
@@ -77,52 +79,81 @@ var (
		executablePath("geth"),
		executablePath("puppeth"),
		executablePath("rlpdump"),
		executablePath("swarm"),
		executablePath("wnode"),
	}

	// Files that end up in the swarm*.zip archive.
	swarmArchiveFiles = []string{
		"COPYING",
		executablePath("swarm"),
	}

	// A debian package is created for all executables listed here.
	debExecutables = []debExecutable{
		{
			Name:        "abigen",
			BinaryName:  "abigen",
			Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
		},
		{
			Name:        "bootnode",
			BinaryName:  "bootnode",
			Description: "Ethereum bootnode.",
		},
		{
			Name:        "evm",
			BinaryName:  "evm",
			Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
		},
		{
			Name:        "geth",
			BinaryName:  "geth",
			Description: "Ethereum CLI client.",
		},
		{
			Name:        "puppeth",
			BinaryName:  "puppeth",
			Description: "Ethereum private network manager.",
		},
		{
			Name:        "rlpdump",
			BinaryName:  "rlpdump",
			Description: "Developer utility tool that prints RLP structures.",
		},
		{
			Name:        "swarm",
			Description: "Ethereum Swarm daemon and tools",
		},
		{
			Name:        "wnode",
			BinaryName:  "wnode",
			Description: "Ethereum Whisper diagnostic tool",
		},
	}

	// A debian package is created for all executables listed here.
	debSwarmExecutables = []debExecutable{
		{
			BinaryName:  "swarm",
			PackageName: "ethereum-swarm",
			Description: "Ethereum Swarm daemon and tools",
		},
	}

	debEthereum = debPackage{
		Name:        "ethereum",
		Version:     params.Version,
		Executables: debExecutables,
	}

	debSwarm = debPackage{
		Name:        "ethereum-swarm",
		Version:     sv.Version,
		Executables: debSwarmExecutables,
	}

	// Debian meta packages to build and push to Ubuntu PPA
	debPackages = []debPackage{
		debSwarm,
		debEthereum,
	}

	// Distros for which packages are created.
	// Note: vivid is unsupported because there is no golang-1.6 package for it.
	// Note: wily is unsupported because it was officially deprecated on launchpad.
	// Note: yakkety is unsupported because it was officially deprecated on launchpad.
	// Note: zesty is unsupported because it was officially deprecated on launchpad.
	debDistros = []string{"trusty", "xenial", "artful", "bionic"}
	// Note: artful is unsupported because it was officially deprecated on launchpad.
	debDistros = []string{"trusty", "xenial", "bionic", "cosmic"}
)

var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -329,7 +360,11 @@ func doLint(cmdline []string) {
	// Run fast linters batched together
	configs := []string{
		"--vendor",
		"--tests",
		"--deadline=2m",
		"--disable-all",
		"--enable=goimports",
		"--enable=varcheck",
		"--enable=vet",
		"--enable=gofmt",
		"--enable=misspell",
@@ -340,13 +375,12 @@ func doLint(cmdline []string) {

	// Run slow linters one by one
	for _, linter := range []string{"unconvert", "gosimple"} {
		configs = []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter}
		configs = []string{"--vendor", "--tests", "--deadline=10m", "--disable-all", "--enable=" + linter}
		build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
	}
}

// Release Packaging

func doArchive(cmdline []string) {
	var (
		arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
@@ -366,10 +400,14 @@ func doArchive(cmdline []string) {
	}

	var (
		env = build.Env()
		base = archiveBasename(*arch, env)
		geth = "geth-" + base + ext
		alltools = "geth-alltools-" + base + ext
		env = build.Env()

		basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
		geth     = "geth-" + basegeth + ext
		alltools = "geth-alltools-" + basegeth + ext

		baseswarm = archiveBasename(*arch, sv.ArchiveVersion(env.Commit))
		swarm     = "swarm-" + baseswarm + ext
	)
	maybeSkipArchive(env)
	if err := build.WriteArchive(geth, gethArchiveFiles); err != nil {
@@ -378,14 +416,17 @@ func doArchive(cmdline []string) {
	if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil {
		log.Fatal(err)
	}
	for _, archive := range []string{geth, alltools} {
	if err := build.WriteArchive(swarm, swarmArchiveFiles); err != nil {
		log.Fatal(err)
	}
	for _, archive := range []string{geth, alltools, swarm} {
		if err := archiveUpload(archive, *upload, *signer); err != nil {
			log.Fatal(err)
		}
	}
}

func archiveBasename(arch string, env build.Environment) string {
func archiveBasename(arch string, archiveVersion string) string {
	platform := runtime.GOOS + "-" + arch
	if arch == "arm" {
		platform += os.Getenv("GOARM")
@@ -396,18 +437,7 @@ func archiveBasename(arch string, env build.Environment) string {
	if arch == "ios" {
		platform = "ios-all"
	}
	return platform + "-" + archiveVersion(env)
}

func archiveVersion(env build.Environment) string {
	version := build.VERSION()
	if isUnstableBuild(env) {
		version += "-unstable"
	}
	if env.Commit != "" {
		version += "-" + env.Commit[:8]
	}
	return version
	return platform + "-" + archiveVersion
}

func archiveUpload(archive string, blobstore string, signer string) error {
@@ -457,7 +487,6 @@ func maybeSkipArchive(env build.Environment) {
}

// Debian Packaging

func doDebianSource(cmdline []string) {
	var (
		signer = flag.String("signer", "", `Signing key name, also used as package author`)
@@ -481,21 +510,23 @@ func doDebianSource(cmdline []string) {
		build.MustRun(gpg)
	}

	// Create the packages.
	for _, distro := range debDistros {
		meta := newDebMetadata(distro, *signer, env, now)
		pkgdir := stageDebianSource(*workdir, meta)
		debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
		debuild.Dir = pkgdir
		build.MustRun(debuild)
	// Create Debian packages and upload them
	for _, pkg := range debPackages {
		for _, distro := range debDistros {
			meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
			pkgdir := stageDebianSource(*workdir, meta)
			debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
			debuild.Dir = pkgdir
			build.MustRun(debuild)

		changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
		changes = filepath.Join(*workdir, changes)
		if *signer != "" {
			build.MustRunCommand("debsign", changes)
		}
		if *upload != "" {
			build.MustRunCommand("dput", *upload, changes)
			changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
			changes = filepath.Join(*workdir, changes)
			if *signer != "" {
				build.MustRunCommand("debsign", changes)
			}
			if *upload != "" {
				build.MustRunCommand("dput", *upload, changes)
			}
		}
	}
}
@@ -520,9 +551,17 @@ func isUnstableBuild(env build.Environment) bool {
	return true
}

type debPackage struct {
	Name        string          // the name of the Debian package to produce, e.g. "ethereum", or "ethereum-swarm"
	Version     string          // the clean version of the debPackage, e.g. 1.8.12 or 0.3.0, without any metadata
	Executables []debExecutable // executables to be included in the package
}

type debMetadata struct {
	Env build.Environment

	PackageName string

	// go-ethereum version being built. Note that this
	// is not the debian package version. The package version
	// is constructed by VersionString.
@@ -534,21 +573,33 @@ type debMetadata struct {
}

type debExecutable struct {
	Name, Description string
	PackageName string
	BinaryName  string
	Description string
}

func newDebMetadata(distro, author string, env build.Environment, t time.Time) debMetadata {
// Package returns the name of the package if present, or
// falls back to BinaryName
func (d debExecutable) Package() string {
	if d.PackageName != "" {
		return d.PackageName
	}
	return d.BinaryName
}

func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
	if author == "" {
		// No signing key, use default author.
		author = "Ethereum Builds <fjl@ethereum.org>"
	}
	return debMetadata{
		PackageName: name,
		Env:         env,
		Author:      author,
		Distro:      distro,
		Version:     build.VERSION(),
		Version:     version,
		Time:        t.Format(time.RFC1123Z),
		Executables: debExecutables,
		Executables: exes,
	}
}

@@ -556,9 +607,9 @@ func newDebMetadata(distro, author string, env build.Environment, t time.Time) d
// on all executable packages.
func (meta debMetadata) Name() string {
	if isUnstableBuild(meta.Env) {
		return "ethereum-unstable"
		return meta.PackageName + "-unstable"
	}
	return "ethereum"
	return meta.PackageName
}

// VersionString returns the debian version of the packages.
@@ -585,9 +636,20 @@ func (meta debMetadata) ExeList() string {
// ExeName returns the package name of an executable package.
func (meta debMetadata) ExeName(exe debExecutable) string {
	if isUnstableBuild(meta.Env) {
		return exe.Name + "-unstable"
		return exe.Package() + "-unstable"
	}
	return exe.Name
	return exe.Package()
}

// EthereumSwarmPackageName returns the name of the swarm package based on
// environment, e.g. "ethereum-swarm-unstable", or "ethereum-swarm".
// This is needed so that we make sure that the "ethereum" package
// depends on and installs "ethereum-swarm"
func (meta debMetadata) EthereumSwarmPackageName() string {
	if isUnstableBuild(meta.Env) {
		return debSwarm.Name + "-unstable"
	}
	return debSwarm.Name
}

// ExeConflicts returns the content of the Conflicts field
@@ -602,7 +664,7 @@ func (meta debMetadata) ExeConflicts(exe debExecutable) string {
	// be preferred and the conflicting files should be handled via
	// alternates. We might do this eventually but using a conflict is
	// easier now.
	return "ethereum, " + exe.Name
	return "ethereum, " + exe.Package()
	}
	return ""
}
@@ -619,24 +681,23 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {

	// Put the debian build files in place.
	debian := filepath.Join(pkgdir, "debian")
	build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
	build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
	build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta)
	build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
	build.Render("build/deb/"+meta.PackageName+"/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
	build.Render("build/deb/"+meta.PackageName+"/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
	build.Render("build/deb/"+meta.PackageName+"/deb.control", filepath.Join(debian, "control"), 0644, meta)
	build.Render("build/deb/"+meta.PackageName+"/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
	build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
	build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
	for _, exe := range meta.Executables {
		install := filepath.Join(debian, meta.ExeName(exe)+".install")
		docs := filepath.Join(debian, meta.ExeName(exe)+".docs")
		build.Render("build/deb.install", install, 0644, exe)
		build.Render("build/deb.docs", docs, 0644, exe)
		build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
		build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
	}

	return pkgdir
}

// Windows installer

func doWindowsInstaller(cmdline []string) {
	// Parse the flags and make skip installer generation on PRs
	var (
@@ -686,11 +747,11 @@ func doWindowsInstaller(cmdline []string) {
	// Build the installer. This assumes that all the needed files have been previously
	// built (don't mix building and packaging to keep cross compilation complexity to a
	// minimum).
	version := strings.Split(build.VERSION(), ".")
	version := strings.Split(params.Version, ".")
	if env.Commit != "" {
		version[2] += "-" + env.Commit[:8]
	}
	installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, env) + ".exe")
	installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
	build.MustRunCommand("makensis.exe",
		"/DOUTPUTFILE="+installer,
		"/DMAJORVERSION="+version[0],
@@ -728,7 +789,7 @@ func doAndroidArchive(cmdline []string) {
	// Build the Android archive and Maven resources
	build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
	build.MustRun(gomobileTool("init", "--ndk", os.Getenv("ANDROID_NDK")))
	build.MustRun(gomobileTool("bind", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
	build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))

	if *local {
		// If we're building locally, copy bundle to build dir and skip Maven
@@ -742,7 +803,7 @@ func doAndroidArchive(cmdline []string) {
	maybeSkipArchive(env)

	// Sign and upload the archive to Azure
	archive := "geth-" + archiveBasename("android", env) + ".aar"
	archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
	os.Rename("geth.aar", archive)

	if err := archiveUpload(archive, *upload, *signer); err != nil {
@@ -752,14 +813,18 @@ func doAndroidArchive(cmdline []string) {
	os.Rename(archive, meta.Package+".aar")
	if *signer != "" && *deploy != "" {
		// Import the signing key into the local GPG instance
		if b64key := os.Getenv(*signer); b64key != "" {
			key, err := base64.StdEncoding.DecodeString(b64key)
			if err != nil {
				log.Fatalf("invalid base64 %s", *signer)
			}
			gpg := exec.Command("gpg", "--import")
			gpg.Stdin = bytes.NewReader(key)
			build.MustRun(gpg)
		b64key := os.Getenv(*signer)
		key, err := base64.StdEncoding.DecodeString(b64key)
		if err != nil {
			log.Fatalf("invalid base64 %s", *signer)
		}
		gpg := exec.Command("gpg", "--import")
		gpg.Stdin = bytes.NewReader(key)
		build.MustRun(gpg)

		keyID, err := build.PGPKeyID(string(key))
		if err != nil {
			log.Fatal(err)
		}
		// Upload the artifacts to Sonatype and/or Maven Central
		repo := *deploy + "/service/local/staging/deploy/maven2"
@@ -768,6 +833,7 @@ func doAndroidArchive(cmdline []string) {
		}
		build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
			"-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
			"-Dgpg.keyname="+keyID,
			"-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
	}
}
@@ -822,7 +888,7 @@ func newMavenMetadata(env build.Environment) mavenMetadata {
		}
	}
	// Render the version and package strings
	version := build.VERSION()
	version := params.Version
	if isUnstableBuild(env) {
		version += "-SNAPSHOT"
	}
@@ -849,7 +915,7 @@ func doXCodeFramework(cmdline []string) {
	// Build the iOS XCode framework
	build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
	build.MustRun(gomobileTool("init"))
	bind := gomobileTool("bind", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
	bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")

	if *local {
		// If we're building locally, use the build folder and stop afterwards
@@ -857,7 +923,7 @@ func doXCodeFramework(cmdline []string) {
		build.MustRun(bind)
		return
	}
	archive := "geth-" + archiveBasename("ios", env)
	archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
	if err := os.Mkdir(archive, os.ModePerm); err != nil {
		log.Fatal(err)
	}
@@ -913,7 +979,7 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
			}
		}
	}
	version := build.VERSION()
	version := params.Version
	if isUnstableBuild(env) {
		version += "-unstable." + env.Buildnum
	}
@@ -1009,23 +1075,14 @@ func doPurge(cmdline []string) {
	}
	for i := 0; i < len(blobs); i++ {
		for j := i + 1; j < len(blobs); j++ {
			iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified)
			if err != nil {
				log.Fatal(err)
			}
			jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified)
			if err != nil {
				log.Fatal(err)
			}
			if iTime.After(jTime) {
			if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
				blobs[i], blobs[j] = blobs[j], blobs[i]
			}
		}
	}
	// Filter out all archives more recent than the given threshold
	for i, blob := range blobs {
		timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified)
		if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour {
		if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
			blobs = blobs[:i]
			break
		}
build/clean_go_build_cache.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/sh

# Cleaning the Go cache only makes sense if we actually have Go installed... or
# if Go is actually callable. This does not hold true during deb packaging, so
# we need an explicit check to avoid build failures.
if ! command -v go > /dev/null; then
	exit
fi

version_gt() {
	test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
}

golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')

# Clean go build cache when go version is greater than or equal to 1.10
if !(version_gt 1.10 $golang_version); then
	go clean -cache
fi
@@ -1 +0,0 @@
build/bin/{{.Name}} usr/bin
build/deb/ethereum-swarm/deb.control
Normal file
@@ -0,0 +1,19 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.10
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum

{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
 {{.Description}}
{{end}}
@@ -1,4 +1,4 @@
Copyright 2016 The go-ethereum Authors
Copyright 2018 The go-ethereum Authors

go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
build/deb/ethereum-swarm/deb.install
Normal file
@@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin
build/deb/ethereum/deb.changelog
Normal file
@@ -0,0 +1,5 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low

  * git build of {{.Env.Commit}}

 -- {{.Author}} {{.Time}}
@@ -10,9 +10,9 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum

Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth and other tools
 Meta-package to install geth and other tools
Depends: ${misc:Depends}, {{.EthereumSwarmPackageName}}, {{.ExeList}}
Description: Meta-package to install geth, swarm, and other tools
 Meta-package to install geth, swarm and other tools

{{range .Executables}}
Package: {{$.ExeName .}}
build/deb/ethereum/deb.copyright
Normal file
@@ -0,0 +1,14 @@
Copyright 2018 The go-ethereum Authors

go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
build/deb/ethereum/deb.docs
Normal file
@@ -0,0 +1 @@
AUTHORS
build/deb/ethereum/deb.install
Normal file
@@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin
build/deb/ethereum/deb.rules
Normal file
@@ -0,0 +1,13 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1

override_dh_auto_build:
	build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}

override_dh_auto_test:

%:
	dh $@
build/goimports.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/sh

find_files() {
  find . ! \( \
      \( \
        -path '.github' \
        -o -path './build/_workspace' \
        -o -path './build/bin' \
        -o -path './crypto/bn256' \
        -o -path '*/vendor/*' \
      \) -prune \
    \) -name '*.go'
}

GOFMT="gofmt -s -w"
GOIMPORTS="goimports -w"
find_files | xargs $GOFMT
find_files | xargs $GOIMPORTS
@@ -29,7 +29,7 @@ import (
)

var (
	abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind")
	abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN")
	binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)")
	typFlag = flag.String("type", "", "Struct name for the binding (default = package name)")

@@ -75,16 +75,27 @@ func main() {
		bins  []string
		types []string
	)
	if *solFlag != "" {
	if *solFlag != "" || *abiFlag == "-" {
		// Generate the list of types to exclude from binding
		exclude := make(map[string]bool)
		for _, kind := range strings.Split(*excFlag, ",") {
			exclude[strings.ToLower(kind)] = true
		}
		contracts, err := compiler.CompileSolidity(*solcFlag, *solFlag)
		if err != nil {
			fmt.Printf("Failed to build Solidity contract: %v\n", err)
			os.Exit(-1)

		var contracts map[string]*compiler.Contract
		var err error
		if *solFlag != "" {
			contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag)
			if err != nil {
				fmt.Printf("Failed to build Solidity contract: %v\n", err)
				os.Exit(-1)
			}
		} else {
			contracts, err = contractsFromStdin()
			if err != nil {
				fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err)
				os.Exit(-1)
			}
		}
		// Gather all non-excluded contracts for binding
		for name, contract := range contracts {
@@ -138,3 +149,12 @@ func main() {
		os.Exit(-1)
	}
}

func contractsFromStdin() (map[string]*compiler.Contract, error) {
	bytes, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return nil, err
	}

	return compiler.ParseCombinedJSON(bytes, "", "", "", "")
}
@@ -12,6 +12,11 @@ synchronised with the chain or a particular Ethereum node that has no built-in (
Clef can run as a daemon on the same machine, or off a usb-stick like [usb armory](https://inversepath.com/usbarmory),
or a separate VM in a [QubesOS](https://www.qubes-os.org/)-type OS setup.

Check out

* the [tutorial](tutorial.md) for some concrete examples of how the signer works.
* the [setup docs](docs/setup.md) for some information on how to configure it to work on QubesOS or USBArmory.

## Command line flags
Clef accepts the following command line options:
@@ -49,7 +54,6 @@ Example:
signer -keystore /my/keystore -chainid 4
```

Check out the [tutorial](tutorial.md) for some concrete examples of how the signer works.

## Security model

@@ -862,3 +866,12 @@ A UI should conform to the following rules.
  along with the UI.


### UI Implementations

There are a couple of implementations of a UI. We'll try to keep this list up to date.

| Name | Repo | UI type| No external resources| Blocky support| Verifies permissions | Hash information | No secondary storage | Statically linked| Can modify parameters|
| ---- | ---- | -------| ---- | ---- | ---- |---- | ---- | ---- | ---- |
| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
BIN cmd/clef/docs/qubes/clef_qubes_http.png (new image, 14 KiB; binary not shown)
BIN cmd/clef/docs/qubes/clef_qubes_qrexec.png (new image, 20 KiB; binary not shown)
BIN cmd/clef/docs/qubes/qrexec-example.png (new image, 19 KiB; binary not shown)
cmd/clef/docs/qubes/qubes-client.py
Normal file
@@ -0,0 +1,23 @@
"""
This implements a dispatcher which listens to localhost:8550, and proxies
requests via qrexec to the service qubes.EthSign on a target domain
"""

import http.server
import socketserver,subprocess

PORT=8550
TARGET_DOMAIN= 'debian-work'

class Dispatcher(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        post_data = self.rfile.read(int(self.headers['Content-Length']))
        p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        output = p.communicate(post_data)[0]
        self.wfile.write(output)


with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
    print("Serving at port", PORT)
    httpd.serve_forever()
cmd/clef/docs/qubes/qubes.Clefsign
Normal file
@@ -0,0 +1,16 @@
#!/bin/bash

SIGNER_BIN="/home/user/tools/clef/clef"
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"

# Start clef if not already started
if [ ! -S /home/user/.clef/clef.ipc ]; then
	$SIGNER_CMD &
	sleep 1
fi

# Should be started by now
if [ -S /home/user/.clef/clef.ipc ]; then
	# Post incoming request to HTTP channel
	curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
fi
BIN cmd/clef/docs/qubes/qubes_newaccount-1.png (new image, 25 KiB; binary not shown)
BIN cmd/clef/docs/qubes/qubes_newaccount-2.png (new image, 42 KiB; binary not shown)
cmd/clef/docs/setup.md
Normal file
@@ -0,0 +1,198 @@
|
||||
# Setting up Clef
|
||||
|
||||
This document describes how Clef can be used in a more secure manner than executing it from your everyday laptop,
|
||||
in order to ensure that the keys remain safe in the event that your computer should get compromised.
|
||||
|
||||
## Qubes OS
|
||||
|
||||
|
||||
### Background
|
||||
|
||||
The Qubes operating system is based around virtual machines (qubes), where a set of virtual machines are configured, typically for
|
||||
different purposes such as:
|
||||
|
||||
- personal
|
||||
- Your personal email, browsing etc
|
||||
- work
|
||||
- Work email etc
|
||||
- vault
|
||||
- a VM without network access, where gpg-keys and/or keepass credentials are stored.
|
||||
|
||||
A couple of dedicated virtual machines handle externalities:
|
||||
|
||||
- sys-net provides networking to all other (network-enabled) machines
|
||||
- sys-firewall handles firewall rules
|
||||
- sys-usb handles USB devices, and can map usb-devices to certain qubes.
|
||||
|
||||
The goal of this document is to describe how we can set up clef to provide secure transaction
|
||||
signing from a `vault` vm, to another networked qube which runs Dapps.
|
||||
|
||||
### Setup
|
||||
|
||||
There are two ways that this can be achieved: integrated via Qubes or integrated via networking.
|
||||
|
||||
|
||||
#### 1. Qubes Integrated
|
||||
|
||||
Qubes provdes a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request
|
||||
to another qube. The OS then asks the user if the call is permitted.
|
||||
|
||||

|
||||
|
||||
A policy-file can be created to allow such interaction. On the `target` domain, a service is invoked which can read the
|
||||
`stdin` from the `client` qube.
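
As a rough sketch of what such a policy could look like, a policy file would live on `dom0`; the file name matches the service defined below, while the qube names and the `ask` action are assumptions to adapt to your own setup:

```
# /etc/qubes-rpc/policy/qubes.Clefsign on dom0 -- hypothetical qube names
work    vault   ask
$anyvm  $anyvm  deny
```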
This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented. We can set up Clef the same way:

##### Server

![](qubes/clef_qubes_qrexec.png)

On the `target` qube, we need to define the RPC service.

[qubes.Clefsign](qubes/qubes.Clefsign):

```bash
#!/bin/bash

SIGNER_BIN="/home/user/tools/clef/clef"
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"

# Start clef if not already started
if [ ! -S /home/user/.clef/clef.ipc ]; then
    $SIGNER_CMD &
    sleep 1
fi

# Should be started by now
if [ -S /home/user/.clef/clef.ipc ]; then
    # Post incoming request to HTTP channel
    curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
fi
```

This RPC service is not complete (see notes about HTTP headers below), but works as a proof-of-concept.
It will forward the data received on `stdin` (forwarded by the OS) to Clef's HTTP channel.

It would have been possible to send data directly to the `/home/user/.clef/clef.ipc`
socket via e.g. `nc -U /home/user/.clef/clef.ipc`, but the reason for sending the request
data over `HTTP` instead of `IPC` is that we want the ability to forward `HTTP` headers.
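
For illustration only, a raw request against the IPC socket would have looked roughly like the sketch below; `account_list` is an assumed example method here, not something prescribed by this document:

```bash
echo '{"id": 1, "jsonrpc": "2.0", "method": "account_list", "params": []}' \
  | nc -U /home/user/.clef/clef.ipc
```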
To enable the service:

```bash
sudo cp qubes.Clefsign /etc/qubes-rpc/
sudo chmod +x /etc/qubes-rpc/qubes.Clefsign
```

This setup uses [gtksigner](https://github.com/holiman/gtksigner), which is a very minimal GTK-based UI
with few external requirements.
##### Client

On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it.

[qubes-client.py](qubes/client/qubes-client.py):

```python
"""
This implements a dispatcher which listens to localhost:8550, and proxies
requests via qrexec to the service qubes.Clefsign on a target domain
"""

import http.server
import socketserver
import subprocess

PORT = 8550
TARGET_DOMAIN = 'debian-work'

class Dispatcher(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        post_data = self.rfile.read(int(self.headers['Content-Length']))
        p = subprocess.Popen(['/usr/bin/qrexec-client-vm', TARGET_DOMAIN, 'qubes.Clefsign'],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        output = p.communicate(post_data)[0]
        self.wfile.write(output)

with socketserver.TCPServer(("", PORT), Dispatcher) as httpd:
    print("Serving at port", PORT)
    httpd.serve_forever()
```

#### Testing

To test the flow, if we have set up `debian-work` as the `target`, we can do

```bash
$ cat newaccnt.json
{ "id": 0, "jsonrpc": "2.0", "method": "account_new", "params": [] }

$ cat newaccnt.json | qrexec-client-vm debian-work qubes.Clefsign
```

This should first pop up a dialog to allow the IPC call:

![](qubes/qubes_newaccount-1.png)

Followed by a GTK dialog to approve the operation:

![](qubes/qubes_newaccount-2.png)

To test the full flow, we use the client wrapper. Start it on the `client` qube:
```
[user@work qubes]$ python3 qubes-client.py
```

Make the request over HTTP (`client` qube):
```
[user@work clef]$ cat newaccnt.json | curl -X POST -d @- http://localhost:8550
```
And it should show the same popups again.
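
For reference, a successful `account_new` call comes back as an ordinary JSON-RPC 2.0 response; the `result` payload below is only a placeholder, since its exact shape is defined by the external API changelog rather than this document:

```
{"id": 0, "jsonrpc": "2.0", "result": "0x..."}
```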
##### Pros and cons

The benefits of this setup are:

- This is the Qubes OS intended model for inter-qube communication,
- and thus benefits from Qubes OS dialogs and policies for user approval.

However, it comes with a couple of drawbacks:

- The client (here `qubes-client.py`) must forward the HTTP request via RPC to the `target` qube. When doing so, the proxy
  will either drop important headers, or replace them.
  - The `Host` header is most likely `localhost`
  - The `Origin` header must be forwarded
  - Information about the remote IP must be added as an `X-Forwarded-For` header. However, Clef cannot always trust an `XFF` header,
    since malicious clients may lie about `XFF` in order to fool the HTTP server into believing the request comes from another address.
- Even with a policy in place to allow RPC calls between `caller` and `target`, there will be several popups:
  - One Qubes-specific popup where the user specifies the `target` vm
  - One Clef-specific popup to approve the transaction

#### 2. Network integrated

The second way to set up Clef on a Qubes system is to allow networking, and have Clef listen on a port which is accessible
from other qubes.

![](qubes/clef_qubes_http.png)
## USBArmory

The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-sized
computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network
to your computer. Over this new network interface, you can SSH into the device.

Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only
ever connects to a local network between your computer and the device itself.

Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access
to the USB Armory would trivially be able to extract the contents of the device filesystem.
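
As a hedged illustration only (the device address and user name below are common defaults of USB armory images, not something this document specifies), a session could look like:

```bash
# Copy an ARM build of clef onto the armory and run it over SSH
scp clef-arm usbarmory@10.0.0.1:~/clef
ssh usbarmory@10.0.0.1 './clef'
```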
@@ -23,17 +23,18 @@ import (
 	"context"
 	"crypto/rand"
 	"crypto/sha256"
+	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
+	"os/signal"
 	"os/user"
 	"path/filepath"
 	"runtime"
 	"strings"
 
-	"encoding/hex"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -44,14 +45,13 @@ import (
 	"github.com/ethereum/go-ethereum/signer/rules"
 	"github.com/ethereum/go-ethereum/signer/storage"
 	"gopkg.in/urfave/cli.v1"
-	"os/signal"
 )
 
-// ExternalApiVersion -- see extapi_changelog.md
-const ExternalApiVersion = "2.0.0"
+// ExternalAPIVersion -- see extapi_changelog.md
+const ExternalAPIVersion = "2.0.0"
 
-// InternalApiVersion -- see intapi_changelog.md
-const InternalApiVersion = "2.0.0"
+// InternalAPIVersion -- see intapi_changelog.md
+const InternalAPIVersion = "2.0.0"
 
 const legalWarning = `
 WARNING!
@@ -398,10 +398,10 @@ func signer(c *cli.Context) error {
 	}
 	// register signer API with server
 	var (
-		extapiUrl = "n/a"
-		ipcApiUrl = "n/a"
+		extapiURL = "n/a"
+		ipcapiURL = "n/a"
 	)
-	rpcApi := []rpc.API{
+	rpcAPI := []rpc.API{
 		{
 			Namespace: "account",
 			Public:    true,
@@ -415,12 +415,12 @@ func signer(c *cli.Context) error {
 
 	// start http server
 	httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
-	listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcApi, []string{"account"}, cors, vhosts)
+	listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
 	if err != nil {
 		utils.Fatalf("Could not start RPC api: %v", err)
 	}
-	extapiUrl = fmt.Sprintf("http://%s", httpEndpoint)
-	log.Info("HTTP endpoint opened", "url", extapiUrl)
+	extapiURL = fmt.Sprintf("http://%s", httpEndpoint)
+	log.Info("HTTP endpoint opened", "url", extapiURL)
 
 	defer func() {
 		listener.Close()
@@ -430,19 +430,19 @@ func signer(c *cli.Context) error {
 	}
 	if !c.Bool(utils.IPCDisabledFlag.Name) {
 		if c.IsSet(utils.IPCPathFlag.Name) {
-			ipcApiUrl = c.String(utils.IPCPathFlag.Name)
+			ipcapiURL = c.String(utils.IPCPathFlag.Name)
 		} else {
-			ipcApiUrl = filepath.Join(configDir, "clef.ipc")
+			ipcapiURL = filepath.Join(configDir, "clef.ipc")
 		}
 
-		listener, _, err := rpc.StartIPCEndpoint(func() bool { return true }, ipcApiUrl, rpcApi)
+		listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
 		if err != nil {
 			utils.Fatalf("Could not start IPC api: %v", err)
 		}
-		log.Info("IPC endpoint opened", "url", ipcApiUrl)
+		log.Info("IPC endpoint opened", "url", ipcapiURL)
 		defer func() {
 			listener.Close()
-			log.Info("IPC endpoint closed", "url", ipcApiUrl)
+			log.Info("IPC endpoint closed", "url", ipcapiURL)
 		}()
 
 	}
@@ -453,10 +453,10 @@ func signer(c *cli.Context) error {
 	}
 	ui.OnSignerStartup(core.StartupInfo{
 		Info: map[string]interface{}{
-			"extapi_version": ExternalApiVersion,
-			"intapi_version": InternalApiVersion,
-			"extapi_http":    extapiUrl,
-			"extapi_ipc":     ipcApiUrl,
+			"extapi_version": ExternalAPIVersion,
+			"intapi_version": InternalAPIVersion,
+			"extapi_http":    extapiURL,
+			"extapi_ipc":     ipcapiURL,
 		},
 	})
 
72  cmd/ethkey/changepassphrase.go  (new file)
@@ -0,0 +1,72 @@
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"gopkg.in/urfave/cli.v1"
)

var newPassphraseFlag = cli.StringFlag{
	Name:  "newpasswordfile",
	Usage: "the file that contains the new passphrase for the keyfile",
}

var commandChangePassphrase = cli.Command{
	Name:      "changepassphrase",
	Usage:     "change the passphrase on a keyfile",
	ArgsUsage: "<keyfile>",
	Description: `
Change the passphrase of a keyfile.`,
	Flags: []cli.Flag{
		passphraseFlag,
		newPassphraseFlag,
	},
	Action: func(ctx *cli.Context) error {
		keyfilepath := ctx.Args().First()

		// Read key from file.
		keyjson, err := ioutil.ReadFile(keyfilepath)
		if err != nil {
			utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
		}

		// Decrypt key with passphrase.
		passphrase := getPassphrase(ctx)
		key, err := keystore.DecryptKey(keyjson, passphrase)
		if err != nil {
			utils.Fatalf("Error decrypting key: %v", err)
		}

		// Get a new passphrase.
		fmt.Println("Please provide a new passphrase")
		var newPhrase string
		if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" {
			content, err := ioutil.ReadFile(passFile)
			if err != nil {
				utils.Fatalf("Failed to read new passphrase file '%s': %v", passFile, err)
			}
			newPhrase = strings.TrimRight(string(content), "\r\n")
		} else {
			newPhrase = promptPassphrase(true)
		}

		// Encrypt the key with the new passphrase.
		newJson, err := keystore.EncryptKey(key, newPhrase, keystore.StandardScryptN, keystore.StandardScryptP)
		if err != nil {
			utils.Fatalf("Error encrypting with new passphrase: %v", err)
		}

		// Then write the new keyfile in place of the old one (0600: owner read/write only).
		if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
			utils.Fatalf("Error writing new keyfile to disk: %v", err)
		}

		// Don't print anything. Just return successfully,
		// producing a positive exit code.
		return nil
	},
}
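
A hedged usage sketch for the new command (file names are placeholders; the flag name is the one defined in this diff, and the current passphrase is prompted for or read via the pre-existing `--passwordfile` flag):

```bash
# Prompts for the current passphrase, then reads the new one from newpass.txt
ethkey changepassphrase --newpasswordfile newpass.txt mykey.json
```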
@@ -90,7 +90,7 @@ If you want to encrypt an existing private key, it can be specified by setting
 	}
 
 	// Encrypt key with passphrase.
-	passphrase := getPassPhrase(ctx, true)
+	passphrase := promptPassphrase(true)
 	keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
 	if err != nil {
 		utils.Fatalf("Error encrypting key: %v", err)
@@ -60,7 +60,7 @@ make sure to use this feature with great caution!`,
 	}
 
 	// Decrypt key with passphrase.
-	passphrase := getPassPhrase(ctx, false)
+	passphrase := getPassphrase(ctx)
 	key, err := keystore.DecryptKey(keyjson, passphrase)
 	if err != nil {
 		utils.Fatalf("Error decrypting key: %v", err)
@@ -38,6 +38,7 @@ func init() {
 	app.Commands = []cli.Command{
 		commandGenerate,
 		commandInspect,
+		commandChangePassphrase,
 		commandSignMessage,
 		commandVerifyMessage,
 	}
@@ -53,10 +54,6 @@ var (
 		Name:  "json",
 		Usage: "output JSON instead of human-readable format",
 	}
-	messageFlag = cli.StringFlag{
-		Name:  "message",
-		Usage: "the file that contains the message to sign/verify",
-	}
 )
 
 func main() {
@@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag.
 	}
 
 	// Decrypt key with passphrase.
-	passphrase := getPassPhrase(ctx, false)
+	passphrase := getPassphrase(ctx)
 	key, err := keystore.DecryptKey(keyjson, passphrase)
 	if err != nil {
 		utils.Fatalf("Error decrypting key: %v", err)
@@ -28,11 +28,32 @@ import (
 	"gopkg.in/urfave/cli.v1"
 )
 
-// getPassPhrase obtains a passphrase given by the user. It first checks the
-// --passphrase command line flag and ultimately prompts the user for a
+// promptPassphrase prompts the user for a passphrase. Set confirmation to true
+// to require the user to confirm the passphrase.
+func promptPassphrase(confirmation bool) string {
+	passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
+	if err != nil {
+		utils.Fatalf("Failed to read passphrase: %v", err)
+	}
+
+	if confirmation {
+		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
+		if err != nil {
+			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
+		}
+		if passphrase != confirm {
+			utils.Fatalf("Passphrases do not match")
+		}
+	}
+
+	return passphrase
+}
+
+// getPassphrase obtains a passphrase given by the user. It first checks the
+// --passfile command line flag and ultimately prompts the user for a
 // passphrase.
-func getPassPhrase(ctx *cli.Context, confirmation bool) string {
-	// Look for the --passphrase flag.
+func getPassphrase(ctx *cli.Context) string {
+	// Look for the --passwordfile flag.
 	passphraseFile := ctx.String(passphraseFlag.Name)
 	if passphraseFile != "" {
 		content, err := ioutil.ReadFile(passphraseFile)
@@ -44,20 +65,7 @@ func getPassPhrase(ctx *cli.Context, confirmation bool) string {
 	}
 
 	// Otherwise prompt the user for the passphrase.
-	passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
-	if err != nil {
-		utils.Fatalf("Failed to read passphrase: %v", err)
-	}
-	if confirmation {
-		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
-		if err != nil {
-			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
-		}
-		if passphrase != confirm {
-			utils.Fatalf("Passphrases do not match")
-		}
-	}
-	return passphrase
+	return promptPassphrase(false)
 }
 
 // signHash is a helper function that calculates a hash for the given message
@@ -50,6 +50,6 @@ func compileCmd(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	fmt.Println(bin)
+	fmt.Fprintln(ctx.App.Writer, bin)
 	return nil
 }
@@ -45,6 +45,6 @@ func disasmCmd(ctx *cli.Context) error {
 	}
 
 	code := strings.TrimSpace(string(in[:]))
-	fmt.Printf("%v\n", code)
+	fmt.Fprintf(ctx.App.Writer, "%v\n", code)
 	return asm.PrintDisassembled(code)
 }
@@ -30,10 +30,11 @@ func Compile(fn string, src []byte, debug bool) (string, error) {
 	bin, compileErrors := compiler.Compile()
 	if len(compileErrors) > 0 {
 		// report errors
+		errs := ""
 		for _, err := range compileErrors {
-			fmt.Printf("%s:%v\n", fn, err)
+			errs += fmt.Sprintf("%s:%v\n", fn, err)
 		}
-		return "", errors.New("compiling failed")
+		return "", errors.New(errs + "compiling failed\n")
 	}
 	return bin, nil
 }
@@ -32,6 +32,8 @@ type JSONLogger struct {
 	cfg *vm.LogConfig
 }
 
+// NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
+// into the provided stream.
 func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
 	return &JSONLogger{json.NewEncoder(writer), cfg}
 }
@@ -21,12 +21,12 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"math/big"
 	"os"
+	goruntime "runtime"
 	"runtime/pprof"
 	"time"
 
-	goruntime "runtime"
-
 	"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
@@ -86,6 +86,7 @@ func runCmd(ctx *cli.Context) error {
 		chainConfig *params.ChainConfig
 		sender      = common.BytesToAddress([]byte("sender"))
 		receiver    = common.BytesToAddress([]byte("receiver"))
+		blockNumber uint64
 	)
 	if ctx.GlobalBool(MachineFlag.Name) {
 		tracer = NewJSONLogger(logconfig, os.Stdout)
@@ -97,13 +98,13 @@ func runCmd(ctx *cli.Context) error {
 	}
 	if ctx.GlobalString(GenesisFlag.Name) != "" {
 		gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
-		db, _ := ethdb.NewMemDatabase()
+		db := ethdb.NewMemDatabase()
 		genesis := gen.ToBlock(db)
 		statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
 		chainConfig = gen.Config
+		blockNumber = gen.Number
 	} else {
-		db, _ := ethdb.NewMemDatabase()
-		statedb, _ = state.New(common.Hash{}, state.NewDatabase(db))
+		statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
 	}
 	if ctx.GlobalString(SenderFlag.Name) != "" {
 		sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
@@ -127,13 +128,13 @@ func runCmd(ctx *cli.Context) error {
 		if ctx.GlobalString(CodeFileFlag.Name) == "-" {
 			//Try reading from stdin
 			if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
-				fmt.Printf("Could not load code from stdin: %v\n", err)
+				fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from stdin: %v\n", err)
 				os.Exit(1)
 			}
 		} else {
 			// Codefile with hex assembly
 			if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
-				fmt.Printf("Could not load code from file: %v\n", err)
+				fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from file: %v\n", err)
 				os.Exit(1)
 			}
 		}
@@ -156,11 +157,12 @@ func runCmd(ctx *cli.Context) error {
 
 	initialGas := ctx.GlobalUint64(GasFlag.Name)
 	runtimeConfig := runtime.Config{
-		Origin:   sender,
-		State:    statedb,
-		GasLimit: initialGas,
-		GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
-		Value:    utils.GlobalBig(ctx, ValueFlag.Name),
+		Origin:      sender,
+		State:       statedb,
+		GasLimit:    initialGas,
+		GasPrice:    utils.GlobalBig(ctx, PriceFlag.Name),
+		Value:       utils.GlobalBig(ctx, ValueFlag.Name),
+		BlockNumber: new(big.Int).SetUint64(blockNumber),
 		EVMConfig: vm.Config{
 			Tracer: tracer,
 			Debug:  ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
@@ -170,11 +172,11 @@ func runCmd(ctx *cli.Context) error {
 	if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
 		f, err := os.Create(cpuProfilePath)
 		if err != nil {
-			fmt.Println("could not create CPU profile: ", err)
+			fmt.Fprintf(ctx.App.ErrWriter, "could not create CPU profile: %v\n", err)
 			os.Exit(1)
 		}
 		if err := pprof.StartCPUProfile(f); err != nil {
-			fmt.Println("could not start CPU profile: ", err)
+			fmt.Fprintf(ctx.App.ErrWriter, "could not start CPU profile: %v\n", err)
 			os.Exit(1)
 		}
 		defer pprof.StopCPUProfile()
@@ -198,17 +200,17 @@ func runCmd(ctx *cli.Context) error {
 
 	if ctx.GlobalBool(DumpFlag.Name) {
 		statedb.IntermediateRoot(true)
-		fmt.Println(string(statedb.Dump()))
+		fmt.Fprintln(ctx.App.Writer, string(statedb.Dump()))
 	}
 
 	if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
 		f, err := os.Create(memProfilePath)
 		if err != nil {
-			fmt.Println("could not create memory profile: ", err)
+			fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
 			os.Exit(1)
 		}
 		if err := pprof.WriteHeapProfile(f); err != nil {
-			fmt.Println("could not write memory profile: ", err)
+			fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
 			os.Exit(1)
 		}
 		f.Close()
@@ -216,17 +218,17 @@ func runCmd(ctx *cli.Context) error {
 
 	if ctx.GlobalBool(DebugFlag.Name) {
 		if debugLogger != nil {
-			fmt.Fprintln(os.Stderr, "#### TRACE ####")
-			vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
+			fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
+			vm.WriteTrace(ctx.App.ErrWriter, debugLogger.StructLogs())
 		}
-		fmt.Fprintln(os.Stderr, "#### LOGS ####")
-		vm.WriteLogs(os.Stderr, statedb.Logs())
+		fmt.Fprintln(ctx.App.ErrWriter, "#### LOGS ####")
+		vm.WriteLogs(ctx.App.ErrWriter, statedb.Logs())
 	}
 
 	if ctx.GlobalBool(StatDumpFlag.Name) {
 		var mem goruntime.MemStats
 		goruntime.ReadMemStats(&mem)
-		fmt.Fprintf(os.Stderr, `evm execution time: %v
+		fmt.Fprintf(ctx.App.ErrWriter, `evm execution time: %v
 heap objects:       %d
 allocations:        %d
 total allocations:  %d
@@ -236,9 +238,9 @@ Gas used: %d
 `, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
 	}
 	if tracer == nil {
-		fmt.Printf("0x%x\n", ret)
+		fmt.Fprintf(ctx.App.Writer, "0x%x\n", ret)
 		if err != nil {
-			fmt.Printf(" error: %v\n", err)
+			fmt.Fprintf(ctx.App.ErrWriter, " error: %v\n", err)
 		}
 	}
 
@@ -38,6 +38,8 @@ var stateTestCommand = cli.Command{
 	ArgsUsage: "<file>",
 }
 
+// StatetestResult contains the execution status after running a state test, any
+// error that might have occurred and a dump of the final state if requested.
 type StatetestResult struct {
 	Name string `json:"name"`
 	Pass bool   `json:"pass"`
@@ -105,7 +107,7 @@ func stateTestCmd(ctx *cli.Context) error {
 			}
 			// print state root for evmlab tracing (already committed above, so no need to delete objects again
 			if ctx.GlobalBool(MachineFlag.Name) && state != nil {
-				fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
+				fmt.Fprintf(ctx.App.ErrWriter, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
 			}
 
 			results = append(results, *result)
@@ -113,13 +115,13 @@ func stateTestCmd(ctx *cli.Context) error {
 			// Print any structured logs collected
 			if ctx.GlobalBool(DebugFlag.Name) {
 				if debugger != nil {
-					fmt.Fprintln(os.Stderr, "#### TRACE ####")
-					vm.WriteTrace(os.Stderr, debugger.StructLogs())
+					fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
+					vm.WriteTrace(ctx.App.ErrWriter, debugger.StructLogs())
 				}
 			}
 		}
 	}
 	out, _ := json.MarshalIndent(results, "", "  ")
-	fmt.Println(string(out))
+	fmt.Fprintln(ctx.App.Writer, string(out))
 	return nil
 }
@@ -77,9 +77,6 @@ var (
 	accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
 	accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
 
-	githubUser  = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
-	githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")
-
 	captchaToken  = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
 	captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
@@ -216,7 +213,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
 	// Assemble the raw devp2p protocol stack
 	stack, err := node.New(&node.Config{
 		Name:    "geth",
-		Version: params.Version,
+		Version: params.VersionWithMeta,
 		DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
 		P2P: p2p.Config{
 			NAT: nat.Any(),
@@ -474,7 +471,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
 			amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
 
 			tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
-			signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
+			signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
 			if err != nil {
 				f.lock.Unlock()
 				if err = sendError(conn, err); err != nil {
@@ -638,59 +635,6 @@ func sendSuccess(conn *websocket.Conn, msg string) error {
 	return send(conn, map[string]string{"success": msg}, time.Second)
 }
 
-// authGitHub tries to authenticate a faucet request using GitHub gists, returning
-// the username, avatar URL and Ethereum address to fund on success.
-func authGitHub(url string) (string, string, common.Address, error) {
-	// Retrieve the gist from the GitHub Gist APIs
-	parts := strings.Split(url, "/")
-	req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
-	if *githubUser != "" {
-		req.SetBasicAuth(*githubUser, *githubToken)
-	}
-	res, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return "", "", common.Address{}, err
-	}
-	var gist struct {
-		Owner struct {
-			Login string `json:"login"`
-		} `json:"owner"`
-		Files map[string]struct {
-			Content string `json:"content"`
-		} `json:"files"`
-	}
-	err = json.NewDecoder(res.Body).Decode(&gist)
-	res.Body.Close()
-	if err != nil {
-		return "", "", common.Address{}, err
-	}
-	if gist.Owner.Login == "" {
-		return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
-	}
-	// Iterate over all the files and look for Ethereum addresses
-	var address common.Address
-	for _, file := range gist.Files {
-		content := strings.TrimSpace(file.Content)
-		if len(content) == 2+common.AddressLength*2 {
-			address = common.HexToAddress(content)
-		}
-	}
-	if address == (common.Address{}) {
-		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
-	}
-	// Validate the user's existence since the API is unhelpful here
-	if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
-		return "", "", common.Address{}, err
-	}
-	res.Body.Close()
-
-	if res.StatusCode != 200 {
-		return "", "", common.Address{}, errors.New("Invalid user... boom!")
-	}
-	// Everything passed validation, return the gathered infos
-	return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
-}
-
 // authTwitter tries to authenticate a faucet request using Twitter posts, returning
 // the username, avatar URL and Ethereum address to fund on success.
 func authTwitter(url string) (string, string, common.Address, error) {
|
@@ -340,7 +340,7 @@ func importWallet(ctx *cli.Context) error {
|
||||
if len(keyfile) == 0 {
|
||||
utils.Fatalf("keyfile must be given as argument")
|
||||
}
|
||||
keyJson, err := ioutil.ReadFile(keyfile)
|
||||
keyJSON, err := ioutil.ReadFile(keyfile)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not read wallet file: %v", err)
|
||||
}
|
||||
@@ -349,7 +349,7 @@ func importWallet(ctx *cli.Context) error {
|
||||
passphrase := getPassPhrase("", false, 0, utils.MakePasswordList(ctx))
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
acct, err := ks.ImportPreSaleKey(keyJson, passphrase)
|
||||
acct, err := ks.ImportPreSaleKey(keyJSON, passphrase)
|
||||
if err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
|
@@ -41,7 +41,7 @@ var bugCommand = cli.Command{
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
}
|
||||
|
||||
const issueUrl = "https://github.com/ethereum/go-ethereum/issues/new"
|
||||
const issueURL = "https://github.com/ethereum/go-ethereum/issues/new"
|
||||
|
||||
// reportBug reports a bug by opening a new URL to the go-ethereum GH issue
|
||||
// tracker and setting default values as the issue body.
|
||||
@@ -51,15 +51,15 @@ func reportBug(ctx *cli.Context) error {
|
||||
|
||||
fmt.Fprintln(&buff, "#### System information")
|
||||
fmt.Fprintln(&buff)
|
||||
fmt.Fprintln(&buff, "Version:", params.Version)
|
||||
fmt.Fprintln(&buff, "Version:", params.VersionWithMeta)
|
||||
fmt.Fprintln(&buff, "Go Version:", runtime.Version())
|
||||
fmt.Fprintln(&buff, "OS:", runtime.GOOS)
|
||||
printOSDetails(&buff)
|
||||
fmt.Fprintln(&buff, header)
|
||||
|
||||
// open a new GH issue
|
||||
if !browser.Open(issueUrl + "?body=" + url.QueryEscape(buff.String())) {
|
||||
fmt.Printf("Please file a new issue at %s using this template:\n\n%s", issueUrl, buff.String())
|
||||
if !browser.Open(issueURL + "?body=" + url.QueryEscape(buff.String())) {
|
||||
fmt.Printf("Please file a new issue at %s using this template:\n\n%s", issueURL, buff.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -94,7 +94,8 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
Requires a first argument of the file to write to.
|
||||
Optional second and third arguments control the first and
|
||||
last block to write. In this mode, the file will be appended
|
||||
if already existing.`,
|
||||
if already existing. If the file ends with .gz, the output will
|
||||
be gzipped.`,
|
||||
}
|
||||
importPreimagesCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(importPreimages),
|
||||
|
@@ -50,7 +50,7 @@ func TestConsoleWelcome(t *testing.T) {
|
||||
geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.SetTemplateFunc("gover", runtime.Version)
|
||||
geth.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
|
||||
geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
|
||||
@@ -133,7 +133,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
|
||||
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.SetTemplateFunc("gover", runtime.Version)
|
||||
attach.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
|
||||
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
|
@@ -24,7 +24,7 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/params"
 )
@@ -131,8 +131,8 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
 		if genesis != "" {
 			genesisHash = daoGenesisHash
 		}
-		config, err := core.GetChainConfig(db, genesisHash)
-		if err != nil {
+		config := rawdb.ReadChainConfig(db, genesisHash)
+		if config == nil {
 			t.Errorf("test %d: failed to retrieve chain config: %v", test, err)
 			return // we want to return here, the other checks can't make it past this point (nil panic).
 		}
@@ -77,7 +77,7 @@ var customGenesisTests = []struct {
 			"homesteadBlock" : 314,
 			"daoForkBlock"   : 141,
 			"daoForkSupport" : true
-		},
+		}
 	}`,
 		query:  "eth.getBlock(0).nonce",
 		result: "0x0000000000000042",
@@ -19,12 +19,16 @@ package main
 
 import (
 	"fmt"
+	"math"
 	"os"
 	"runtime"
+	godebug "runtime/debug"
 	"sort"
+	"strconv"
 	"strings"
 	"time"
 
+	"github.com/elastic/gosigar"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/cmd/utils"
@@ -140,13 +144,22 @@ var (
 		utils.WhisperMaxMessageSizeFlag,
 		utils.WhisperMinPOWFlag,
 	}
+
+	metricsFlags = []cli.Flag{
+		utils.MetricsEnableInfluxDBFlag,
+		utils.MetricsInfluxDBEndpointFlag,
+		utils.MetricsInfluxDBDatabaseFlag,
+		utils.MetricsInfluxDBUsernameFlag,
+		utils.MetricsInfluxDBPasswordFlag,
+		utils.MetricsInfluxDBHostTagFlag,
+	}
 )
 
 func init() {
 	// Initialize the CLI app and start Geth
 	app.Action = geth
 	app.HideVersion = true // we have a command to print the version
-	app.Copyright = "Copyright 2013-2017 The go-ethereum Authors"
+	app.Copyright = "Copyright 2013-2018 The go-ethereum Authors"
 	app.Commands = []cli.Command{
 		// See chaincmd.go:
 		initCommand,
@@ -182,12 +195,37 @@ func init() {
 	app.Flags = append(app.Flags, consoleFlags...)
 	app.Flags = append(app.Flags, debug.Flags...)
 	app.Flags = append(app.Flags, whisperFlags...)
+	app.Flags = append(app.Flags, metricsFlags...)
 
 	app.Before = func(ctx *cli.Context) error {
 		runtime.GOMAXPROCS(runtime.NumCPU())
-		if err := debug.Setup(ctx); err != nil {
+
+		logdir := ""
+		if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
+			logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
+		}
+		if err := debug.Setup(ctx, logdir); err != nil {
 			return err
 		}
+		// Cap the cache allowance and tune the garbage collector
+		var mem gosigar.Mem
+		if err := mem.Get(); err == nil {
+			allowance := int(mem.Total / 1024 / 1024 / 3)
+			if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance {
+				log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance)
+				ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance))
+			}
+		}
+		// Ensure Go's GC ignores the database cache for trigger percentage
+		cache := ctx.GlobalInt(utils.CacheFlag.Name)
+		gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024)))
+
+		log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
+		godebug.SetGCPercent(int(gogc))
+
 		// Start metrics export if enabled
 		utils.SetupMetrics(ctx)
+
 		// Start system runtime metrics collection
 		go metrics.CollectProcessMetrics(3 * time.Second)
 
@@ -213,6 +251,9 @@ func main() {
 // It creates a default node based on the command line arguments and runs it in
 // blocking mode, waiting for it to be shut down.
 func geth(ctx *cli.Context) error {
+	if args := ctx.Args(); len(args) > 0 {
+		return fmt.Errorf("invalid command: %q", args[0])
+	}
 	node := makeFullNode(ctx)
 	startNode(ctx, node)
 	node.Wait()
@@ -223,6 +264,8 @@ func geth(ctx *cli.Context) error {
 // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
 // miner.
 func startNode(ctx *cli.Context, stack *node.Node) {
+	debug.Memsize.Add("node", stack)
+
 	// Start up the node itself
 	utils.StartNode(stack)
 
@@ -241,7 +284,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 	stack.AccountManager().Subscribe(events)
 
 	go func() {
-		// Create an chain state reader for self-derivation
+		// Create a chain state reader for self-derivation
 		rpcClient, err := stack.Attach()
 		if err != nil {
 			utils.Fatalf("Failed to attach to self: %v", err)
@@ -265,11 +308,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 				status, _ := event.Wallet.Status()
 				log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
 
+				derivationPath := accounts.DefaultBaseDerivationPath
 				if event.Wallet.URL().Scheme == "ledger" {
-					event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
-				} else {
-					event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
+					derivationPath = accounts.DefaultLedgerBaseDerivationPath
 				}
+				event.Wallet.SelfDerive(derivationPath, stateReader)
 
 			case accounts.WalletDropped:
 				log.Info("Old wallet dropped", "url", event.Wallet.URL())
@@ -108,7 +108,7 @@ func makedag(ctx *cli.Context) error {
 
 func version(ctx *cli.Context) error {
 	fmt.Println(strings.Title(clientIdentifier))
-	fmt.Println("Version:", params.Version)
+	fmt.Println("Version:", params.VersionWithMeta)
 	if gitCommit != "" {
 		fmt.Println("Git Commit:", gitCommit)
 	}
@@ -185,12 +185,12 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
 	parts := strings.SplitN(pattern, "/", 2)
 	if len(parts) > 1 {
 		for _, variation := range strings.Split(parts[0], ",") {
-			if submetrics, ok := metrics[variation].(map[string]interface{}); !ok {
+			submetrics, ok := metrics[variation].(map[string]interface{})
+			if !ok {
 				utils.Fatalf("Failed to retrieve system metrics: %s", path+variation)
 				return nil
-			} else {
-				results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
 			}
+			results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
 		}
 		return results
 	}
@@ -33,7 +33,7 @@ import (
 var AppHelpTemplate = `NAME:
    {{.App.Name}} - {{.App.Usage}}
 
-   Copyright 2013-2017 The go-ethereum Authors
+   Copyright 2013-2018 The go-ethereum Authors
 
 USAGE:
    {{.App.HelpName}} [options]{{if .App.Commands}} command [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}}
@@ -83,7 +83,8 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.LightKDFFlag,
 		},
 	},
-	{Name: "DEVELOPER CHAIN",
+	{
+		Name: "DEVELOPER CHAIN",
 		Flags: []cli.Flag{
 			utils.DeveloperFlag,
 			utils.DeveloperPeriodFlag,
@@ -206,11 +207,22 @@ var AppHelpFlagGroups = []flagGroup{
 	{
 		Name: "LOGGING AND DEBUGGING",
 		Flags: append([]cli.Flag{
-			utils.MetricsEnabledFlag,
 			utils.FakePoWFlag,
 			utils.NoCompactionFlag,
 		}, debug.Flags...),
 	},
+	{
+		Name: "METRICS AND STATS",
+		Flags: []cli.Flag{
+			utils.MetricsEnabledFlag,
+			utils.MetricsEnableInfluxDBFlag,
+			utils.MetricsInfluxDBEndpointFlag,
+			utils.MetricsInfluxDBDatabaseFlag,
+			utils.MetricsInfluxDBUsernameFlag,
+			utils.MetricsInfluxDBPasswordFlag,
+			utils.MetricsInfluxDBHostTagFlag,
+		},
+	},
 	{
 		Name:  "WHISPER (EXPERIMENTAL)",
 		Flags: whisperFlags,
@@ -180,7 +180,10 @@ func main() {
 			},
 		},
 	}
-	app.Run(os.Args)
+	if err := app.Run(os.Args); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
 }
 
 func showNetwork(ctx *cli.Context) error {
@@ -275,9 +278,8 @@ func createNode(ctx *cli.Context) error {
 	if len(ctx.Args()) != 0 {
 		return cli.ShowCommandHelp(ctx, ctx.Command.Name)
 	}
-	config := &adapters.NodeConfig{
-		Name: ctx.String("name"),
-	}
+	config := adapters.RandomNodeConfig()
+	config.Name = ctx.String("name")
 	if key := ctx.String("key"); key != "" {
 		privKey, err := crypto.HexToECDSA(key)
 		if err != nil {
@@ -103,8 +103,8 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
 	spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
 	spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
 
-	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
-	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
+	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
+	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
 
 	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
 	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
@@ -284,7 +284,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
 	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
 	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
 	spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
-	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
+	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
 	spec.Params.MaxCodeSize = params.MaxCodeSize
 	spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
 	spec.Params.EIP98Transition = math.MaxUint64
@@ -609,7 +609,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
 	}
 	template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
 		"Network":      network,
-		"NetworkID":    conf.Genesis.Config.ChainId,
+		"NetworkID":    conf.Genesis.Config.ChainID,
 		"NetworkTitle": strings.Title(network),
 		"EthstatsPage": config.ethstats,
 		"ExplorerPage": config.explorer,
@@ -683,7 +683,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
 	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
 }
 
-// dashboardInfos is returned from an dashboard status check to allow reporting
+// dashboardInfos is returned from a dashboard status check to allow reporting
 // various configuration parameters.
 type dashboardInfos struct {
 	host string
@@ -122,7 +122,7 @@ func (info *ethstatsInfos) Report() map[string]string {
 		"Website address":       info.host,
 		"Website listener port": strconv.Itoa(info.port),
 		"Login secret":          info.secret,
-		"Banned addresses":      fmt.Sprintf("%v", info.banned),
+		"Banned addresses":      strings.Join(info.banned, "\n"),
 	}
 }
@@ -168,7 +168,7 @@ func (info *explorerInfos) Report() map[string]string {
 	return report
 }
 
-// checkExplorer does a health-check against an block explorer server to verify
+// checkExplorer does a health-check against a block explorer server to verify
 // whether it's running, and if yes, whether it's responsive.
 func checkExplorer(client *sshClient, network string) (*explorerInfos, error) {
 	// Inspect a possible block explorer container on the host
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 )
 
-// faucetDockerfile is the Dockerfile required to build an faucet container to
+// faucetDockerfile is the Dockerfile required to build a faucet container to
 // grant crypto tokens based on GitHub authentications.
 var faucetDockerfile = `
 FROM ethereum/client-go:alltools-latest
@@ -138,7 +138,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
 	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
 }
 
-// faucetInfos is returned from an faucet status check to allow reporting various
+// faucetInfos is returned from a faucet status check to allow reporting various
 // configuration parameters.
 type faucetInfos struct {
 	node *nodeInfos
@@ -181,7 +181,7 @@ func (info *faucetInfos) Report() map[string]string {
 	return report
 }
 
-// checkFaucet does a health-check against an faucet server to verify whether
+// checkFaucet does a health-check against a faucet server to verify whether
 // it's running, and if yes, gathering a collection of useful infos about it.
 func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
 	// Inspect a possible faucet container on the host
@@ -40,11 +40,11 @@ ADD genesis.json /genesis.json
 ADD signer.pass /signer.pass
 {{end}}
 RUN \
-echo 'geth --cache 512 init /genesis.json' > /root/geth.sh && \{{if .Unlock}}
-echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> /root/geth.sh && \{{end}}
-echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> /root/geth.sh
+echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
+echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
+echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
 
-ENTRYPOINT ["/bin/sh", "/root/geth.sh"]
+ENTRYPOINT ["/bin/sh", "geth.sh"]
 `
 
 // nodeComposefile is the docker-compose.yml file required to deploy and maintain
@@ -198,7 +198,7 @@ func (info *nodeInfos) Report() map[string]string {
 	return report
 }
 
-// checkNode does a health-check against an boot or seal node server to verify
+// checkNode does a health-check against a boot or seal node server to verify
 // whether it's running, and if yes, whether it's responsive.
 func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error) {
 	kind := "bootnode"
@@ -221,7 +221,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
 
 	// Container available, retrieve its node ID and its genesis json
 	var out []byte
-	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
+	if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil {
 		return nil, ErrServiceUnreachable
 	}
 	id := bytes.Trim(bytes.TrimSpace(out), "\"")
@@ -49,7 +49,7 @@ func (w *wizard) deployFaucet() {
 	existed := err == nil
 
 	infos.node.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", "  ")
-	infos.node.network = w.conf.Genesis.Config.ChainId.Int64()
+	infos.node.network = w.conf.Genesis.Config.ChainID.Int64()
 
 	// Figure out which port to listen on
 	fmt.Println()
@@ -121,7 +121,7 @@ func (w *wizard) makeGenesis() {
 	// Query the user for some custom extras
 	fmt.Println()
 	fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
-	genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
+	genesis.Config.ChainID = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
 
 	// All done, store the genesis and flush to disk
 	log.Info("Configured new genesis block")
@@ -203,7 +203,7 @@ func (stats serverStats) render() {
 
 	table.SetHeader([]string{"Server", "Address", "Service", "Config", "Value"})
 	table.SetAlignment(tablewriter.ALIGN_LEFT)
-	table.SetColWidth(100)
+	table.SetColWidth(40)
 
 	// Find the longest lines for all columns for the hacked separator
 	separator := make([]string, 5)
@@ -222,8 +222,10 @@ func (stats serverStats) render() {
 			if len(config) > len(separator[3]) {
 				separator[3] = strings.Repeat("-", len(config))
 			}
-			if len(value) > len(separator[4]) {
-				separator[4] = strings.Repeat("-", len(value))
+			for _, val := range strings.Split(value, "\n") {
+				if len(val) > len(separator[4]) {
+					separator[4] = strings.Repeat("-", len(val))
+				}
 			}
 		}
 	}
@@ -263,26 +265,20 @@ func (stats serverStats) render() {
 			sort.Strings(configs)
 
 			for k, config := range configs {
-				switch {
-				case j == 0 && k == 0:
-					table.Append([]string{server, stats[server].address, service, config, stats[server].services[service][config]})
-				case k == 0:
-					table.Append([]string{"", "", service, config, stats[server].services[service][config]})
-				default:
-					table.Append([]string{"", "", "", config, stats[server].services[service][config]})
+				for l, value := range strings.Split(stats[server].services[service][config], "\n") {
+					switch {
+					case j == 0 && k == 0 && l == 0:
+						table.Append([]string{server, stats[server].address, service, config, value})
+					case k == 0 && l == 0:
+						table.Append([]string{"", "", service, config, value})
+					case l == 0:
+						table.Append([]string{"", "", "", config, value})
+					default:
+						table.Append([]string{"", "", "", "", value})
+					}
 				}
 			}
 		}
 	}
 	table.Render()
 }
 
-// protips contains a collection of network infos to report pro-tips
-// based on.
-type protips struct {
-	genesis   string
-	network   int64
-	bootFull  []string
-	bootLight []string
-	ethstats  string
-}
@@ -56,7 +56,7 @@ func (w *wizard) deployNode(boot bool) {
 	existed := err == nil
 
 	infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", "  ")
-	infos.network = w.conf.Genesis.Config.ChainId.Int64()
+	infos.network = w.conf.Genesis.Config.ChainID.Int64()
 
 	// Figure out where the user wants to store the persistent data
 	fmt.Println()
@@ -107,7 +107,7 @@ func (w *wizard) deployNode(boot bool) {
 		// Ethash based miners only need an etherbase to mine against
 		fmt.Println()
 		if infos.etherbase == "" {
-			fmt.Printf("What address should the miner user?\n")
+			fmt.Printf("What address should the miner use?\n")
 			for {
 				if address := w.readAddress(); address != nil {
 					infos.etherbase = address.Hex()
@@ -115,7 +115,7 @@ func (w *wizard) deployNode(boot bool) {
 				}
 			}
 		} else {
-			fmt.Printf("What address should the miner user? (default = %s)\n", infos.etherbase)
+			fmt.Printf("What address should the miner use? (default = %s)\n", infos.etherbase)
 			infos.etherbase = w.readDefaultAddress(common.HexToAddress(infos.etherbase)).Hex()
 		}
 	} else if w.conf.Genesis.Config.Clique != nil {
@@ -52,7 +52,7 @@ func (w *wizard) deployWallet() {
 	existed := err == nil
 
 	infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", "  ")
-	infos.network = w.conf.Genesis.Config.ChainId.Int64()
+	infos.network = w.conf.Genesis.Config.ChainID.Int64()
 
 	// Figure out which port to listen on
 	fmt.Println()
@@ -24,6 +24,7 @@ import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
@@ -58,19 +59,25 @@ var (

//constants for environment variables
const (
-	SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
-	SWARM_ENV_ACCOUNT         = "SWARM_ACCOUNT"
-	SWARM_ENV_LISTEN_ADDR     = "SWARM_LISTEN_ADDR"
-	SWARM_ENV_PORT            = "SWARM_PORT"
-	SWARM_ENV_NETWORK_ID      = "SWARM_NETWORK_ID"
-	SWARM_ENV_SWAP_ENABLE     = "SWARM_SWAP_ENABLE"
-	SWARM_ENV_SWAP_API        = "SWARM_SWAP_API"
-	SWARM_ENV_SYNC_ENABLE     = "SWARM_SYNC_ENABLE"
-	SWARM_ENV_ENS_API         = "SWARM_ENS_API"
-	SWARM_ENV_ENS_ADDR        = "SWARM_ENS_ADDR"
-	SWARM_ENV_CORS            = "SWARM_CORS"
-	SWARM_ENV_BOOTNODES       = "SWARM_BOOTNODES"
-	GETH_ENV_DATADIR          = "GETH_DATADIR"
+	SWARM_ENV_CHEQUEBOOK_ADDR      = "SWARM_CHEQUEBOOK_ADDR"
+	SWARM_ENV_ACCOUNT              = "SWARM_ACCOUNT"
+	SWARM_ENV_LISTEN_ADDR          = "SWARM_LISTEN_ADDR"
+	SWARM_ENV_PORT                 = "SWARM_PORT"
+	SWARM_ENV_NETWORK_ID           = "SWARM_NETWORK_ID"
+	SWARM_ENV_SWAP_ENABLE          = "SWARM_SWAP_ENABLE"
+	SWARM_ENV_SWAP_API             = "SWARM_SWAP_API"
+	SWARM_ENV_SYNC_DISABLE         = "SWARM_SYNC_DISABLE"
+	SWARM_ENV_SYNC_UPDATE_DELAY    = "SWARM_ENV_SYNC_UPDATE_DELAY"
+	SWARM_ENV_DELIVERY_SKIP_CHECK  = "SWARM_DELIVERY_SKIP_CHECK"
+	SWARM_ENV_ENS_API              = "SWARM_ENS_API"
+	SWARM_ENV_ENS_ADDR             = "SWARM_ENS_ADDR"
+	SWARM_ENV_CORS                 = "SWARM_CORS"
+	SWARM_ENV_BOOTNODES            = "SWARM_BOOTNODES"
+	SWARM_ENV_PSS_ENABLE           = "SWARM_PSS_ENABLE"
+	SWARM_ENV_STORE_PATH           = "SWARM_STORE_PATH"
+	SWARM_ENV_STORE_CAPACITY       = "SWARM_STORE_CAPACITY"
+	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+	GETH_ENV_DATADIR               = "GETH_DATADIR"
)

// These settings ensure that TOML keys use the same names as Go struct fields.
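
The added constants considerably widen the env-var surface. One quirk worth noting: SWARM_ENV_SYNC_UPDATE_DELAY maps to the literal string "SWARM_ENV_SYNC_UPDATE_DELAY", so the ENV_ prefix is part of the variable name a user actually has to export. A minimal, hypothetical sketch (not part of the diff) of driving these variables from a wrapper process:

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Values are illustrative; the variable names come from the const block above.
	env := append(os.Environ(),
		"SWARM_SYNC_DISABLE=true",         // read back via strconv.ParseBool
		"SWARM_DELIVERY_SKIP_CHECK=true",  // read back via strconv.ParseBool
		"SWARM_ENV_SYNC_UPDATE_DELAY=30s", // read back via time.ParseDuration; note the ENV_ prefix
	)
	cmd := exec.Command("swarm") // assumes a swarm binary on PATH
	cmd.Env = env
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	_ = cmd.Run()
}
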
@@ -92,10 +99,8 @@ var tomlSettings = toml.Config{

//before booting the swarm node, build the configuration
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
-	//check for deprecated flags
-	checkDeprecated(ctx)
	//start by creating a default config
-	config = bzzapi.NewDefaultConfig()
+	config = bzzapi.NewConfig()
	//first load settings from config file (if provided)
	config, err = configFileOverride(config, ctx)
	if err != nil {
@@ -168,7 +173,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con

	if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
		if id, _ := strconv.Atoi(networkid); id != 0 {
-			currentConfig.NetworkId = uint64(id)
+			currentConfig.NetworkID = uint64(id)
		}
	}

@@ -191,12 +196,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.SwapEnabled = true
	}

-	if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
-		currentConfig.SyncEnabled = true
+	if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
+		currentConfig.SyncEnabled = false
	}

-	currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
-	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+	if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
+		currentConfig.SyncUpdateDelay = d
+	}
+
+	if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
+		currentConfig.DeliverySkipCheck = true
+	}
+
+	currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
+	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
	}

@@ -209,10 +222,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.EnsAPIs = ensAPIs
	}

-	if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
-		currentConfig.EnsRoot = common.HexToAddress(ensaddr)
-	}
-
	if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
		currentConfig.Cors = cors
	}
@@ -221,6 +230,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
		currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
	}

+	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
+		currentConfig.LocalStoreParams.ChunkDbPath = storePath
+	}
+
+	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
+		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+	}
+
+	if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
+		currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
+	}
+
	return currentConfig

}
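
Read together with buildConfig above, the override layers compose so that more specific sources win. A sketch of the chain, using the identifiers from these hunks (the file-before-flags ordering is pinned down by TestConfigCmdLineOverridesFile below; the exact position of the env layer relative to the command line is an assumption here):

func buildConfigSketch(ctx *cli.Context) (*bzzapi.Config, error) {
	config := bzzapi.NewConfig()                   // 1. compiled-in defaults
	config, err := configFileOverride(config, ctx) // 2. --config TOML file
	if err != nil {
		return nil, err
	}
	config = envVarsOverride(config)      // 3. SWARM_* environment variables
	config = cmdLineOverride(config, ctx) // 4. command-line flags
	return config, nil
}
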
@@ -239,7 +260,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {

	if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
		if id, _ := strconv.Atoi(networkid); id != 0 {
-			currentConfig.NetworkId = uint64(id)
+			currentConfig.NetworkID = uint64(id)
		}
	}

@@ -262,17 +283,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
		}
	}

-	if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
-		if sync, err := strconv.ParseBool(syncenable); err != nil {
-			currentConfig.SyncEnabled = sync
+	if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
+		if sync, err := strconv.ParseBool(syncdisable); err != nil {
+			currentConfig.SyncEnabled = !sync
		}
	}

+	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
+		if skipCheck, err := strconv.ParseBool(v); err != nil {
+			currentConfig.DeliverySkipCheck = skipCheck
+		}
+	}
+
+	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
+		if d, err := time.ParseDuration(v); err != nil {
+			currentConfig.SyncUpdateDelay = d
+		}
+	}
+
	if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
-		currentConfig.SwapApi = swapapi
+		currentConfig.SwapAPI = swapapi
	}

-	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
	}

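
One detail worth flagging in the parsing above: strconv.ParseBool and time.ParseDuration signal success with err == nil, so assigning inside an err != nil branch, as these new lines do, reads inverted relative to the usual Go idiom. A conventional version of the same logic would be (a sketch, not part of the diff):

	if v := os.Getenv(SWARM_ENV_SYNC_DISABLE); v != "" {
		// ParseBool returns a nil error on success, so assign only when parsing worked.
		if disable, err := strconv.ParseBool(v); err == nil {
			currentConfig.SyncEnabled = !disable
		}
	}
	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
		if d, err := time.ParseDuration(v); err == nil {
			currentConfig.SyncUpdateDelay = d
		}
	}
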
@@ -312,18 +345,6 @@ func dumpConfig(ctx *cli.Context) error {
	return nil
}

-//deprecated flags checked here
-func checkDeprecated(ctx *cli.Context) {
-	// exit if the deprecated --ethapi flag is set
-	if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
-		utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
-	}
-	// warn if --ens-api flag is set
-	if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
-		log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
-	}
-}

//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
	for _, ensAPI := range cfg.EnsAPIs {

@@ -34,7 +34,7 @@ import (

func TestDumpConfig(t *testing.T) {
	swarm := runSwarm(t, "dumpconfig")
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
	out, err := tomlSettings.Marshal(&defaultConf)
	if err != nil {
		t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
	swarm.ExpectExit()
}

-func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
+func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
	swarm.ExpectExit()
}

-func TestFailsNoBzzAccount(t *testing.T) {
+func TestConfigFailsNoBzzAccount(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
	swarm.ExpectExit()
}

-func TestCmdLineOverrides(t *testing.T) {
+func TestConfigCmdLineOverrides(t *testing.T) {
	dir, err := ioutil.TempDir("", "bzztest")
	if err != nil {
		t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
		fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
+		fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

-	if info.NetworkId != 42 {
-		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
+	if info.NetworkID != 42 {
+		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
	}

+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
+	}
+
	if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
	node.Shutdown()
}

-func TestFileOverrides(t *testing.T) {
+func TestConfigFileOverrides(t *testing.T) {

	// assign ports
	httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {

	//create a config file
	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = true
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = false
+	defaultConf.DeliverySkipCheck = true
+	defaultConf.NetworkID = 54
	defaultConf.Port = httpPort
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
	//create a TOML string
	out, err := tomlSettings.Marshal(&defaultConf)
	if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

-	if info.NetworkId != 54 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.NetworkID != 54 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
	}

-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
	}

	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

	node.Shutdown()
}

-func TestEnvVars(t *testing.T) {
+func TestConfigEnvVars(t *testing.T) {
	// assign ports
	httpPort, err := assignTCPPort()
	if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
	envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
-	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))

	dir, err := ioutil.TempDir("", "bzztest")
	if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

-	if info.NetworkId != 999 {
-		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
+	if info.NetworkID != 999 {
+		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
	}

	if info.Cors != "*" {
		t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
	}

+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
+	}
+
	node.Shutdown()
	cmd.Process.Kill()
}

-func TestCmdLineOverridesFile(t *testing.T) {
+func TestConfigCmdLineOverridesFile(t *testing.T) {

	// assign ports
	httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {

	//create a config file
	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = false
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = true
+	defaultConf.NetworkID = 54
	defaultConf.Port = "8588"
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
	//create a TOML file
	out, err := tomlSettings.Marshal(&defaultConf)
	if err != nil {
		t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
	}
	//write file
-	f, err := ioutil.TempFile("", "testconfig.toml")
+	fname := "testconfig.toml"
+	f, err := ioutil.TempFile("", fname)
	if err != nil {
		t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
	}
+	defer os.Remove(fname)
+	//write file
	_, err = f.WriteString(string(out))
	if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
	flags := []string{
		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
		"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
	}

-	if info.NetworkId != expectNetworkId {
-		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
+	if info.NetworkID != expectNetworkId {
+		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.LocalStoreParams.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
-	}
-
-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
	}

	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

	node.Shutdown()
}

@@ -23,6 +23,7 @@ import (
	"path/filepath"

	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (

func dbExport(ctx *cli.Context) {
	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {

func dbImport(ctx *cli.Context) {
	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {

func dbClean(ctx *cli.Context) {
	args := ctx.Args()
-	if len(args) != 1 {
-		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
+	if len(args) != 2 {
+		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
	if err != nil {
		utils.Fatalf("error opening local chunk database: %s", err)
	}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
	store.Cleanup()
}

-func openDbStore(path string) (*storage.DbStore, error) {
+func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
	}
-	hash := storage.MakeHashFunc("SHA3")
-	return storage.NewDbStore(path, hash, 10000000, 0)
+
+	storeparams := storage.NewDefaultStoreParams()
+	ldbparams := storage.NewLDBStoreParams(storeparams, path)
+	ldbparams.BaseKey = basekey
+	return storage.NewLDBStore(ldbparams)
}

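With the LDBStore change, every db subcommand now takes the node's base key as a trailing argument; the export test added below passes the node's BzzKey with its 0x prefix stripped. Illustrative invocations, in the style of the ArgsUsage strings (all paths and keys are placeholders):

	swarm db export <datadir>/swarm/bzz-<bzzkey>/chunks export.tar <bzzkey-without-0x>
	swarm db import <datadir>/swarm/bzz-<bzzkey>/chunks export.tar <bzzkey-without-0x>
	swarm db clean <datadir>/swarm/bzz-<bzzkey>/chunks <bzzkey-without-0x>
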
cmd/swarm/download.go (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"gopkg.in/urfave/cli.v1"
)

func download(ctx *cli.Context) {
	log.Debug("downloading content using swarm down")
	args := ctx.Args()
	dest := "."

	switch len(args) {
	case 0:
		utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
	case 1:
		log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
	default:
		log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
		if absDest, err := filepath.Abs(args[1]); err == nil {
			dest = absDest
		} else {
			utils.Fatalf("could not get download path: %v", err)
		}
	}

	var (
		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
		client      = swarm.NewClient(bzzapi)
	)

	if fi, err := os.Stat(dest); err == nil {
		if isRecursive && !fi.Mode().IsDir() {
			utils.Fatalf("destination path is not a directory!")
		}
	} else {
		if !os.IsNotExist(err) {
			utils.Fatalf("could not stat path: %v", err)
		}
	}

	uri, err := api.Parse(args[0])
	if err != nil {
		utils.Fatalf("could not parse uri argument: %v", err)
	}

	// assume behaviour according to --recursive switch
	if isRecursive {
		if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
			utils.Fatalf("encoutered an error while downloading directory: %v", err)
		}
	} else {
		// we are downloading a file
		log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))

		err := client.DownloadFile(uri.Addr, uri.Path, dest)
		if err != nil {
			utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
		}
	}
}
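
The function above is registered as the new 'down' command later in main.go. Illustrative usage, mirroring the Usage strings there (hash and paths are placeholders):

	swarm down bzz:/<hash>/index.html ./index.html
	swarm down --recursive bzz:/<hash> ./site
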
cmd/swarm/export_test.go (new file, 139 lines)
@@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm"
)

// TestCLISwarmExportImport perform the following test:
// 1. runs swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
	cluster := newTestCluster(t, 1)

	// generate random 10mb file
	f, cleanup := generateRandomFile(t, 10000000)
	defer cleanup()

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
	up.ExpectExit()
	hash := matches[0]

	var info swarm.Info
	if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	cluster.Stop()
	defer cluster.Cleanup()

	// generate an export.tar
	exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
	exportCmd.ExpectExit()

	// start second cluster
	cluster2 := newTestCluster(t, 1)

	var info2 swarm.Info
	if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	// stop second cluster, so that we close LevelDB
	cluster2.Stop()
	defer cluster2.Cleanup()

	// import the export.tar
	importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
	importCmd.ExpectExit()

	// spin second cluster back up
	cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))

	// try to fetch imported file
	res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
	if err != nil {
		t.Fatal(err)
	}

	if res.StatusCode != 200 {
		t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
	}

	// compare downloaded file with the generated random file
	mustEqualFiles(t, f, res.Body)
}

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
	h := md5.New()
	upLen, err := io.Copy(h, up)
	if err != nil {
		t.Fatal(err)
	}
	upHash := h.Sum(nil)
	h.Reset()
	downLen, err := io.Copy(h, down)
	if err != nil {
		t.Fatal(err)
	}
	downHash := h.Sum(nil)

	if !bytes.Equal(upHash, downHash) || upLen != downLen {
		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
	}
}

func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	// write 10mb random data to file
	buf := make([]byte, 10000000)
	_, err = rand.Read(buf)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(tmp.Name(), buf, 0755)

	return tmp, teardown
}
cmd/swarm/fs.go (new file, 127 lines)
@@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
	"gopkg.in/urfave/cli.v1"
)

func mount(cliContext *cli.Context) {
	args := cliContext.Args()
	if len(args) < 2 {
		utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
	}

	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := &fuse.MountInfo{}
	mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
	if err != nil {
		utils.Fatalf("error expanding path for mount point: %v", err)
	}
	err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
	if err != nil {
		utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
	}
}

func unmount(cliContext *cli.Context) {
	args := cliContext.Args()

	if len(args) < 1 {
		utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
	}
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
	}
	fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
}

func listMounts(cliContext *cli.Context) {
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := []fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
	}
	if len(mf) == 0 {
		fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
	} else {
		fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
		for i, mountInfo := range mf {
			fmt.Printf("%d:\n", i)
			fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
			fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
			fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
		}
	}
}

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
	var endpoint string

	if ctx.IsSet(utils.IPCPathFlag.Name) {
		endpoint = ctx.String(utils.IPCPathFlag.Name)
	} else {
		utils.Fatalf("swarm ipc endpoint not specified")
	}

	if endpoint == "" {
		endpoint = node.DefaultIPCEndpoint(clientIdentifier)
	} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		// Backwards compatibility with geth < 1.5 which required
		// these prefixes.
		endpoint = endpoint[4:]
	}
	return rpc.Dial(endpoint)
}
cmd/swarm/fs_test.go (new file, 236 lines)
@@ -0,0 +1,236 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"
)

func init() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

type testFile struct {
	filePath string
	content  string
}

// TestCLISwarmFs is a high-level test of swarmfs
func TestCLISwarmFs(t *testing.T) {
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	// create a tmp dir
	mountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "1st mount", mountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(mountPoint)

	handlingNode := cluster.Nodes[0]
	mhash := doUploadEmptyDir(t, handlingNode)
	log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	mount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mhash,
		mountPoint,
	}...)
	mount.ExpectExit()

	filesToAssert := []*testFile{}

	dirPath, err := createDirInDir(mountPoint, "testSubDir")
	if err != nil {
		t.Fatal(err)
	}
	dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")

	dummyContent := "somerandomtestcontentthatshouldbeasserted"
	dirs := []string{
		mountPoint,
		dirPath,
		dirPath2,
	}
	files := []string{"f1.tmp", "f2.tmp"}
	for _, d := range dirs {
		for _, entry := range files {
			tFile, err := createTestFileInPath(d, entry, dummyContent)
			if err != nil {
				t.Fatal(err)
			}
			filesToAssert = append(filesToAssert, tFile)
		}
	}
	if len(filesToAssert) != len(dirs)*len(files) {
		t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
	}
	hashRegexp := `[a-f\d]{64}`
	log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmount := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mountPoint,
	}...)
	_, matches := unmount.ExpectRegexp(hashRegexp)
	unmount.ExpectExit()

	hash := matches[0]
	if hash == mhash {
		t.Fatal("this should not be equal")
	}
	log.Debug("swarmfs cli test: asserting no files in mount point")

	//check that there's nothing in the mount folder
	filesInDir, err := ioutil.ReadDir(mountPoint)
	if err != nil {
		t.Fatalf("had an error reading the directory: %v", err)
	}

	if len(filesInDir) != 0 {
		t.Fatal("there shouldn't be anything here")
	}

	secondMountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(secondMountPoint)

	log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	//remount, check files
	newMount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		hash, // the latest hash
		secondMountPoint,
	}...)

	newMount.ExpectExit()
	time.Sleep(1 * time.Second)

	filesInDir, err = ioutil.ReadDir(secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}

	if len(filesInDir) == 0 {
		t.Fatal("there should be something here")
	}

	log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")

	for _, file := range filesToAssert {
		file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
		fileBytes, err := ioutil.ReadFile(file.filePath)

		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
			t.Fatal("this should be equal")
		}
	}

	log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmountSec := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		secondMountPoint,
	}...)

	_, matches = unmountSec.ExpectRegexp(hashRegexp)
	unmountSec.ExpectExit()

	if matches[0] != hash {
		t.Fatal("these should be equal - no changes made")
	}
}

func doUploadEmptyDir(t *testing.T, node *testNode) string {
	// create a tmp dir
	tmpDir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	hashRegexp := `[a-f\d]{64}`

	flags := []string{
		"--bzzapi", node.URL,
		"--recursive",
		"up",
		tmpDir}

	log.Info("swarmfs cli test: uploading dir with 'swarm up'")
	up := runSwarm(t, flags...)
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()
	hash := matches[0]
	log.Info("swarmfs cli test: dir uploaded", "hash", hash)
	return hash
}

func createDirInDir(createInDir string, dirToCreate string) (string, error) {
	fullpath := filepath.Join(createInDir, dirToCreate)
	err := os.MkdirAll(fullpath, 0777)
	if err != nil {
		return "", err
	}
	return fullpath, nil
}

func createTestFileInPath(dir, filename, content string) (*testFile, error) {
	tFile := &testFile{}
	filePath := filepath.Join(dir, filename)
	if file, err := os.Create(filePath); err == nil {
		tFile.content = content
		tFile.filePath = filePath

		_, err = io.WriteString(file, content)
		if err != nil {
			return nil, err
		}
		file.Close()
	}

	return tFile, nil
}
@@ -18,6 +18,7 @@
package main

import (
+	"context"
	"fmt"
	"os"

@@ -38,11 +39,11 @@ func hash(ctx *cli.Context) {
	defer f.Close()

	stat, _ := f.Stat()
-	chunker := storage.NewTreeChunker(storage.NewChunkerParams())
-	key, err := chunker.Split(f, stat.Size(), nil, nil, nil)
+	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
+	addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
	if err != nil {
		utils.Fatalf("%v\n", err)
	} else {
-		fmt.Printf("%v\n", key)
+		fmt.Printf("%v\n", addr)
	}
}

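swarm hash now derives the address through the FileStore pipeline rather than a bare TreeChunker. A minimal sketch of the same flow against the in-memory chunk store, reusing exactly the calls from the hunk above (treat the import path and the Store signature as assumptions of this tree):

package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// MapChunkStore keeps chunks in memory: only the root address matters here.
	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
	content := "hello swarm"
	// The second return value (a completion handle in this API) is ignored,
	// just as the diff ignores it; false disables encryption.
	addr, _, err := fileStore.Store(context.TODO(), strings.NewReader(content), int64(len(content)), false)
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // the swarm hash of the content
}
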
@@ -34,21 +34,37 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/swarm"
	bzzapi "github.com/ethereum/go-ethereum/swarm/api"
	swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
	"github.com/ethereum/go-ethereum/swarm/tracing"
	sv "github.com/ethereum/go-ethereum/swarm/version"

	"gopkg.in/urfave/cli.v1"
)

const clientIdentifier = "swarm"
+const helpTemplate = `NAME:
+{{.HelpName}} - {{.Usage}}
+
+USAGE:
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+{{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+{{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+{{range .VisibleFlags}}{{.}}
+{{end}}{{end}}
+`

var (
	gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@@ -87,10 +103,6 @@ var (
		Usage:  "Network identifier (integer, default 3=swarm testnet)",
		EnvVar: SWARM_ENV_NETWORK_ID,
	}
-	SwarmConfigPathFlag = cli.StringFlag{
-		Name:  "bzzconfig",
-		Usage: "DEPRECATED: please use --config path/to/TOML-file",
-	}
	SwarmSwapEnabledFlag = cli.BoolFlag{
		Name:  "swap",
		Usage: "Swarm SWAP enabled (default false)",
@@ -101,10 +113,20 @@ var (
		Usage:  "URL of the Ethereum API provider to use to settle SWAP payments",
		EnvVar: SWARM_ENV_SWAP_API,
	}
-	SwarmSyncEnabledFlag = cli.BoolTFlag{
-		Name:   "sync",
-		Usage:  "Swarm Syncing enabled (default true)",
-		EnvVar: SWARM_ENV_SYNC_ENABLE,
+	SwarmSyncDisabledFlag = cli.BoolTFlag{
+		Name:   "nosync",
+		Usage:  "Disable swarm syncing",
+		EnvVar: SWARM_ENV_SYNC_DISABLE,
	}
+	SwarmSyncUpdateDelay = cli.DurationFlag{
+		Name:   "sync-update-delay",
+		Usage:  "Duration for sync subscriptions update after no new peers are added (default 15s)",
+		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
+	}
+	SwarmDeliverySkipCheckFlag = cli.BoolFlag{
+		Name:   "delivery-skip-check",
+		Usage:  "Skip chunk delivery check (default false)",
+		EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
+	}
	EnsAPIFlag = cli.StringSliceFlag{
		Name: "ens-api",
@@ -116,13 +138,13 @@ var (
		Usage: "Swarm HTTP endpoint",
		Value: "http://127.0.0.1:8500",
	}
-	SwarmRecursiveUploadFlag = cli.BoolFlag{
+	SwarmRecursiveFlag = cli.BoolFlag{
		Name:  "recursive",
		Usage: "Upload directories recursively",
	}
	SwarmWantManifestFlag = cli.BoolTFlag{
		Name:  "manifest",
-		Usage: "Automatic manifest upload",
+		Usage: "Automatic manifest upload (default true)",
	}
	SwarmUploadDefaultPath = cli.StringFlag{
		Name: "defaultpath",
@@ -134,22 +156,43 @@ var (
	}
	SwarmUploadMimeType = cli.StringFlag{
		Name:  "mime",
-		Usage: "force mime type",
+		Usage: "Manually specify MIME type",
	}
+	SwarmEncryptedFlag = cli.BoolFlag{
+		Name:  "encrypt",
+		Usage: "use encrypted upload",
+	}
	CorsStringFlag = cli.StringFlag{
		Name:   "corsdomain",
		Usage:  "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
		EnvVar: SWARM_ENV_CORS,
	}

-	// the following flags are deprecated and should be removed in the future
-	DeprecatedEthAPIFlag = cli.StringFlag{
-		Name:  "ethapi",
-		Usage: "DEPRECATED: please use --ens-api and --swap-api",
+	SwarmStorePath = cli.StringFlag{
+		Name:   "store.path",
+		Usage:  "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
+		EnvVar: SWARM_ENV_STORE_PATH,
	}
-	DeprecatedEnsAddrFlag = cli.StringFlag{
-		Name:  "ens-addr",
-		Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
+	SwarmStoreCapacity = cli.Uint64Flag{
+		Name:   "store.size",
+		Usage:  "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
+		EnvVar: SWARM_ENV_STORE_CAPACITY,
	}
+	SwarmStoreCacheCapacity = cli.UintFlag{
+		Name:   "store.cache.size",
+		Usage:  "Number of recent chunks cached in memory (default 5000)",
+		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
+	}
+	SwarmResourceMultihashFlag = cli.BoolFlag{
+		Name:  "multihash",
+		Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
+	}
+	SwarmResourceNameFlag = cli.StringFlag{
+		Name:  "name",
+		Usage: "User-defined name for the new resource",
+	}
+	SwarmResourceDataOnCreateFlag = cli.StringFlag{
+		Name:  "data",
+		Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
+	}
)

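An illustrative command line combining the new sync and store flags defined above (all values are placeholders; the defaults are as stated in the Usage strings):

	swarm --nosync --sync-update-delay 30s --delivery-skip-check \
	      --store.path /data/swarm/chunks --store.size 10000000 --store.cache.size 10000
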
@@ -159,12 +202,21 @@ var (
	SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set"
)

+// this help command gets added to any subcommand that does not define it explicitly
+var defaultSubcommandHelp = cli.Command{
+	Action:             func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) },
+	CustomHelpTemplate: helpTemplate,
+	Name:               "help",
+	Usage:              "shows this help",
+	Hidden:             true,
+}
+
var defaultNodeConfig = node.DefaultConfig

// This init function sets defaults so cmd/swarm can run alongside geth.
func init() {
	defaultNodeConfig.Name = clientIdentifier
-	defaultNodeConfig.Version = params.VersionWithCommit(gitCommit)
+	defaultNodeConfig.Version = sv.VersionWithCommit(gitCommit)
	defaultNodeConfig.P2P.ListenAddr = ":30399"
	defaultNodeConfig.IPCPath = "bzzd.ipc"
	// Set flag defaults for --help display.
@@ -180,91 +232,165 @@ func init() {
	app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
	app.Commands = []cli.Command{
		{
-			Action:    version,
-			Name:      "version",
-			Usage:     "Print version numbers",
-			ArgsUsage: " ",
-			Description: `
-The output of this command is supposed to be machine-readable.
-`,
+			Action:             version,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "version",
+			Usage:              "Print version numbers",
+			Description:        "The output of this command is supposed to be machine-readable",
		},
		{
-			Action:    upload,
-			Name:      "up",
-			Usage:     "upload a file or directory to swarm using the HTTP API",
-			ArgsUsage: " <file>",
-			Description: `
-"upload a file or directory to swarm using the HTTP API and prints the root hash",
-`,
+			Action:             upload,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "up",
+			Usage:              "uploads a file or directory to swarm using the HTTP API",
+			ArgsUsage:          "<file>",
+			Flags:              []cli.Flag{SwarmEncryptedFlag},
+			Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
		},
		{
-			Action:    list,
-			Name:      "ls",
-			Usage:     "list files and directories contained in a manifest",
-			ArgsUsage: " <manifest> [<prefix>]",
-			Description: `
-Lists files and directories contained in a manifest.
-`,
-		},
-		{
-			Action:    hash,
-			Name:      "hash",
-			Usage:     "print the swarm hash of a file or directory",
-			ArgsUsage: " <file>",
-			Description: `
-Prints the swarm hash of file or directory.
-`,
-		},
-		{
-			Name:      "manifest",
-			Usage:     "update a MANIFEST",
-			ArgsUsage: "manifest COMMAND",
-			Description: `
-Updates a MANIFEST by adding/removing/updating the hash of a path.
-`,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "resource",
+			Usage:              "(Advanced) Create and update Mutable Resources",
+			ArgsUsage:          "<create|update|info>",
+			Description:        "Works with Mutable Resource Updates",
			Subcommands: []cli.Command{
				{
-					Action:    add,
-					Name:      "add",
-					Usage:     "add a new path to the manifest",
-					ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
-					Description: `
-Adds a new path to the manifest
-`,
+					Action:             resourceCreate,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "create",
+					Usage:              "creates a new Mutable Resource",
+					ArgsUsage:          "<frequency>",
+					Description:        "creates a new Mutable Resource",
+					Flags:              []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
				},
				{
-					Action:    update,
-					Name:      "update",
-					Usage:     "update the hash for an already existing path in the manifest",
-					ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
-					Description: `
-Update the hash for an already existing path in the manifest
-`,
+					Action:             resourceUpdate,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "update",
+					Usage:              "updates the content of an existing Mutable Resource",
+					ArgsUsage:          "<Manifest Address or ENS domain> <0x Hex data>",
+					Description:        "updates the content of an existing Mutable Resource",
+					Flags:              []cli.Flag{SwarmResourceMultihashFlag},
				},
				{
-					Action:    remove,
-					Name:      "remove",
-					Usage:     "removes a path from the manifest",
-					ArgsUsage: "<MANIFEST> <path>",
-					Description: `
-Removes a path from the manifest
-`,
+					Action:             resourceInfo,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "info",
+					Usage:              "obtains information about an existing Mutable Resource",
+					ArgsUsage:          "<Manifest Address or ENS domain>",
+					Description:        "obtains information about an existing Mutable Resource",
				},
			},
		},
		{
-			Name:      "db",
-			Usage:     "manage the local chunk database",
-			ArgsUsage: "db COMMAND",
+			Action:             list,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "ls",
+			Usage:              "list files and directories contained in a manifest",
+			ArgsUsage:          "<manifest> [<prefix>]",
+			Description:        "Lists files and directories contained in a manifest",
+		},
+		{
+			Action:             hash,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "hash",
+			Usage:              "print the swarm hash of a file or directory",
+			ArgsUsage:          "<file>",
+			Description:        "Prints the swarm hash of file or directory",
+		},
+		{
+			Action:    download,
+			Name:      "down",
+			Flags:     []cli.Flag{SwarmRecursiveFlag},
+			Usage:     "downloads a swarm manifest or a file inside a manifest",
+			ArgsUsage: " <uri> [<dir>]",
+			Description: `
-Manage the local chunk database.
+Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
+`,
+		},

{
|
||||
Name: "manifest",
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Usage: "perform operations on swarm manifests",
|
||||
ArgsUsage: "COMMAND",
|
||||
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: dbExport,
|
||||
Name: "export",
|
||||
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
|
||||
ArgsUsage: "<chunkdb> <file>",
|
||||
Action: add,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "add",
|
||||
Usage: "add a new path to the manifest",
|
||||
ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
|
||||
Description: "Adds a new path to the manifest",
|
||||
},
|
||||
{
|
||||
Action: update,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "update",
|
||||
Usage: "update the hash for an already existing path in the manifest",
|
||||
ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
|
||||
Description: "Update the hash for an already existing path in the manifest",
|
||||
},
|
||||
{
|
||||
Action: remove,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "remove",
|
||||
Usage: "removes a path from the manifest",
|
||||
ArgsUsage: "<MANIFEST> <path>",
|
||||
Description: "Removes a path from the manifest",
|
||||
},
|
||||
},
|
||||
},
	{
		Name:               "fs",
		CustomHelpTemplate: helpTemplate,
		Usage:              "perform FUSE operations",
		ArgsUsage:          "fs COMMAND",
		Description:        "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operations you must reference the correct path to bzzd.ipc in order to communicate with the node",
		Subcommands: []cli.Command{
			{
				Action:             mount,
				CustomHelpTemplate: helpTemplate,
				Name:               "mount",
				Flags:              []cli.Flag{utils.IPCPathFlag},
				Usage:              "mount a swarm hash to a mount point",
				ArgsUsage:          "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
				Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
			},
			{
				Action:             unmount,
				CustomHelpTemplate: helpTemplate,
				Name:               "unmount",
				Flags:              []cli.Flag{utils.IPCPathFlag},
				Usage:              "unmount a swarmfs mount",
				ArgsUsage:          "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
				Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
			},
			{
				Action:             listMounts,
				CustomHelpTemplate: helpTemplate,
				Name:               "list",
				Flags:              []cli.Flag{utils.IPCPathFlag},
				Usage:              "list swarmfs mounts",
				ArgsUsage:          "swarm fs list --ipcpath <path to bzzd.ipc>",
				Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
			},
		},
	},
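Aside: the fs subcommands are thin clients that reach the running node through the bzzd.ipc endpoint named by --ipcpath. A hedged sketch of that transport using go-ethereum's rpc package follows; the socket path, the "swarmfs_listmounts" method name, and the result shape are assumptions for illustration, not confirmed API.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// rpc.Dial treats a plain filesystem path as an IPC endpoint.
	client, err := rpc.Dial("/home/user/.ethereum/bzzd.ipc") // hypothetical path
	if err != nil {
		log.Fatalf("cannot connect to bzzd.ipc: %v", err)
	}
	defer client.Close()

	// The result type only needs to mirror the fields we care about.
	var mounts []struct {
		MountPoint     string
		LatestManifest string
	}
	if err := client.Call(&mounts, "swarmfs_listmounts"); err != nil { // assumed method name
		log.Fatalf("RPC call failed: %v", err)
	}
	for _, m := range mounts {
		fmt.Println(m.MountPoint, m.LatestManifest)
	}
}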
	{
		Name:               "db",
		CustomHelpTemplate: helpTemplate,
		Usage:              "manage the local chunk database",
		ArgsUsage:          "db COMMAND",
		Description:        "Manage the local chunk database",
		Subcommands: []cli.Command{
			{
				Action:             dbExport,
				CustomHelpTemplate: helpTemplate,
				Name:               "export",
				Usage:              "export a local chunk database as a tar archive (use - to send to stdout)",
				ArgsUsage:          "<chunkdb> <file>",
				Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
@@ -277,10 +403,11 @@ pv(1) tool to get a progress bar:
`,
			},
			{
-				Action:    dbImport,
-				Name:      "import",
-				Usage:     "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
-				ArgsUsage: "<chunkdb> <file>",
+				Action:             dbImport,
+				CustomHelpTemplate: helpTemplate,
+				Name:               "import",
+				Usage:              "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+				ArgsUsage:          "<chunkdb> <file>",
				Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
@@ -293,30 +420,24 @@ pv(1) tool to get a progress bar:
`,
			},
			{
-				Action:    dbClean,
-				Name:      "clean",
-				Usage:     "remove corrupt entries from a local chunk database",
-				ArgsUsage: "<chunkdb>",
-				Description: `
-Remove corrupt entries from a local chunk database.
-`,
+				Action:             dbClean,
+				CustomHelpTemplate: helpTemplate,
+				Name:               "clean",
+				Usage:              "remove corrupt entries from a local chunk database",
+				ArgsUsage:          "<chunkdb>",
+				Description:        "Remove corrupt entries from a local chunk database",
			},
		},
	},
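Aside: export and import both accept "-" as a stand-in for stdout or stdin, which is what lets the (elided) help text pipe archives through the pv(1) tool for a progress bar. A minimal sketch of that convention; openOutput is a hypothetical helper, not part of this file.

package main

import (
	"fmt"
	"io"
	"os"
)

// openOutput returns stdout when name is "-", otherwise creates the file.
func openOutput(name string) (io.WriteCloser, error) {
	if name == "-" {
		return os.Stdout, nil
	}
	return os.Create(name)
}

func main() {
	out, err := openOutput("-")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer out.Close()
	fmt.Fprintln(out, "tar stream would be written here")
}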
	{
		Action: func(ctx *cli.Context) {
			utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
		},
		Name:      "cleandb",
		Usage:     "DEPRECATED: use 'swarm db clean'",
		ArgsUsage: " ",
		Description: `
DEPRECATED: use 'swarm db clean'.
`,
	},
	// See config.go
	DumpConfigCommand,
}
	// append a hidden help subcommand to all commands that have subcommands
	// if a help command was already defined above, that one will take precedence.
	addDefaultHelpSubcommands(app.Commands)

	sort.Sort(cli.CommandsByName(app.Commands))

	app.Flags = []cli.Flag{
@@ -339,10 +460,11 @@ DEPRECATED: use 'swarm db clean'.
		CorsStringFlag,
		EnsAPIFlag,
+		SwarmTomlConfigPathFlag,
-		SwarmConfigPathFlag,
		SwarmSwapEnabledFlag,
		SwarmSwapAPIFlag,
-		SwarmSyncEnabledFlag,
+		SwarmSyncDisabledFlag,
+		SwarmSyncUpdateDelay,
+		SwarmDeliverySkipCheckFlag,
		SwarmListenAddrFlag,
		SwarmPortFlag,
		SwarmAccountFlag,
@@ -350,23 +472,34 @@ DEPRECATED: use 'swarm db clean'.
		ChequebookAddrFlag,
		// upload flags
		SwarmApiFlag,
-		SwarmRecursiveUploadFlag,
+		SwarmRecursiveFlag,
		SwarmWantManifestFlag,
		SwarmUploadDefaultPath,
		SwarmUpFromStdinFlag,
		SwarmUploadMimeType,
		//deprecated flags
		DeprecatedEthAPIFlag,
		DeprecatedEnsAddrFlag,
+		// storage flags
+		SwarmStorePath,
+		SwarmStoreCapacity,
+		SwarmStoreCacheCapacity,
	}
	rpcFlags := []cli.Flag{
		utils.WSEnabledFlag,
		utils.WSListenAddrFlag,
		utils.WSPortFlag,
		utils.WSApiFlag,
		utils.WSAllowedOriginsFlag,
	}
	app.Flags = append(app.Flags, rpcFlags...)
	app.Flags = append(app.Flags, debug.Flags...)
	app.Flags = append(app.Flags, swarmmetrics.Flags...)
+	app.Flags = append(app.Flags, tracing.Flags...)
	app.Before = func(ctx *cli.Context) error {
		runtime.GOMAXPROCS(runtime.NumCPU())
-		if err := debug.Setup(ctx); err != nil {
+		if err := debug.Setup(ctx, ""); err != nil {
			return err
		}
		swarmmetrics.Setup(ctx)
+		tracing.Setup(ctx)
		return nil
	}
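Aside: app.Before and app.After are urfave/cli v1 lifecycle hooks; Before runs after flag parsing and before the selected action (a non-nil error aborts the run), and After runs on the way out. A minimal sketch with hypothetical hook bodies, assuming the same gopkg.in/urfave/cli.v1 package:

package main

import (
	"fmt"
	"os"
	"runtime"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Before = func(ctx *cli.Context) error {
		// Global setup, e.g. sizing the scheduler as main() does above.
		runtime.GOMAXPROCS(runtime.NumCPU())
		return nil // a non-nil error would abort the run
	}
	app.After = func(ctx *cli.Context) error {
		fmt.Println("cleanup runs here")
		return nil
	}
	app.Action = func(ctx *cli.Context) error {
		fmt.Println("main action")
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}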
	app.After = func(ctx *cli.Context) error {
@@ -384,15 +517,12 @@ func main() {

func version(ctx *cli.Context) error {
	fmt.Println(strings.Title(clientIdentifier))
-	fmt.Println("Version:", params.Version)
+	fmt.Println("Version:", sv.VersionWithMeta)
	if gitCommit != "" {
		fmt.Println("Git Commit:", gitCommit)
	}
-	fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
	fmt.Println("Go Version:", runtime.Version())
	fmt.Println("OS:", runtime.GOOS)
-	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
-	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
	return nil
}
@@ -405,6 +535,10 @@ func bzzd(ctx *cli.Context) error {
	}

	cfg := defaultNodeConfig

+	//pss operates on ws
+	cfg.WSModules = append(cfg.WSModules, "pss")
+
	//geth only supports --datadir via command line
	//in order to be consistent within swarm, if we pass --datadir via environment variable
	//or via config file, we get the same directory for geth and swarm
@@ -421,7 +555,7 @@ func bzzd(ctx *cli.Context) error {
	//due to overriding behavior
	initSwarmNode(bzzconfig, stack, ctx)
	//register BZZ as node.Service in the ethereum node
-	registerBzzService(bzzconfig, ctx, stack)
+	registerBzzService(bzzconfig, stack)
	//start the node
	utils.StartNode(stack)

@@ -439,7 +573,7 @@ func bzzd(ctx *cli.Context) error {
		bootnodes := strings.Split(bzzconfig.BootNodes, ",")
		injectBootnodes(stack.Server(), bootnodes)
	} else {
-		if bzzconfig.NetworkId == 3 {
+		if bzzconfig.NetworkID == 3 {
			injectBootnodes(stack.Server(), testbetBootNodes)
		}
	}
@@ -448,21 +582,11 @@ func bzzd(ctx *cli.Context) error {
	return nil
}
-func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
+func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
	//define the swarm service boot function
-	boot := func(ctx *node.ServiceContext) (node.Service, error) {
-		var swapClient *ethclient.Client
-		var err error
-		if bzzconfig.SwapApi != "" {
-			log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
-			swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
-			if err != nil {
-				return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
-			}
-		}
-		return swarm.NewSwarm(ctx, swapClient, bzzconfig)
-	}
+	boot := func(_ *node.ServiceContext) (node.Service, error) {
+		// In production, mockStore must be always nil.
+		return swarm.NewSwarm(bzzconfig, nil)
+	}
	//register within the ethereum node
	if err := stack.Register(boot); err != nil {
@@ -487,6 +611,26 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
	return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx))
}
+// getPrivKey returns the private key of the specified bzzaccount
+// Used only by client commands, such as `resource`
+func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
+	// booting up the swarm node just as we do in bzzd action
+	bzzconfig, err := buildConfig(ctx)
+	if err != nil {
+		utils.Fatalf("unable to configure swarm: %v", err)
+	}
+	cfg := defaultNodeConfig
+	if _, err := os.Stat(bzzconfig.Path); err == nil {
+		cfg.DataDir = bzzconfig.Path
+	}
+	utils.SetNodeConfig(ctx, &cfg)
+	stack, err := node.New(&cfg)
+	if err != nil {
+		utils.Fatalf("can't create node: %v", err)
+	}
+	return getAccount(bzzconfig.BzzAccount, ctx, stack)
+}
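Aside: registerBzzService and getPrivKey both lean on go-ethereum's node stack: build a node.Config, create the stack with node.New, queue service constructors with Register, and the constructors run when the stack starts. A hedged sketch with a hypothetical no-op service; the node.Service interface shown matches go-ethereum of this vintage:

package main

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// noopService is a hypothetical service that satisfies node.Service.
type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol   { return nil }
func (s *noopService) APIs() []rpc.API             { return nil }
func (s *noopService) Start(srv *p2p.Server) error { return nil }
func (s *noopService) Stop() error                 { return nil }

func main() {
	cfg := node.DefaultConfig
	cfg.DataDir = "/tmp/demo-node" // illustrative path
	stack, err := node.New(&cfg)
	if err != nil {
		panic(err)
	}
	// Constructors run lazily when the stack starts.
	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return &noopService{}, nil
	}); err != nil {
		panic(err)
	}
	if err := stack.Start(); err != nil {
		panic(err)
	}
	// Services are now running; Stop tears them down again.
	if err := stack.Stop(); err != nil {
		panic(err)
	}
}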
func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey {
	var a accounts.Account
	var err error
@@ -551,3 +695,16 @@ func injectBootnodes(srv *p2p.Server, nodes []string) {
		srv.AddPeer(n)
	}
}
+// addDefaultHelpSubcommands scans through defined CLI commands and adds
+// a basic help subcommand to each
+// if a help command is already defined, it will take precedence over the default.
+func addDefaultHelpSubcommands(commands []cli.Command) {
+	for i := range commands {
+		cmd := &commands[i]
+		if cmd.Subcommands != nil {
+			cmd.Subcommands = append(cmd.Subcommands, defaultSubcommandHelp)
+			addDefaultHelpSubcommands(cmd.Subcommands)
+		}
+	}
+}
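Aside: defaultSubcommandHelp is referenced above but defined elsewhere in the file. A plausible shape, consistent with urfave/cli v1, would be a hidden "help" command that prints the enclosing command's help text; this is an assumption, not the verbatim definition:

// Hypothetical reconstruction of defaultSubcommandHelp; helpTemplate is
// the template already used throughout this file.
var defaultSubcommandHelp = cli.Command{
	Action:             func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) },
	CustomHelpTemplate: helpTemplate,
	Name:               "help",
	Usage:              "shows this help",
	Hidden:             true,
}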
Some files were not shown because too many files have changed in this diff.