Compare commits
429 Commits
SHA1 | Author | Date | |
---|---|---|---|
6c6c7b2af3 | |||
5c93462b5e | |||
701d60c889 | |||
9be07de539 | |||
885c13c2c9 | |||
5bbd7fb390 | |||
79b11121a7 | |||
72af509abe | |||
382c9266e6 | |||
f46adfac28 | |||
514b1587db | |||
66a7ef57e6 | |||
ecca2c3c1b | |||
7a7f6a4f29 | |||
c8e70186a6 | |||
794741b8b2 | |||
48705f8aea | |||
10b3f97c9d | |||
5596b664c4 | |||
10181b57a9 | |||
ac193e36ce | |||
5ba9225fe3 | |||
fc87bc5f52 | |||
c1740e4540 | |||
e3db1236de | |||
02b4d074f6 | |||
2dcb22afec | |||
69c8be7c86 | |||
55e5926f34 | |||
f30179d62e | |||
c4d21bc8e5 | |||
160add8570 | |||
564c8f3ae6 | |||
451ffdb62b | |||
6ff2c02991 | |||
f585f9eee8 | |||
4ea4d2dc34 | |||
1e67378df8 | |||
cc313e78b7 | |||
b0ca1b67ce | |||
03d00361f5 | |||
f90a193f92 | |||
8e14bb1448 | |||
cd6c861dc5 | |||
c91f7beb53 | |||
2bacf36d80 | |||
32d8d42274 | |||
da7d57e07c | |||
8cab3ab435 | |||
8f567dc8a2 | |||
504278e839 | |||
e7408b5552 | |||
1901521ed0 | |||
23b51a68cb | |||
dc92779c0a | |||
d70536b5d4 | |||
07635e43e2 | |||
64a3a3d23c | |||
777540628e | |||
a4df80f47f | |||
bc2a5578c0 | |||
ebf41d16a0 | |||
9d0c51fb0f | |||
08f27428b4 | |||
27a5622e99 | |||
8596fc5974 | |||
ad16aeb0a2 | |||
b872961ec8 | |||
68955ed2eb | |||
ff9a868232 | |||
20b818d206 | |||
63246e2542 | |||
3c48a25762 | |||
286ec5df40 | |||
4ee92f2d19 | |||
f7e39a7724 | |||
79cdbcfe64 | |||
79bf69b556 | |||
28aea46ac0 | |||
fc5f8a3dda | |||
3cc476c8ab | |||
2fd5ba6bd4 | |||
b4b27ebaea | |||
8c037dc487 | |||
3e14837c1c | |||
58f7f977e7 | |||
afdfdebd87 | |||
e311bb520a | |||
1ab3e30698 | |||
314246da78 | |||
bf1e263128 | |||
7e57fee355 | |||
a4da8416ee | |||
998abb9107 | |||
059c767adf | |||
d4f11d9b4f | |||
104375f398 | |||
1bbd400899 | |||
f9fb70d2ee | |||
1335a6cc8c | |||
b70a73cd3e | |||
0b978f91b6 | |||
64d199edf2 | |||
4e0fea4d30 | |||
9bd6068fef | |||
76069eef38 | |||
3df7142b3e | |||
3d123bcde6 | |||
3040243042 | |||
9facf6423d | |||
2403656373 | |||
ef0edc6e32 | |||
133de3d806 | |||
f8d8b56b28 | |||
d8aaa3a215 | |||
6131dd55c5 | |||
02656f9f61 | |||
02aa86e659 | |||
7bbdf3e268 | |||
6ca59d98f8 | |||
833eeb9f23 | |||
2b422b1a47 | |||
73c5aba21f | |||
6a56b15019 | |||
5d9ac49c7e | |||
db568a61e2 | |||
17ce0a37de | |||
26b2d6e1aa | |||
fff6e03a79 | |||
d375193797 | |||
374c49e0ac | |||
10ce8b0e3c | |||
9a7e99f75d | |||
6f8c7b0def | |||
1c45f2f42e | |||
e063d538b8 | |||
43437806fb | |||
8f06b7980d | |||
971079822e | |||
f42bd73ce5 | |||
f5925b0459 | |||
8edaaa227d | |||
bd74882d83 | |||
67439c1dba | |||
f59a49d591 | |||
2b50367fe9 | |||
46cf0a616b | |||
85454e7678 | |||
80de4dc72c | |||
8c2cf3c66c | |||
37e9fcacca | |||
b36f54c684 | |||
3991745c5f | |||
6dd2803b8e | |||
fc0c6c175c | |||
7c74e166b0 | |||
f7848c2aa5 | |||
19866075ac | |||
faafeef79e | |||
cd82b89fde | |||
3780d0b6f7 | |||
ff89a3ddce | |||
aee70ae30b | |||
392151e251 | |||
5b742fb82b | |||
b159cdd8dd | |||
524ca544b2 | |||
1059927f9c | |||
fca6e515d6 | |||
ca436f4b90 | |||
350bb6d9ca | |||
5e805aa865 | |||
455fcc8309 | |||
0cc9b8791e | |||
8b84bd283f | |||
4a260dc1f2 | |||
4371367cd1 | |||
bc0e6a5e68 | |||
60c858a529 | |||
e9b850805e | |||
53f3460ab5 | |||
bdf98b4fcd | |||
c259e6874e | |||
13cda8d9b6 | |||
4f9789b28d | |||
ee748d1451 | |||
0732ad4e47 | |||
3d32690b54 | |||
a602ee90f2 | |||
fc78ce61c0 | |||
ffebf00114 | |||
99da85c895 | |||
f4841ff43d | |||
3a678a15c9 | |||
3e0dbe0eaa | |||
1802682f65 | |||
4d2249773a | |||
8a8fc5f8ef | |||
671ba3791d | |||
9488e7fd5f | |||
23c6fcdbe8 | |||
cf5d4b5541 | |||
b95c2b58f0 | |||
8e9197f2a1 | |||
c65f10a17b | |||
a56f3dc0d9 | |||
47359301a2 | |||
688ee6d1e5 | |||
ad8d519eb5 | |||
9e80d9bee1 | |||
0ff35e170d | |||
8d6a5a3581 | |||
33a24bb731 | |||
2d47c6bfde | |||
d68ad36bb9 | |||
df74a43396 | |||
01cb9948f1 | |||
bf468a81ec | |||
ab5646c532 | |||
3b25012481 | |||
bd381be9c8 | |||
225de7ca0a | |||
bd01cd7183 | |||
0958770625 | |||
4f7a38001f | |||
65f0e905dd | |||
4b8860a7b4 | |||
34ec9913f6 | |||
f25486c3fb | |||
88b4fe7d21 | |||
5e38f7a664 | |||
4c1d0b164b | |||
48ee7f9de7 | |||
fe13949d9d | |||
138f26c93a | |||
8f12d76a47 | |||
be8f8409bc | |||
a633a2d7ea | |||
67aff49822 | |||
8c313eed26 | |||
c4d28aee9b | |||
a0aa071ca6 | |||
c7041fe145 | |||
41318f3776 | |||
65b96dc4b9 | |||
8bbd598ef4 | |||
ae11545bc5 | |||
0550957989 | |||
dfd076244d | |||
6dc32e897a | |||
e4301564c2 | |||
bae7565231 | |||
9e5f03b6c4 | |||
bb366271fe | |||
4a741df757 | |||
5421a08d2f | |||
cf611c50b9 | |||
c008176f9f | |||
f3359d5e58 | |||
feb2932706 | |||
ea1d1825a8 | |||
f321ed23fb | |||
413dc1d265 | |||
fdf2184b1e | |||
3c7338d6c8 | |||
ef8d4711d5 | |||
caa00b7e6d | |||
5603eb9116 | |||
78c04c920d | |||
10a45cb59b | |||
cd88f69715 | |||
46d0d04f97 | |||
514659a023 | |||
01c9cf1cb5 | |||
b751cf3901 | |||
b6e99deee9 | |||
d40179f882 | |||
beb708e6d7 | |||
f2c5b2cc1c | |||
a4277450b2 | |||
b664bedcf2 | |||
eebde1a2e2 | |||
b0b3cf2eeb | |||
c98d9b49bf | |||
0042f13d47 | |||
d432688886 | |||
58a1e13e6d | |||
a1f3878ec5 | |||
a20a02ce0b | |||
9a44e1035e | |||
c62d5422bb | |||
9012863ad7 | |||
a5d08c893d | |||
a4e4c76cb3 | |||
7a11e86442 | |||
4a1d516d78 | |||
b6b0e00198 | |||
3d66ba56ef | |||
767dc6c73d | |||
36e7963467 | |||
98e101ef8e | |||
50c18e6eb8 | |||
60e27b51bc | |||
693d9ccbfb | |||
5c53a5be66 | |||
431cf2a1e4 | |||
4f77857f74 | |||
fade09a7ff | |||
b58a501673 | |||
db6e695002 | |||
335abdceb1 | |||
732273094c | |||
b8793edd83 | |||
eb92522278 | |||
061889d4ea | |||
e3dfd55820 | |||
2fefe4baa0 | |||
ac9865791a | |||
80f7c6c299 | |||
bc24b7a912 | |||
1496b3aff6 | |||
1e9f86b49e | |||
65ea913e29 | |||
9a0e433b13 | |||
04d2de9119 | |||
3285a0fda3 | |||
6171d01b11 | |||
cf87713dd4 | |||
ac92d7c411 | |||
d5a79934dc | |||
0424192e61 | |||
9c2882b2e5 | |||
1a0eb903f1 | |||
0036e2a747 | |||
727eadacca | |||
99cba96f26 | |||
f272879e5a | |||
72dd51e25a | |||
799a469000 | |||
f4d81178d8 | |||
310d2e7ef4 | |||
3ecde4e2aa | |||
a355b401db | |||
cba33029a8 | |||
9702badd83 | |||
067dc2cbf5 | |||
65979770e6 | |||
41bdf49eed | |||
ea11f7dd7a | |||
8df24760d7 | |||
71814bf6c4 | |||
ec1700600a | |||
b0f30b0b37 | |||
e96f2981e2 | |||
09d59da3a1 | |||
280609c99b | |||
309da541de | |||
dd06c85843 | |||
ae40d51410 | |||
b865fad888 | |||
afb17cf071 | |||
08959bbc70 | |||
673c92db6b | |||
c2a494c743 | |||
afdd23b5ca | |||
cb809c03da | |||
45421d3130 | |||
115e7d71cc | |||
dd5ed01f3b | |||
b7ff0d42e3 | |||
c98bce709c | |||
17f0b11942 | |||
6231edcbab | |||
07aae19e5d | |||
b596b4ba5b | |||
8b1e4c4c5e | |||
846d091bd2 | |||
a346aedb90 | |||
ef25b826e6 | |||
261b3e2351 | |||
344f25fb3e | |||
1afaea4bfe | |||
11cf5b7ead | |||
069cb661c3 | |||
3b8915e387 | |||
437ceaa9be | |||
136f78ff0a | |||
aa73420207 | |||
3556962053 | |||
e1e87d8b1a | |||
30cc1c3bf0 | |||
10582a97ca | |||
e16a7ef60f | |||
a816e75662 | |||
3ee75bec9f | |||
04b668b232 | |||
da636c53d6 | |||
2a41e76b39 | |||
4a2c17b1ab | |||
bc75351edf | |||
33b158e0ed | |||
83721a95ce | |||
e7119ce12d | |||
a5f6a1cb7c | |||
e6aff513db | |||
8a4c1fb799 | |||
10a57fc3d4 | |||
a2f23ca9b1 | |||
e20158176d | |||
ef7b9fb7d0 | |||
b0d0fafd68 | |||
90c7155ef4 | |||
df4e7eccf5 | |||
7c707d14d1 | |||
953a995116 | |||
c5840ce12f | |||
3b3989de6a | |||
40976ea1a0 | |||
d18b509e40 | |||
2e4d23a793 | |||
60293820b7 | |||
82defe5c56 | |||
dd483d7d0d | |||
dddebe469b | |||
cf19586cfb | |||
fd5d51c9ae | |||
2ec5cf1673 | |||
36a800a1d2 | |||
93832b633e |
.gitignore
@@ -1,3 +1,6 @@
.git
**/.git
**/*_test.go

build/_workspace
build/_bin
tests/testdata
3 .gitmodules vendored Normal file
@@ -0,0 +1,3 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
39 .travis.yml
@@ -6,7 +6,19 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.7.5
go: 1.7.6
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage

- os: linux
dist: trusty
sudo: required
go: 1.8.3
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -19,7 +31,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.9.0
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -29,7 +41,7 @@ matrix:
- go run build/ci.go test -coverage -misspell

- os: osx
go: 1.8.1
go: 1.9.0
sudo: required
script:
- brew update
@@ -42,7 +54,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.9.0
env:
- ubuntu-ppa
- azure-linux
@@ -53,6 +65,7 @@ matrix:
- debhelper
- dput
- gcc-multilib
- fakeroot
script:
# Build for the primary platforms that Trusty can manage
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
@@ -80,7 +93,7 @@ matrix:
sudo: required
services:
- docker
go: 1.8.1
go: 1.9.0
env:
- azure-linux-mips
script:
@@ -120,16 +133,16 @@ matrix:
- azure-android
- maven-android
before_install:
- curl https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip -o android-ndk-r13b.zip
- unzip -q android-ndk-r13b.zip && rm android-ndk-r13b.zip
- mv android-ndk-r13b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r13b
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
- mv android-ndk-r14b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r14b

- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -137,7 +150,7 @@ matrix:

# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.8.1
go: 1.9.0
env:
- azure-osx
- azure-ios
@@ -147,7 +160,7 @@ matrix:
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds

# Build the iOS framework and upload it to CocoaPods and Azure
- gem uninstall cocoapods -a
- gem uninstall cocoapods -a -x
- gem install cocoapods

- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
@@ -163,7 +176,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.9.0
env:
- azure-purge
script:
22 Dockerfile
@@ -1,14 +1,16 @@
FROM alpine:3.5
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

ADD . /go-ethereum
RUN \
apk add --update git go make gcc musl-dev linux-headers && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /geth && \
apk del git go make gcc musl-dev linux-headers && \
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
RUN cd /go-ethereum && make geth

EXPOSE 8545
EXPOSE 30303
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest

ENTRYPOINT ["/geth"]
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]
10 Makefile
@@ -2,13 +2,13 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.

.PHONY: geth android ios geth-cross evm all test clean
.PHONY: geth android ios geth-cross swarm evm all test clean
.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64

GOBIN = build/bin
GOBIN = $(shell pwd)/build/bin
GO ?= latest

geth:
@@ -16,10 +16,10 @@ geth:
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."

evm:
build/env.sh go run build/ci.go install ./cmd/evm
swarm:
build/env.sh go run build/ci.go install ./cmd/swarm
@echo "Done building."
@echo "Run \"$(GOBIN)/evm\" to start the evm."
@echo "Run \"$(GOBIN)/swarm\" to launch swarm."

all:
build/env.sh go run build/ci.go install
40 README.md
@@ -1,4 +1,4 @@
## Ethereum Go
## Go Ethereum

Official golang implementation of the Ethereum protocol.

@@ -32,14 +32,14 @@ The go-ethereum project comes with several wrappers/executables found in the `cm

| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow insolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

## Running geth

@@ -70,7 +70,7 @@ This command will:
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
with `geth --attach`.
with `geth attach`.
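Besides the interactive console, any other Go process can attach to the same node through its RPC endpoints. Below is a minimal sketch using go-ethereum's `ethclient` package (not part of this diff); the IPC path is an assumed default, and for a `--testnet` node it would be `<datadir>/testnet/geth.ipc` as described further down.

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Dial the local node's IPC endpoint (illustrative default path).
	client, err := ethclient.Dial("/home/user/.ethereum/geth.ipc")
	if err != nil {
		panic(err)
	}

	// Fetch the latest header as a quick connectivity check; a nil block
	// number means "latest".
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("latest block:", header.Number)
}
```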

### Full node on the Ethereum test network

@@ -84,21 +84,39 @@ $ geth --testnet --fast --cache=512 console
```

The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
are equially useful on the testnet too. Please see above for their explanations if you've skipped to
are equally useful on the testnet too. Please see above for their explanations if you've skipped to
here.

Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:

* Instead of using the default data directory (`~/.ethereum` on Linux for example), Geth will nest
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux).
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux). Note, on OSX
and Linux this also means that attaching to a running testnet node requires the use of a custom
endpoint since `geth attach` will try to attach to a production node endpoint by default. E.g.
`geth attach <datadir>/testnet/geth.ipc`. Windows users are not affected by this.
* Instead of connecting the main Ethereum network, the client will connect to the test network,
which uses different P2P bootnodes, different network IDs and genesis states.


*Note: Although there are some internal protective measures to prevent transactions from crossing
over between the main network and test network (different starting nonces), you should make sure to
always use separate accounts for play-money and real-money. Unless you manually move accounts, Geth
will by default correctly separate the two networks and will not make any accounts available between
them.*
over between the main network and test network, you should make sure to always use separate accounts
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
separate the two networks and will not make any accounts available between them.*

### Configuration

As an alternative to passing the numerous flags to the `geth` binary, you can also pass a configuration file via:

```
$ geth --config /path/to/your_config.toml
```

To get an idea how the file should look like you can use the `dumpconfig` subcommand to export your existing configuration:

```
$ geth --your-favourite-flags dumpconfig
```

*Note: This works only with geth v1.6.0 and above.*

#### Docker quick start

@@ -17,11 +17,9 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
@@ -67,7 +65,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
|
||||
}
|
||||
method = m
|
||||
}
|
||||
arguments, err := method.pack(method, args...)
|
||||
arguments, err := method.pack(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
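The change above only drops the redundant receiver argument from the internal `method.pack` call; the exported `ABI.Pack` API is unchanged. As a reminder of that API, here is a minimal sketch mirroring the `slice` test case further down in this diff (the ABI JSON and the `values` input name are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Single method taking a dynamic uint32 slice, as in the tests below.
	const def = `[{"type":"function","name":"slice","inputs":[{"name":"values","type":"uint32[]"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}

	// Pack resolves the method by name, ABI-encodes the arguments and
	// prepends the 4-byte method identifier.
	calldata, err := parsed.Pack("slice", []uint32{1, 2})
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: %x\n", calldata)
}
```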
@@ -78,199 +76,6 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
|
||||
return append(method.Id(), arguments...), nil
|
||||
}
|
||||
|
||||
// toGoSliceType parses the input and casts it to the proper slice defined by the ABI
|
||||
// argument in T.
|
||||
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
||||
index := i * 32
|
||||
// The slice must, at very least be large enough for the index+32 which is exactly the size required
|
||||
// for the [offset in output, size of offset].
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
|
||||
}
|
||||
elem := t.Type.Elem
|
||||
|
||||
// first we need to create a slice of the type
|
||||
var refSlice reflect.Value
|
||||
switch elem.T {
|
||||
case IntTy, UintTy, BoolTy:
|
||||
// create a new reference slice matching the element type
|
||||
switch t.Type.Kind {
|
||||
case reflect.Bool:
|
||||
refSlice = reflect.ValueOf([]bool(nil))
|
||||
case reflect.Uint8:
|
||||
refSlice = reflect.ValueOf([]uint8(nil))
|
||||
case reflect.Uint16:
|
||||
refSlice = reflect.ValueOf([]uint16(nil))
|
||||
case reflect.Uint32:
|
||||
refSlice = reflect.ValueOf([]uint32(nil))
|
||||
case reflect.Uint64:
|
||||
refSlice = reflect.ValueOf([]uint64(nil))
|
||||
case reflect.Int8:
|
||||
refSlice = reflect.ValueOf([]int8(nil))
|
||||
case reflect.Int16:
|
||||
refSlice = reflect.ValueOf([]int16(nil))
|
||||
case reflect.Int32:
|
||||
refSlice = reflect.ValueOf([]int32(nil))
|
||||
case reflect.Int64:
|
||||
refSlice = reflect.ValueOf([]int64(nil))
|
||||
default:
|
||||
refSlice = reflect.ValueOf([]*big.Int(nil))
|
||||
}
|
||||
case AddressTy: // address must be of slice Address
|
||||
refSlice = reflect.ValueOf([]common.Address(nil))
|
||||
case HashTy: // hash must be of slice hash
|
||||
refSlice = reflect.ValueOf([]common.Hash(nil))
|
||||
case FixedBytesTy:
|
||||
refSlice = reflect.ValueOf([][]byte(nil))
|
||||
default: // no other types are supported
|
||||
return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
|
||||
}
|
||||
|
||||
var slice []byte
|
||||
var size int
|
||||
var offset int
|
||||
if t.Type.IsSlice {
|
||||
// get the offset which determines the start of this array ...
|
||||
offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
||||
}
|
||||
|
||||
slice = output[offset:]
|
||||
// ... starting with the size of the array in elements ...
|
||||
size = int(binary.BigEndian.Uint64(slice[24:32]))
|
||||
slice = slice[32:]
|
||||
// ... and make sure that we've at the very least the amount of bytes
|
||||
// available in the buffer.
|
||||
if size*32 > len(slice) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
|
||||
}
|
||||
|
||||
// reslice to match the required size
|
||||
slice = slice[:size*32]
|
||||
} else if t.Type.IsArray {
|
||||
//get the number of elements in the array
|
||||
size = t.Type.SliceSize
|
||||
|
||||
//check to make sure array size matches up
|
||||
if index+32*size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
|
||||
}
|
||||
//slice is there for a fixed amount of times
|
||||
slice = output[index : index+size*32]
|
||||
}
|
||||
|
||||
for i := 0; i < size; i++ {
|
||||
var (
|
||||
inter interface{} // interface type
|
||||
returnOutput = slice[i*32 : i*32+32] // the return output
|
||||
)
|
||||
// set inter to the correct type (cast)
|
||||
switch elem.T {
|
||||
case IntTy, UintTy:
|
||||
inter = readInteger(t.Type.Kind, returnOutput)
|
||||
case BoolTy:
|
||||
inter = !allZero(returnOutput)
|
||||
case AddressTy:
|
||||
inter = common.BytesToAddress(returnOutput)
|
||||
case HashTy:
|
||||
inter = common.BytesToHash(returnOutput)
|
||||
case FixedBytesTy:
|
||||
inter = returnOutput
|
||||
}
|
||||
// append the item to our reflect slice
|
||||
refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
|
||||
}
|
||||
|
||||
// return the interface
|
||||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
return uint8(b[len(b)-1])
|
||||
case reflect.Uint16:
|
||||
return binary.BigEndian.Uint16(b[len(b)-2:])
|
||||
case reflect.Uint32:
|
||||
return binary.BigEndian.Uint32(b[len(b)-4:])
|
||||
case reflect.Uint64:
|
||||
return binary.BigEndian.Uint64(b[len(b)-8:])
|
||||
case reflect.Int8:
|
||||
return int8(b[len(b)-1])
|
||||
case reflect.Int16:
|
||||
return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
|
||||
case reflect.Int32:
|
||||
return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
|
||||
case reflect.Int64:
|
||||
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
|
||||
default:
|
||||
return new(big.Int).SetBytes(b)
|
||||
}
|
||||
}
|
||||
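`readInteger` relies on ABI words being 32 bytes with the value left-padded, so it always decodes from the tail of the word. A standalone sketch of that decoding step (the example value is illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A 32-byte ABI word encoding the uint16 value 0x0102 (258), left-padded
	// with zeros as the ABI requires.
	var word [32]byte
	word[30], word[31] = 0x01, 0x02

	// Mirrors the reflect.Uint16 case above: read the trailing two bytes.
	v := binary.BigEndian.Uint16(word[len(word)-2:])
	fmt.Println(v) // 258
}
```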
|
||||
func allZero(b []byte) bool {
|
||||
for _, byte := range b {
|
||||
if byte != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// toGoType parses the input and casts it to the proper type defined by the ABI
|
||||
// argument in T.
|
||||
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
||||
// we need to treat slices differently
|
||||
if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
|
||||
return toGoSlice(i, t, output)
|
||||
}
|
||||
|
||||
index := i * 32
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
|
||||
}
|
||||
|
||||
// Parse the given index output and check whether we need to read
|
||||
// a different offset and length based on the type (i.e. string, bytes)
|
||||
var returnOutput []byte
|
||||
switch t.Type.T {
|
||||
case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
|
||||
// parse offset from which we should start reading
|
||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
|
||||
}
|
||||
// parse the size up until we should be reading
|
||||
size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
||||
if offset+32+size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
|
||||
}
|
||||
|
||||
// get the bytes for this return value
|
||||
returnOutput = output[offset+32 : offset+32+size]
|
||||
default:
|
||||
returnOutput = output[index : index+32]
|
||||
}
|
||||
|
||||
// convert the bytes to whatever is specified by the ABI.
|
||||
switch t.Type.T {
|
||||
case IntTy, UintTy:
|
||||
return readInteger(t.Type.Kind, returnOutput), nil
|
||||
case BoolTy:
|
||||
return !allZero(returnOutput), nil
|
||||
case AddressTy:
|
||||
return common.BytesToAddress(returnOutput), nil
|
||||
case HashTy:
|
||||
return common.BytesToHash(returnOutput), nil
|
||||
case BytesTy, FixedBytesTy, FunctionTy:
|
||||
return returnOutput, nil
|
||||
case StringTy:
|
||||
return string(returnOutput), nil
|
||||
}
|
||||
return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
|
||||
}
|
||||
|
||||
// these variable are used to determine certain types during type assertion for
|
||||
// assignment.
|
||||
var (
|
||||
|
@@ -48,412 +48,6 @@ func pad(input []byte, size int, left bool) []byte {
|
||||
return common.RightPadBytes(input, size)
|
||||
}
|
||||
|
||||
func TestTypeCheck(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{{1}}, ""},
|
||||
{"address[1]", []common.Address{{1}}, ""},
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"string", "hello world", ""},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
pad([]byte{1}, 32, true),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
pad(pad([]byte{1}, 20, false), 32, true),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
pad([]byte{1}, 32, false),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "function" } ]`,
|
||||
pad([]byte{1}, 32, false),
|
||||
[24]byte{1},
|
||||
"function",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "function":
|
||||
var v [24]byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// bit of an ugly hack for hash type but I don't feel like finding a proper solution
|
||||
if test.outVar == "hash" {
|
||||
tmp := outvar.(common.Hash) // without assignment it's unaddressable
|
||||
outvar = tmp[:]
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
|
||||
var (
|
||||
var1 = new([1]uint32)
|
||||
var2 = new([1]uint32)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *var1 != [1]uint32{1} {
|
||||
t.Error("expected var1 to be [1], got", *var1)
|
||||
}
|
||||
if *var2 != [1]uint32{2} {
|
||||
t.Error("expected var2 to be [2], got", *var2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{"uint16", uint16(2), pad([]byte{2}, 32, true)},
|
||||
{"uint16[]", []uint16{1, 2}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"bytes20", [20]byte{1}, pad([]byte{1}, 32, false)},
|
||||
{"uint256[]", []*big.Int{big.NewInt(1), big.NewInt(2)}, formatSliceOutput([]byte{1}, []byte{2})},
|
||||
{"address[]", []common.Address{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))},
|
||||
{"bytes32[]", []common.Hash{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))},
|
||||
{"function", [24]byte{1}, pad([]byte{1}, 32, false)},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatal("unexpected pack error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig := abi.Methods["slice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["slice256"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
const jsondata = `
|
||||
[
|
||||
{ "type" : "function", "name" : "balance", "constant" : true },
|
||||
@@ -843,399 +437,3 @@ func TestBareEvents(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", inter.Int)
|
||||
}
|
||||
|
||||
if inter.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", inter.String)
|
||||
}
|
||||
|
||||
var reversed struct {
|
||||
String string
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", reversed.Int)
|
||||
}
|
||||
|
||||
if reversed.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", reversed.String)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(inter) != 2 {
|
||||
t.Fatal("expected 2 results got", len(inter))
|
||||
}
|
||||
|
||||
if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected index 0 to be 1 got", num)
|
||||
}
|
||||
|
||||
if str, ok := inter[1].(string); !ok || str != stringOut {
|
||||
t.Error("expected index 1 to be", stringOut, "got", str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(bytes32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
type (
|
||||
B10 [10]byte
|
||||
B32 [32]byte
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(b32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(output, shortAssignLong[:]) {
|
||||
t.Errorf("expected %x to be %x", shortAssignLong, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] },
|
||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if Int == nil || Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", Int)
|
||||
}
|
||||
|
||||
// marshal bool
|
||||
var Bool bool
|
||||
err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !Bool {
|
||||
t.Error("expected Bool to be true")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 32
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut := common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var Bytes []byte
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 64
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 63
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
err = abi.Unpack(&Bytes, "bytes", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, []byte("hello")) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
var hash common.Hash
|
||||
err = abi.Unpack(&hash, "fixed", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
helloHash := common.BytesToHash(common.RightPadBytes([]byte("hello"), 32))
|
||||
if hash != helloHash {
|
||||
t.Errorf("Expected %x to equal %x", hash, helloHash)
|
||||
}
|
||||
|
||||
// marshal error
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
err = abi.Unpack(&Bytes, "multi", make([]byte, 64))
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal mixed bytes
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
buff.Write(fixed)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bytesOut, out[0].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", bytesOut, out[0])
|
||||
}
|
||||
|
||||
if !bytes.Equal(fixed, out[1].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", fixed, out[1])
|
||||
}
|
||||
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
// marshal int array
|
||||
var intArray [3]*big.Int
|
||||
err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var testAgainstIntArray [3]*big.Int
|
||||
testAgainstIntArray[0] = big.NewInt(1)
|
||||
testAgainstIntArray[1] = big.NewInt(2)
|
||||
testAgainstIntArray[2] = big.NewInt(3)
|
||||
|
||||
for i, Int := range intArray {
|
||||
if Int.Cmp(testAgainstIntArray[i]) != 0 {
|
||||
t.Errorf("expected %v, got %v", testAgainstIntArray[i], Int)
|
||||
}
|
||||
}
|
||||
// marshal address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
|
||||
var outAddr []common.Address
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddr) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddr))
|
||||
}
|
||||
|
||||
if outAddr[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddr[0])
|
||||
}
|
||||
|
||||
// marshal multiple address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000200000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000300000000000000000000000000000000000000"))
|
||||
|
||||
var outAddrStruct struct {
|
||||
A []common.Address
|
||||
B []common.Address
|
||||
}
|
||||
err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddrStruct.A) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.A))
|
||||
}
|
||||
|
||||
if outAddrStruct.A[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddrStruct.A[0])
|
||||
}
|
||||
|
||||
if len(outAddrStruct.B) != 2 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.B))
|
||||
}
|
||||
|
||||
if outAddrStruct.B[0] != (common.Address{2}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{2}, outAddrStruct.B[0])
|
||||
}
|
||||
if outAddrStruct.B[1] != (common.Address{3}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{3}, outAddrStruct.B[1])
|
||||
}
|
||||
|
||||
// marshal invalid address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))
|
||||
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Fatal("expected error:", err)
|
||||
}
|
||||
}
|
||||
|
@@ -33,7 +33,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
@@ -61,7 +60,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
|
||||
database, _ := ethdb.NewMemDatabase()
|
||||
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
|
||||
genesis.MustCommit(database)
|
||||
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), vm.Config{})
|
||||
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
|
||||
backend.rollback()
|
||||
return backend
|
||||
@@ -90,7 +89,7 @@ func (b *SimulatedBackend) Rollback() {
|
||||
func (b *SimulatedBackend) rollback() {
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
|
||||
}
|
||||
|
||||
// CodeAt returns the code associated with a certain account in the blockchain.
|
||||
@@ -144,7 +143,8 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
|
||||
|
||||
// TransactionReceipt returns the receipt of a transaction.
|
||||
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
||||
return core.GetReceipt(b.database, txHash), nil
|
||||
receipt, _, _, _ := core.GetReceipt(b.database, txHash)
|
||||
return receipt, nil
|
||||
}
|
||||
|
||||
// PendingCodeAt returns the code associated with an account in the pending state.
|
||||
@@ -253,7 +253,8 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
|
||||
// about the transaction and calling mechanisms.
|
||||
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
|
||||
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
|
||||
ret, gasUsed, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
// TODO utilize returned failed flag to help gas estimation.
|
||||
ret, gasUsed, _, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
return ret, gasUsed, err
|
||||
}
|
||||
|
||||
@@ -279,7 +280,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
|
||||
block.AddTx(tx)
|
||||
})
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -122,7 +122,7 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
}
|
||||
// For Go bindings pass the code through goimports to clean it up and double check
|
||||
if lang == LangGo {
|
||||
code, err := imports.Process("", buffer.Bytes(), nil)
|
||||
code, err := imports.Process(".", buffer.Bytes(), nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%v\n%s", err, buffer)
|
||||
}
|
||||
|
@@ -459,7 +459,7 @@ func TestBindings(t *testing.T) {
|
||||
}
|
||||
// Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845)
|
||||
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}")
|
||||
linkTestDeps, err := imports.Process("", []byte(linkTestCode), nil)
|
||||
linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed check for goimports symlink bug: %v", err)
|
||||
}
|
||||
|
@@ -52,8 +52,8 @@ var tmplSource = map[Lang]string{
|
||||
// tmplSourceGo is the Go source template used to generate the contract binding
|
||||
// based on.
|
||||
const tmplSourceGo = `
|
||||
// This file is an automatically generated Go binding. Do not modify as any
|
||||
// change will likely be lost upon the next re-generation!
|
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package {{.Package}}
|
||||
|
||||
|
@@ -17,10 +17,15 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
errBadBool = errors.New("abi: improperly encoded boolean value")
|
||||
)
|
||||
|
||||
// formatSliceString formats the reflection kind with the given slice size
|
||||
// and returns a formatted string representation.
|
||||
func formatSliceString(kind reflect.Kind, sliceSize int) string {
|
||||
|
@@ -39,7 +39,7 @@ type Method struct {
|
||||
Outputs []Argument
|
||||
}
|
||||
|
||||
func (m Method) pack(method Method, args ...interface{}) ([]byte, error) {
|
||||
func (method Method) pack(args ...interface{}) ([]byte, error) {
|
||||
// Make sure arguments match up and pack them
|
||||
if len(args) != len(method.Inputs) {
|
||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
|
||||
|
@@ -62,19 +62,6 @@ func U256(n *big.Int) []byte {
|
||||
return math.PaddedBigBytes(math.U256(n), 32)
|
||||
}
|
||||
|
||||
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
|
||||
func packNum(value reflect.Value) []byte {
|
||||
switch kind := value.Kind(); kind {
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return U256(new(big.Int).SetUint64(value.Uint()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return U256(big.NewInt(value.Int()))
|
||||
case reflect.Ptr:
|
||||
return U256(value.Interface().(*big.Int))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checks whether the given reflect value is signed. This also works for slices with a number type
|
||||
func isSigned(v reflect.Value) bool {
|
||||
switch v.Type() {
|
||||
|
@@ -18,7 +18,6 @@ package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -34,43 +33,6 @@ func TestNumberTypes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackNumber(t *testing.T) {
|
||||
tests := []struct {
|
||||
value reflect.Value
|
||||
packed []byte
|
||||
}{
|
||||
// Protocol limits
|
||||
{reflect.ValueOf(0), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{reflect.ValueOf(1), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
|
||||
{reflect.ValueOf(-1), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}},
|
||||
|
||||
// Type corner cases
|
||||
{reflect.ValueOf(uint8(math.MaxUint8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255}},
|
||||
{reflect.ValueOf(uint16(math.MaxUint16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255}},
|
||||
{reflect.ValueOf(uint32(math.MaxUint32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255}},
|
||||
{reflect.ValueOf(uint64(math.MaxUint64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255}},
|
||||
|
||||
{reflect.ValueOf(int8(math.MaxInt8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127}},
|
||||
{reflect.ValueOf(int16(math.MaxInt16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255}},
|
||||
{reflect.ValueOf(int32(math.MaxInt32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255}},
|
||||
{reflect.ValueOf(int64(math.MaxInt64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255}},
|
||||
|
||||
{reflect.ValueOf(int8(math.MinInt8)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128}},
|
||||
{reflect.ValueOf(int16(math.MinInt16)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0}},
|
||||
{reflect.ValueOf(int32(math.MinInt32)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0}},
|
||||
{reflect.ValueOf(int64(math.MinInt64)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0, 0, 0, 0, 0}},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
packed := packNum(tt.value)
|
||||
if !bytes.Equal(packed, tt.packed) {
|
||||
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
|
||||
}
|
||||
}
|
||||
if packed := packNum(reflect.ValueOf("string")); packed != nil {
|
||||
t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSigned(t *testing.T) {
|
||||
if isSigned(reflect.ValueOf(uint(10))) {
|
||||
t.Error("signed")
|
||||
|
@@ -17,6 +17,7 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -59,8 +60,20 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
}
|
||||
|
||||
return common.RightPadBytes(reflectValue.Bytes(), 32)
|
||||
}
|
||||
panic("abi: fatal error")
|
||||
}
|
||||
|
||||
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
|
||||
func packNum(value reflect.Value) []byte {
|
||||
switch kind := value.Kind(); kind {
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return U256(new(big.Int).SetUint64(value.Uint()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return U256(big.NewInt(value.Int()))
|
||||
case reflect.Ptr:
|
||||
return U256(value.Interface().(*big.Int))
|
||||
}
|
||||
return nil
|
||||
}
|
accounts/abi/pack_test.go (new file, 441 lines)
@@ -0,0 +1,441 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestPack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
|
||||
input interface{}
|
||||
output []byte
|
||||
}{
|
||||
{
|
||||
"uint8",
|
||||
uint8(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint8[]",
|
||||
[]uint8{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint16",
|
||||
uint16(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint16[]",
|
||||
[]uint16{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint32",
|
||||
uint32(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint32[]",
|
||||
[]uint32{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint64",
|
||||
uint64(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint64[]",
|
||||
[]uint64{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint256",
|
||||
big.NewInt(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"uint256[]",
|
||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int8",
|
||||
int8(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int8[]",
|
||||
[]int8{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int16",
|
||||
int16(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int16[]",
|
||||
[]int16{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int32",
|
||||
int32(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int32[]",
|
||||
[]int32{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int64",
|
||||
int64(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int64[]",
|
||||
[]int64{1, 2},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int256",
|
||||
big.NewInt(2),
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"int256[]",
|
||||
[]*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
{
|
||||
"bytes1",
|
||||
[1]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes2",
|
||||
[2]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes3",
|
||||
[3]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes4",
|
||||
[4]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes5",
|
||||
[5]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes6",
|
||||
[6]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes7",
|
||||
[7]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes8",
|
||||
[8]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes9",
|
||||
[9]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes10",
|
||||
[10]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes11",
|
||||
[11]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes12",
|
||||
[12]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes13",
|
||||
[13]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes14",
|
||||
[14]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes15",
|
||||
[15]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes16",
|
||||
[16]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes17",
|
||||
[17]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes18",
|
||||
[18]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes19",
|
||||
[19]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes20",
|
||||
[20]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes21",
|
||||
[21]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes22",
|
||||
[22]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes23",
|
||||
[23]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes24",
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes25",
|
||||
[25]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes26",
|
||||
[26]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes27",
|
||||
[27]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes28",
|
||||
[28]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes29",
|
||||
[29]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes30",
|
||||
[30]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes31",
|
||||
[31]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes32",
|
||||
[32]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"address[]",
|
||||
[]common.Address{{1}, {2}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"bytes32[]",
|
||||
[]common.Hash{{1}, {2}},
|
||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"function",
|
||||
[24]byte{1},
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
"string",
|
||||
"foobar",
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
output, err := typ.pack(reflect.ValueOf(test.input))
|
||||
if err != nil {
|
||||
t.Fatal("unexpected pack error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, test.output) {
|
||||
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMethodPack(t *testing.T) {
|
||||
abi, err := JSON(strings.NewReader(jsondata2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig := abi.Methods["slice"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err := abi.Pack("slice", []uint32{1, 2})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrA, addrB = common.Address{1}, common.Address{2}
|
||||
sig = abi.Methods["sliceAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
var addrC, addrD = common.Address{3}, common.Address{4}
|
||||
sig = abi.Methods["sliceMultiAddress"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
|
||||
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
|
||||
|
||||
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
|
||||
sig = abi.Methods["slice256"].Id()
|
||||
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
|
||||
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
|
||||
|
||||
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(packed, sig) {
|
||||
t.Errorf("expected %x got %x", sig, packed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackNumber(t *testing.T) {
|
||||
tests := []struct {
|
||||
value reflect.Value
|
||||
packed []byte
|
||||
}{
|
||||
// Protocol limits
|
||||
{reflect.ValueOf(0), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")},
|
||||
{reflect.ValueOf(1), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")},
|
||||
{reflect.ValueOf(-1), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
|
||||
|
||||
// Type corner cases
|
||||
{reflect.ValueOf(uint8(math.MaxUint8)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000ff")},
|
||||
{reflect.ValueOf(uint16(math.MaxUint16)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000ffff")},
|
||||
{reflect.ValueOf(uint32(math.MaxUint32)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000ffffffff")},
|
||||
{reflect.ValueOf(uint64(math.MaxUint64)), common.Hex2Bytes("000000000000000000000000000000000000000000000000ffffffffffffffff")},
|
||||
|
||||
{reflect.ValueOf(int8(math.MaxInt8)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000007f")},
|
||||
{reflect.ValueOf(int16(math.MaxInt16)), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000007fff")},
|
||||
{reflect.ValueOf(int32(math.MaxInt32)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000007fffffff")},
|
||||
{reflect.ValueOf(int64(math.MaxInt64)), common.Hex2Bytes("0000000000000000000000000000000000000000000000007fffffffffffffff")},
|
||||
|
||||
{reflect.ValueOf(int8(math.MinInt8)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80")},
|
||||
{reflect.ValueOf(int16(math.MinInt16)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8000")},
|
||||
{reflect.ValueOf(int32(math.MinInt32)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000")},
|
||||
{reflect.ValueOf(int64(math.MinInt64)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
packed := packNum(tt.value)
|
||||
if !bytes.Equal(packed, tt.packed) {
|
||||
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
|
||||
}
|
||||
}
|
||||
if packed := packNum(reflect.ValueOf("string")); packed != nil {
|
||||
t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
|
||||
}
|
||||
}
|
@@ -32,30 +32,30 @@ func indirect(v reflect.Value) reflect.Value {
|
||||
|
||||
// reflectIntKindAndType returns the reflect.Kind and reflect.Type for an integer of the given size and
|
||||
// unsignedness.
|
||||
func reflectIntKind(unsigned bool, size int) reflect.Kind {
|
||||
func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type) {
|
||||
switch size {
|
||||
case 8:
|
||||
if unsigned {
|
||||
return reflect.Uint8
|
||||
return reflect.Uint8, uint8_t
|
||||
}
|
||||
return reflect.Int8
|
||||
return reflect.Int8, int8_t
|
||||
case 16:
|
||||
if unsigned {
|
||||
return reflect.Uint16
|
||||
return reflect.Uint16, uint16_t
|
||||
}
|
||||
return reflect.Int16
|
||||
return reflect.Int16, int16_t
|
||||
case 32:
|
||||
if unsigned {
|
||||
return reflect.Uint32
|
||||
return reflect.Uint32, uint32_t
|
||||
}
|
||||
return reflect.Int32
|
||||
return reflect.Int32, int32_t
|
||||
case 64:
|
||||
if unsigned {
|
||||
return reflect.Uint64
|
||||
return reflect.Uint64, uint64_t
|
||||
}
|
||||
return reflect.Int64
|
||||
return reflect.Int64, int64_t
|
||||
}
|
||||
return reflect.Ptr
|
||||
return reflect.Ptr, big_t
|
||||
}
|
||||
|
||||
// mustArrayToByteSlice creates a new byte slice with the exact same size as value
|
||||
|
@@ -33,7 +33,7 @@ const (
|
||||
FixedBytesTy
|
||||
BytesTy
|
||||
HashTy
|
||||
FixedpointTy
|
||||
FixedPointTy
|
||||
FunctionTy
|
||||
)
|
||||
|
||||
@@ -126,13 +126,11 @@ func NewType(t string) (typ Type, err error) {
|
||||
|
||||
switch varType {
|
||||
case "int":
|
||||
typ.Kind = reflectIntKind(false, varSize)
|
||||
typ.Type = big_t
|
||||
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
|
||||
typ.Size = varSize
|
||||
typ.T = IntTy
|
||||
case "uint":
|
||||
typ.Kind = reflectIntKind(true, varSize)
|
||||
typ.Type = ubig_t
|
||||
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
|
||||
typ.Size = varSize
|
||||
typ.T = UintTy
|
||||
case "bool":
|
||||
|
@@ -17,8 +17,11 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// typeWithoutStringer is an alias for the Type type which simply doesn't implement
|
||||
@@ -31,26 +34,44 @@ func TestTypeRegexp(t *testing.T) {
|
||||
blob string
|
||||
kind Type
|
||||
}{
|
||||
{"int", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: big_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"bool", Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}},
|
||||
{"bool[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}},
|
||||
{"bool[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"uint", Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: ubig_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: ubig_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"bytes", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}},
|
||||
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"int8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}},
|
||||
{"string[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[2]"}},
|
||||
@@ -76,3 +97,59 @@ func TestTypeRegexp(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTypeCheck(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
typ string
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"address[]", []common.Address{{1}}, ""},
|
||||
{"address[1]", []common.Address{{1}}, ""},
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"string", "hello world", ""},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
accounts/abi/unpack.go (new file, 235 lines)
@@ -0,0 +1,235 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// toGoSlice parses the input and casts it to the proper slice defined by the ABI
|
||||
// argument in T.
|
||||
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
||||
index := i * 32
|
||||
// The slice must, at the very least, be large enough for index+32 bytes, which is exactly the size required
|
||||
// for the [offset in output, size of offset].
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
|
||||
}
|
||||
elem := t.Type.Elem
|
||||
|
||||
// first we need to create a slice of the type
|
||||
var refSlice reflect.Value
|
||||
switch elem.T {
|
||||
case IntTy, UintTy, BoolTy:
|
||||
// create a new reference slice matching the element type
|
||||
switch t.Type.Kind {
|
||||
case reflect.Bool:
|
||||
refSlice = reflect.ValueOf([]bool(nil))
|
||||
case reflect.Uint8:
|
||||
refSlice = reflect.ValueOf([]uint8(nil))
|
||||
case reflect.Uint16:
|
||||
refSlice = reflect.ValueOf([]uint16(nil))
|
||||
case reflect.Uint32:
|
||||
refSlice = reflect.ValueOf([]uint32(nil))
|
||||
case reflect.Uint64:
|
||||
refSlice = reflect.ValueOf([]uint64(nil))
|
||||
case reflect.Int8:
|
||||
refSlice = reflect.ValueOf([]int8(nil))
|
||||
case reflect.Int16:
|
||||
refSlice = reflect.ValueOf([]int16(nil))
|
||||
case reflect.Int32:
|
||||
refSlice = reflect.ValueOf([]int32(nil))
|
||||
case reflect.Int64:
|
||||
refSlice = reflect.ValueOf([]int64(nil))
|
||||
default:
|
||||
refSlice = reflect.ValueOf([]*big.Int(nil))
|
||||
}
|
||||
case AddressTy: // address must be of slice Address
|
||||
refSlice = reflect.ValueOf([]common.Address(nil))
|
||||
case HashTy: // hash must be of slice hash
|
||||
refSlice = reflect.ValueOf([]common.Hash(nil))
|
||||
case FixedBytesTy:
|
||||
refSlice = reflect.ValueOf([][]byte(nil))
|
||||
default: // no other types are supported
|
||||
return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
|
||||
}
|
||||
|
||||
var slice []byte
|
||||
var size int
|
||||
var offset int
|
||||
if t.Type.IsSlice {
|
||||
// get the offset which determines the start of this array ...
|
||||
offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
||||
}
|
||||
|
||||
slice = output[offset:]
|
||||
// ... starting with the size of the array in elements ...
|
||||
size = int(binary.BigEndian.Uint64(slice[24:32]))
|
||||
slice = slice[32:]
|
||||
// ... and make sure that we have at least that many bytes
|
||||
// available in the buffer.
|
||||
if size*32 > len(slice) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
|
||||
}
|
||||
|
||||
// reslice to match the required size
|
||||
slice = slice[:size*32]
|
||||
} else if t.Type.IsArray {
|
||||
//get the number of elements in the array
|
||||
size = t.Type.SliceSize
|
||||
|
||||
//check to make sure array size matches up
|
||||
if index+32*size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
|
||||
}
|
||||
// the array elements are laid out inline, occupying exactly size words
|
||||
slice = output[index : index+size*32]
|
||||
}
|
||||
|
||||
for i := 0; i < size; i++ {
|
||||
var (
|
||||
inter interface{} // interface type
|
||||
returnOutput = slice[i*32 : i*32+32] // the return output
|
||||
err error
|
||||
)
|
||||
// set inter to the correct type (cast)
|
||||
switch elem.T {
|
||||
case IntTy, UintTy:
|
||||
inter = readInteger(t.Type.Kind, returnOutput)
|
||||
case BoolTy:
|
||||
inter, err = readBool(returnOutput)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case AddressTy:
|
||||
inter = common.BytesToAddress(returnOutput)
|
||||
case HashTy:
|
||||
inter = common.BytesToHash(returnOutput)
|
||||
case FixedBytesTy:
|
||||
inter = returnOutput
|
||||
}
|
||||
// append the item to our reflect slice
|
||||
refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
|
||||
}
|
||||
|
||||
// return the interface
|
||||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
return uint8(b[len(b)-1])
|
||||
case reflect.Uint16:
|
||||
return binary.BigEndian.Uint16(b[len(b)-2:])
|
||||
case reflect.Uint32:
|
||||
return binary.BigEndian.Uint32(b[len(b)-4:])
|
||||
case reflect.Uint64:
|
||||
return binary.BigEndian.Uint64(b[len(b)-8:])
|
||||
case reflect.Int8:
|
||||
return int8(b[len(b)-1])
|
||||
case reflect.Int16:
|
||||
return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
|
||||
case reflect.Int32:
|
||||
return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
|
||||
case reflect.Int64:
|
||||
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
|
||||
default:
|
||||
return new(big.Int).SetBytes(b)
|
||||
}
|
||||
}
|
||||
|
||||
func readBool(word []byte) (bool, error) {
|
||||
if len(word) != 32 {
|
||||
return false, fmt.Errorf("abi: fatal error: incorrect word length")
|
||||
}
|
||||
|
||||
for i, b := range word {
|
||||
if b != 0 && i != 31 {
|
||||
return false, errBadBool
|
||||
}
|
||||
}
|
||||
switch word[31] {
|
||||
case 0:
|
||||
return false, nil
|
||||
case 1:
|
||||
return true, nil
|
||||
default:
|
||||
return false, errBadBool
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// toGoType parses the input and casts it to the proper type defined by the ABI
|
||||
// argument in T.
|
||||
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
||||
// we need to treat slices differently
|
||||
if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
|
||||
return toGoSlice(i, t, output)
|
||||
}
|
||||
|
||||
index := i * 32
|
||||
if index+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
|
||||
}
|
||||
|
||||
// Parse the given index output and check whether we need to read
|
||||
// a different offset and length based on the type (i.e. string, bytes)
|
||||
var returnOutput []byte
|
||||
switch t.Type.T {
|
||||
case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
|
||||
// parse offset from which we should start reading
|
||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
|
||||
}
|
||||
// parse the size up until we should be reading
|
||||
size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
||||
if offset+32+size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
|
||||
}
|
||||
|
||||
// get the bytes for this return value
|
||||
returnOutput = output[offset+32 : offset+32+size]
|
||||
default:
|
||||
returnOutput = output[index : index+32]
|
||||
}
|
||||
|
||||
// convert the bytes to whatever is specified by the ABI.
|
||||
switch t.Type.T {
|
||||
case IntTy, UintTy:
|
||||
return readInteger(t.Type.Kind, returnOutput), nil
|
||||
case BoolTy:
|
||||
return readBool(returnOutput)
|
||||
case AddressTy:
|
||||
return common.BytesToAddress(returnOutput), nil
|
||||
case HashTy:
|
||||
return common.BytesToHash(returnOutput), nil
|
||||
case BytesTy, FixedBytesTy, FunctionTy:
|
||||
return returnOutput, nil
|
||||
case StringTy:
|
||||
return string(returnOutput), nil
|
||||
}
|
||||
return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
|
||||
}
|
accounts/abi/unpack_test.go (new file, 681 lines)
@@ -0,0 +1,681 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "bool" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
bool(true),
|
||||
"bool",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "function" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
[24]byte{1},
|
||||
"function",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "bool":
|
||||
var v bool
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v.Bytes()[:]
|
||||
case "function":
|
||||
var v [24]byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
|
||||
var (
|
||||
var1 = new([1]uint32)
|
||||
var2 = new([1]uint32)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *var1 != [1]uint32{1} {
|
||||
t.Error("expected var1 to be [1], got", *var1)
|
||||
}
|
||||
if *var2 != [1]uint32{2} {
|
||||
t.Error("expected var2 to be [2], got", *var2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", inter.Int)
|
||||
}
|
||||
|
||||
if inter.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", inter.String)
|
||||
}
|
||||
|
||||
var reversed struct {
|
||||
String string
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", reversed.Int)
|
||||
}
|
||||
|
||||
if reversed.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", reversed.String)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(inter) != 2 {
|
||||
t.Fatal("expected 2 results got", len(inter))
|
||||
}
|
||||
|
||||
if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected index 0 to be 1 got", num)
|
||||
}
|
||||
|
||||
if str, ok := inter[1].(string); !ok || str != stringOut {
|
||||
t.Error("expected index 1 to be", stringOut, "got", str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(bytes32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
type (
|
||||
B10 [10]byte
|
||||
B32 [32]byte
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(b32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(output, shortAssignLong[:]) {
|
||||
t.Errorf("expected %x to be %x", shortAssignLong, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
{ "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
|
||||
{ "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
|
||||
{ "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
|
||||
{ "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] },
|
||||
{ "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
|
||||
{ "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
|
||||
{ "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if Int == nil || Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", Int)
|
||||
}
|
||||
|
||||
// marshal bool
|
||||
var Bool bool
|
||||
err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !Bool {
|
||||
t.Error("expected Bool to be true")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 32
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut := common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var Bytes []byte
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 64
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshall dynamic bytes max length 63
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
err = abi.Unpack(&Bytes, "bytes", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, []byte("hello")) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes length 5
|
||||
buff.Reset()
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
var hash common.Hash
|
||||
err = abi.Unpack(&hash, "fixed", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
helloHash := common.BytesToHash(common.RightPadBytes([]byte("hello"), 32))
|
||||
if hash != helloHash {
|
||||
t.Errorf("Expected %x to equal %x", hash, helloHash)
|
||||
}
|
||||
|
||||
// marshal error
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
err = abi.Unpack(&Bytes, "multi", make([]byte, 64))
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal mixed bytes
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
buff.Write(fixed)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bytesOut, out[0].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", bytesOut, out[0])
|
||||
}
|
||||
|
||||
if !bytes.Equal(fixed, out[1].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", fixed, out[1])
|
||||
}
|
||||
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
// marshal int array
|
||||
var intArray [3]*big.Int
|
||||
err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var testAgainstIntArray [3]*big.Int
|
||||
testAgainstIntArray[0] = big.NewInt(1)
|
||||
testAgainstIntArray[1] = big.NewInt(2)
|
||||
testAgainstIntArray[2] = big.NewInt(3)
|
||||
|
||||
for i, Int := range intArray {
|
||||
if Int.Cmp(testAgainstIntArray[i]) != 0 {
|
||||
t.Errorf("expected %v, got %v", testAgainstIntArray[i], Int)
|
||||
}
|
||||
}
|
||||
// marshal address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
|
||||
var outAddr []common.Address
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddr) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddr))
|
||||
}
|
||||
|
||||
if outAddr[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddr[0])
|
||||
}
|
||||
|
||||
// marshal multiple address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // offset
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // size
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000200000000000000000000000000000000000000"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000300000000000000000000000000000000000000"))
|
||||
|
||||
var outAddrStruct struct {
|
||||
A []common.Address
|
||||
B []common.Address
|
||||
}
|
||||
err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if len(outAddrStruct.A) != 1 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.A))
|
||||
}
|
||||
|
||||
if outAddrStruct.A[0] != (common.Address{1}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{1}, outAddrStruct.A[0])
|
||||
}
|
||||
|
||||
if len(outAddrStruct.B) != 2 {
|
||||
t.Fatal("expected 1 item, got", len(outAddrStruct.B))
|
||||
}
|
||||
|
||||
if outAddrStruct.B[0] != (common.Address{2}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{2}, outAddrStruct.B[0])
|
||||
}
|
||||
if outAddrStruct.B[1] != (common.Address{3}) {
|
||||
t.Errorf("expected %x, got %x", common.Address{3}, outAddrStruct.B[1])
|
||||
}
|
||||
|
||||
// marshal invalid address slice
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))
|
||||
|
||||
err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Fatal("expected error:", err)
|
||||
}
|
||||
}
|
@ -42,8 +42,9 @@ type Wallet interface {
|
||||
URL() URL
|
||||
|
||||
// Status returns a textual status to aid the user in the current state of the
|
||||
// wallet.
|
||||
Status() string
|
||||
// wallet. It also returns an error indicating any failure the wallet might have
|
||||
// encountered.
|
||||
Status() (string, error)
|
||||
|
||||
// Open initializes access to a wallet instance. It is not meant to unlock or
|
||||
// decrypt account keys, rather simply to establish a connection to hardware
|
||||
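The two-value Status signature lets a caller distinguish a locked-but-healthy wallet from one that has actually failed. A minimal sketch of how a caller might surface this, assuming an already constructed accounts.Manager; the reportStatus helper name is illustrative and not part of the package API:

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

// reportStatus prints the status of every wallet tracked by the manager,
// treating the error return as a sign that the wallet itself is unhealthy.
func reportStatus(manager *accounts.Manager) {
	for _, wallet := range manager.Wallets() {
		status, err := wallet.Status()
		if err != nil {
			fmt.Printf("wallet %v unavailable: %v\n", wallet.URL(), err)
			continue
		}
		fmt.Printf("wallet %v: %s\n", wallet.URL(), status)
	}
}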
@ -147,9 +148,26 @@ type Backend interface {
|
||||
Subscribe(sink chan<- WalletEvent) event.Subscription
|
||||
}
|
||||
|
||||
// WalletEventType represents the different event types that can be fired by
|
||||
// the wallet subscription subsystem.
|
||||
type WalletEventType int
|
||||
|
||||
const (
|
||||
// WalletArrived is fired when a new wallet is detected either via USB or via
|
||||
// a filesystem event in the keystore.
|
||||
WalletArrived WalletEventType = iota
|
||||
|
||||
// WalletOpened is fired when a wallet is successfully opened with the purpose
|
||||
// of starting any background processes such as automatic key derivation.
|
||||
WalletOpened
|
||||
|
||||
// WalletDropped is fired when a wallet is removed or disconnected, either via USB or due to a filesystem event in the keystore.
|
||||
WalletDropped
|
||||
)
|
||||
|
||||
// WalletEvent is an event fired by an account backend when a wallet arrival or
|
||||
// departure is detected.
|
||||
type WalletEvent struct {
|
||||
Wallet Wallet // Wallet instance arrived or departed
|
||||
Arrive bool // Whether the wallet was added or removed
|
||||
Wallet Wallet // Wallet instance arrived or departed
|
||||
Kind WalletEventType // Event type that happened in the system
|
||||
}
|
||||
|
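With the boolean Arrive flag replaced by Kind, subscribers switch on the event type instead. A short sketch of a subscriber loop against any accounts.Backend, using only the Subscribe signature and event constants introduced above; the channel size, helper name and log strings are illustrative:

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

// watchWallets reacts to wallet arrivals and departures reported by a backend.
func watchWallets(backend accounts.Backend) {
	sink := make(chan accounts.WalletEvent, 16)
	sub := backend.Subscribe(sink)
	defer sub.Unsubscribe()

	for {
		select {
		case event := <-sink:
			switch event.Kind {
			case accounts.WalletArrived:
				fmt.Println("wallet arrived:", event.Wallet.URL())
			case accounts.WalletDropped:
				fmt.Println("wallet dropped:", event.Wallet.URL())
			}
		case err := <-sub.Err():
			fmt.Println("subscription ended:", err)
			return
		}
	}
}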
@ -38,7 +38,7 @@ var ErrNotSupported = errors.New("not supported")
|
||||
var ErrInvalidPassphrase = errors.New("invalid passphrase")
|
||||
|
||||
// ErrWalletAlreadyOpen is returned if a wallet is attempted to be opened the
|
||||
// secodn time.
|
||||
// second time.
|
||||
var ErrWalletAlreadyOpen = errors.New("wallet already open")
|
||||
|
||||
// ErrWalletClosed is returned if a wallet is attempted to be opened the
|
||||
|
@ -27,12 +27,17 @@ import (
|
||||
// DefaultRootDerivationPath is the root path to which custom derivation endpoints
|
||||
// are appended. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0}
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
|
||||
|
||||
// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DerivationPath represents the computer friendly version of a hierarchical
|
||||
// deterministic wallet account derivation path.
|
||||
|
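The change above appends an extra component to the default paths, so the base path now carries an explicit final index that can be bumped to reach successive accounts. A hedged sketch of deriving a run of sibling paths from DefaultBaseDerivationPath; the nextAccountPaths helper is illustrative, assuming these identifiers are exported from the accounts package (as the hd-path tests below imply) and that DerivationPath remains a slice of uint32 components as the literals above suggest:

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

// nextAccountPaths copies the base path n times, incrementing the final
// component each time, yielding sequential sibling account paths.
func nextAccountPaths(base accounts.DerivationPath, n uint32) []accounts.DerivationPath {
	paths := make([]accounts.DerivationPath, 0, n)
	for i := uint32(0); i < n; i++ {
		path := make(accounts.DerivationPath, len(base))
		copy(path, base)
		path[len(path)-1] += i
		paths = append(paths, path)
	}
	return paths
}

func exampleUsage() {
	// Prints the raw component values of the first three sibling paths.
	for _, p := range nextAccountPaths(accounts.DefaultBaseDerivationPath, 3) {
		fmt.Println(p)
	}
}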
@ -37,11 +37,11 @@ func TestHDPathParsing(t *testing.T) {
|
||||
{"m/2147483692/2147483708/2147483648/2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
|
||||
// Plain relative derivation paths
|
||||
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
|
||||
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
|
||||
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
|
||||
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
|
||||
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
|
||||
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
|
||||
// Hexadecimal absolute derivation paths
|
||||
{"m/0x2C'/0x3c'/0x00'/0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
@ -52,11 +52,11 @@ func TestHDPathParsing(t *testing.T) {
|
||||
{"m/0x8000002C/0x8000003c/0x80000000/0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
|
||||
// Hexadecimal relative derivation paths
|
||||
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
|
||||
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
|
||||
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
|
||||
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
|
||||
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
|
||||
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
|
||||
// Weird inputs just to ensure they work
|
||||
{" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
|
@ -91,14 +91,6 @@ type cipherparamsJSON struct {
|
||||
IV string `json:"iv"`
|
||||
}
|
||||
|
||||
type scryptParamsJSON struct {
|
||||
N int `json:"n"`
|
||||
R int `json:"r"`
|
||||
P int `json:"p"`
|
||||
DkLen int `json:"dklen"`
|
||||
Salt string `json:"salt"`
|
||||
}
|
||||
|
||||
func (k *Key) MarshalJSON() (j []byte, err error) {
|
||||
jStruct := plainKeyJSON{
|
||||
hex.EncodeToString(k.Address[:]),
|
||||
@ -124,14 +116,13 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
privkey, err := hex.DecodeString(keyJSON.PrivateKey)
|
||||
privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k.Address = common.BytesToAddress(addr)
|
||||
k.PrivateKey = crypto.ToECDSA(privkey)
|
||||
k.PrivateKey = privkey
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -143,14 +143,14 @@ func (ks *KeyStore) refreshWallets() {
|
||||
for _, account := range accs {
|
||||
// Drop wallets while they were in front of the next account
|
||||
for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 {
|
||||
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Kind: accounts.WalletDropped})
|
||||
ks.wallets = ks.wallets[1:]
|
||||
}
|
||||
// If there are no more wallets or the account is before the next, wrap new wallet
|
||||
if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 {
|
||||
wallet := &keystoreWallet{account: account, keystore: ks}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
|
||||
wallets = append(wallets, wallet)
|
||||
continue
|
||||
}
|
||||
@ -163,7 +163,7 @@ func (ks *KeyStore) refreshWallets() {
|
||||
}
|
||||
// Drop any leftover wallets and set the new batch
|
||||
for _, wallet := range ks.wallets {
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
|
||||
}
|
||||
ks.wallets = wallets
|
||||
ks.mu.Unlock()
|
||||
@ -450,7 +450,6 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco
|
||||
if ks.cache.hasAddress(key.Address) {
|
||||
return accounts.Account{}, fmt.Errorf("account already exists")
|
||||
}
|
||||
|
||||
return ks.importKey(key, passphrase)
|
||||
}
|
||||
|
||||
|
@ -140,7 +140,7 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
Cipher: "aes-128-ctr",
|
||||
CipherText: hex.EncodeToString(cipherText),
|
||||
CipherParams: cipherParamsJSON,
|
||||
KDF: "scrypt",
|
||||
KDF: keyHeaderKDF,
|
||||
KDFParams: scryptParamsJSON,
|
||||
MAC: hex.EncodeToString(mac),
|
||||
}
|
||||
@ -182,7 +182,8 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := crypto.ToECDSA(keyBytes)
|
||||
key := crypto.ToECDSAUnsafe(keyBytes)
|
||||
|
||||
return &Key{
|
||||
Id: uuid.UUID(keyId),
|
||||
Address: crypto.PubkeyToAddress(key.PublicKey),
|
||||
@ -274,7 +275,7 @@ func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
|
||||
}
|
||||
dkLen := ensureInt(cryptoJSON.KDFParams["dklen"])
|
||||
|
||||
if cryptoJSON.KDF == "scrypt" {
|
||||
if cryptoJSON.KDF == keyHeaderKDF {
|
||||
n := ensureInt(cryptoJSON.KDFParams["n"])
|
||||
r := ensureInt(cryptoJSON.KDFParams["r"])
|
||||
p := ensureInt(cryptoJSON.KDFParams["p"])
|
||||
|
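For reference, the scrypt branch above feeds the stored n/r/p parameters, the salt and dklen into the key derivation before the MAC check. A standalone sketch of that derivation with golang.org/x/crypto/scrypt; the parameter values are the usual keystore defaults and are only illustrative here, as is the deriveScryptKey name:

package example

import (
	"encoding/hex"

	"golang.org/x/crypto/scrypt"
)

// deriveScryptKey stretches a passphrase with scrypt the same way the
// keystore's scrypt KDF branch does, returning dkLen bytes of key material.
func deriveScryptKey(auth, saltHex string) ([]byte, error) {
	salt, err := hex.DecodeString(saltHex)
	if err != nil {
		return nil, err
	}
	const (
		n     = 262144 // CPU/memory cost factor (assumed default)
		r     = 8      // block size
		p     = 1      // parallelism (assumed default)
		dkLen = 32     // derived key length in bytes
	)
	return scrypt.Key([]byte(auth), salt, n, r, p, dkLen)
}

In the v3 keystore format the derived key is then typically split between the cipher key and the MAC computation, which is what the surrounding EncryptKey/DecryptKey code checks.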
@ -46,7 +46,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
|
||||
// Decrypt with the correct password
|
||||
key, err := DecryptKey(keyjson, password)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: json key failed to decrypt: %v", i, err)
|
||||
t.Fatalf("test %d: json key failed to decrypt: %v", i, err)
|
||||
}
|
||||
if key.Address != address {
|
||||
t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -140,21 +141,32 @@ func TestV3_PBKDF2_1(t *testing.T) {
|
||||
testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t)
|
||||
}
|
||||
|
||||
var testsSubmodule = filepath.Join("..", "..", "tests", "testdata", "KeyStoreTests")
|
||||
|
||||
func skipIfSubmoduleMissing(t *testing.T) {
|
||||
if !common.FileExist(testsSubmodule) {
|
||||
t.Skipf("can't find JSON tests from submodule at %s", testsSubmodule)
|
||||
}
|
||||
}
|
||||
|
||||
func TestV3_PBKDF2_2(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["test1"], t)
|
||||
}
|
||||
|
||||
func TestV3_PBKDF2_3(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["python_generated_test_with_odd_iv"], t)
|
||||
}
|
||||
|
||||
func TestV3_PBKDF2_4(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["evilnonce"], t)
|
||||
}
|
||||
|
||||
@ -165,8 +177,9 @@ func TestV3_Scrypt_1(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestV3_Scrypt_2(t *testing.T) {
|
||||
skipIfSubmoduleMissing(t)
|
||||
t.Parallel()
|
||||
tests := loadKeyStoreTestV3("../../tests/files/KeyStoreTests/basic_tests.json", t)
|
||||
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
|
||||
testDecryptV3(tests["test2"], t)
|
||||
}
|
||||
|
||||
|
@ -296,8 +296,8 @@ func TestWalletNotifications(t *testing.T) {
|
||||
}
|
||||
select {
|
||||
case event := <-updates:
|
||||
if !event.Arrive {
|
||||
t.Errorf("departure event on account creation")
|
||||
if event.Kind != accounts.WalletArrived {
|
||||
t.Errorf("non-arrival event on account creation")
|
||||
}
|
||||
if event.Wallet.Accounts()[0] != account {
|
||||
t.Errorf("account mismatch on created wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
|
||||
@ -319,8 +319,8 @@ func TestWalletNotifications(t *testing.T) {
|
||||
}
|
||||
select {
|
||||
case event := <-updates:
|
||||
if event.Arrive {
|
||||
t.Errorf("arrival event on account deletion")
|
||||
if event.Kind != accounts.WalletDropped {
|
||||
t.Errorf("non-drop event on account deletion")
|
||||
}
|
||||
if event.Wallet.Accounts()[0] != account {
|
||||
t.Errorf("account mismatch on deleted wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
|
||||
|
@ -36,16 +36,16 @@ func (w *keystoreWallet) URL() accounts.URL {
|
||||
return w.account.URL
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, always returning "open", since there is no
|
||||
// concept of open/close for plain keystore accounts.
|
||||
func (w *keystoreWallet) Status() string {
|
||||
// Status implements accounts.Wallet, returning whether the account held by the
|
||||
// keystore wallet is unlocked or not.
|
||||
func (w *keystoreWallet) Status() (string, error) {
|
||||
w.keystore.mu.RLock()
|
||||
defer w.keystore.mu.RUnlock()
|
||||
|
||||
if _, ok := w.keystore.unlocked[w.account.Address]; ok {
|
||||
return "Unlocked"
|
||||
return "Unlocked", nil
|
||||
}
|
||||
return "Locked"
|
||||
return "Locked", nil
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, but is a noop for plain wallets since there
|
||||
|
@ -74,7 +74,8 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||
return nil, err
|
||||
}
|
||||
ethPriv := crypto.Keccak256(plainText)
|
||||
ecKey := crypto.ToECDSA(ethPriv)
|
||||
ecKey := crypto.ToECDSAUnsafe(ethPriv)
|
||||
|
||||
key = &Key{
|
||||
Id: nil,
|
||||
Address: crypto.PubkeyToAddress(ecKey.PublicKey),
|
||||
|
@ -96,9 +96,10 @@ func (am *Manager) update() {
|
||||
case event := <-am.updates:
|
||||
// Wallet event arrived, update local cache
|
||||
am.lock.Lock()
|
||||
if event.Arrive {
|
||||
switch event.Kind {
|
||||
case WalletArrived:
|
||||
am.wallets = merge(am.wallets, event.Wallet)
|
||||
} else {
|
||||
case WalletDropped:
|
||||
am.wallets = drop(am.wallets, event.Wallet)
|
||||
}
|
||||
am.lock.Unlock()
|
||||
|
@ -14,10 +14,6 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
@ -33,24 +29,28 @@ import (
|
||||
)
|
||||
|
||||
// LedgerScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
var LedgerScheme = "ledger"
|
||||
const LedgerScheme = "ledger"
|
||||
|
||||
// ledgerDeviceIDs are the known device IDs that Ledger wallets use.
|
||||
var ledgerDeviceIDs = []deviceID{
|
||||
{Vendor: 0x2c97, Product: 0x0000}, // Ledger Blue
|
||||
{Vendor: 0x2c97, Product: 0x0001}, // Ledger Nano S
|
||||
}
|
||||
// TrezorScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
const TrezorScheme = "trezor"
|
||||
|
||||
// Maximum time between wallet refreshes (if USB hotplug notifications don't work).
|
||||
const ledgerRefreshCycle = time.Second
|
||||
// refreshCycle is the maximum time between wallet refreshes (if USB hotplug
|
||||
// notifications don't work).
|
||||
const refreshCycle = time.Second
|
||||
|
||||
// Minimum time between wallet refreshes to avoid USB trashing.
|
||||
const ledgerRefreshThrottling = 500 * time.Millisecond
|
||||
// refreshThrottling is the minimum time between wallet refreshes to avoid USB
|
||||
// trashing.
|
||||
const refreshThrottling = 500 * time.Millisecond
|
||||
|
||||
// Hub is an accounts.Backend that can find and handle generic USB hardware wallets.
|
||||
type Hub struct {
|
||||
scheme string // Protocol scheme prefixing account and wallet URLs.
|
||||
vendorID uint16 // USB vendor identifier used for device discovery
|
||||
productIDs []uint16 // USB product identifiers used for device discovery
|
||||
makeDriver func(log.Logger) driver // Factory method to construct a vendor specific driver
|
||||
|
||||
// LedgerHub is a accounts.Backend that can find and handle Ledger hardware wallets.
|
||||
type LedgerHub struct {
|
||||
refreshed time.Time // Time instance when the list of wallets was last refreshed
|
||||
wallets []accounts.Wallet // List of Ledger devices currently tracking
|
||||
wallets []accounts.Wallet // List of USB wallet devices currently tracking
|
||||
updateFeed event.Feed // Event feed to notify wallet additions/removals
|
||||
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
|
||||
updating bool // Whether the event notification loop is running
|
||||
@ -65,20 +65,34 @@ type LedgerHub struct {
|
||||
}
|
||||
|
||||
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
|
||||
func NewLedgerHub() (*LedgerHub, error) {
|
||||
func NewLedgerHub() (*Hub, error) {
|
||||
return newHub(LedgerScheme, 0x2c97, []uint16{0x0000 /* Ledger Blue */, 0x0001 /* Ledger Nano S */}, newLedgerDriver)
|
||||
}
|
||||
|
||||
// NewTrezorHub creates a new hardware wallet manager for Trezor devices.
|
||||
func NewTrezorHub() (*Hub, error) {
|
||||
return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor 1 */}, newTrezorDriver)
|
||||
}
|
||||
|
||||
// newHub creates a new hardware wallet manager for generic USB devices.
|
||||
func newHub(scheme string, vendorID uint16, productIDs []uint16, makeDriver func(log.Logger) driver) (*Hub, error) {
|
||||
if !hid.Supported() {
|
||||
return nil, errors.New("unsupported platform")
|
||||
}
|
||||
hub := &LedgerHub{
|
||||
quit: make(chan chan error),
|
||||
hub := &Hub{
|
||||
scheme: scheme,
|
||||
vendorID: vendorID,
|
||||
productIDs: productIDs,
|
||||
makeDriver: makeDriver,
|
||||
quit: make(chan chan error),
|
||||
}
|
||||
hub.refreshWallets()
|
||||
return hub, nil
|
||||
}
|
||||
|
||||
// Wallets implements accounts.Backend, returning all the currently tracked USB
|
||||
// devices that appear to be Ledger hardware wallets.
|
||||
func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
||||
// devices that appear to be hardware wallets.
|
||||
func (hub *Hub) Wallets() []accounts.Wallet {
|
||||
// Make sure the list of wallets is up to date
|
||||
hub.refreshWallets()
|
||||
|
||||
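Because the Ledger and Trezor hubs now share this generic implementation, client code only chooses which constructor to call. A hedged usage sketch that gathers the wallets currently visible through both hubs; attachedUSBWallets is an illustrative helper, and on platforms without HID support the constructors simply return an error:

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/usbwallet"
)

// attachedUSBWallets lists the wallets visible through the Ledger and Trezor
// hubs, skipping whichever hub cannot be constructed on this platform.
func attachedUSBWallets() []accounts.Wallet {
	var wallets []accounts.Wallet
	if hub, err := usbwallet.NewLedgerHub(); err == nil {
		wallets = append(wallets, hub.Wallets()...)
	}
	if hub, err := usbwallet.NewTrezorHub(); err == nil {
		wallets = append(wallets, hub.Wallets()...)
	}
	for _, w := range wallets {
		fmt.Println("found USB wallet:", w.URL())
	}
	return wallets
}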
@ -92,17 +106,17 @@ func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
||||
|
||||
// refreshWallets scans the USB devices attached to the machine and updates the
|
||||
// list of wallets based on the found devices.
|
||||
func (hub *LedgerHub) refreshWallets() {
|
||||
func (hub *Hub) refreshWallets() {
|
||||
// Don't scan the USB like crazy if the user fetches wallets in a loop
|
||||
hub.stateLock.RLock()
|
||||
elapsed := time.Since(hub.refreshed)
|
||||
hub.stateLock.RUnlock()
|
||||
|
||||
if elapsed < ledgerRefreshThrottling {
|
||||
if elapsed < refreshThrottling {
|
||||
return
|
||||
}
|
||||
// Retrieve the current list of Ledger devices
|
||||
var ledgers []hid.DeviceInfo
|
||||
// Retrieve the current list of USB wallet devices
|
||||
var devices []hid.DeviceInfo
|
||||
|
||||
if runtime.GOOS == "linux" {
|
||||
// hidapi on Linux opens the device during enumeration to retrieve some infos,
|
||||
@ -117,10 +131,10 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, info := range hid.Enumerate(0, 0) { // Can't enumerate directly, one valid ID is the 0 wildcard
|
||||
for _, id := range ledgerDeviceIDs {
|
||||
if info.VendorID == id.Vendor && info.ProductID == id.Product {
|
||||
ledgers = append(ledgers, info)
|
||||
for _, info := range hid.Enumerate(hub.vendorID, 0) {
|
||||
for _, id := range hub.productIDs {
|
||||
if info.ProductID == id && info.Interface == 0 {
|
||||
devices = append(devices, info)
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -132,22 +146,29 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
// Transform the current list of wallets into the new one
|
||||
hub.stateLock.Lock()
|
||||
|
||||
wallets := make([]accounts.Wallet, 0, len(ledgers))
|
||||
wallets := make([]accounts.Wallet, 0, len(devices))
|
||||
events := []accounts.WalletEvent{}
|
||||
|
||||
for _, ledger := range ledgers {
|
||||
url := accounts.URL{Scheme: LedgerScheme, Path: ledger.Path}
|
||||
for _, device := range devices {
|
||||
url := accounts.URL{Scheme: hub.scheme, Path: device.Path}
|
||||
|
||||
// Drop wallets in front of the next device or those that failed for some reason
|
||||
for len(hub.wallets) > 0 && (hub.wallets[0].URL().Cmp(url) < 0 || hub.wallets[0].(*ledgerWallet).failed()) {
|
||||
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Arrive: false})
|
||||
for len(hub.wallets) > 0 {
|
||||
// Abort if we're past the current device and found an operational one
|
||||
_, failure := hub.wallets[0].Status()
|
||||
if hub.wallets[0].URL().Cmp(url) >= 0 || failure == nil {
|
||||
break
|
||||
}
|
||||
// Drop the stale and failed devices
|
||||
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Kind: accounts.WalletDropped})
|
||||
hub.wallets = hub.wallets[1:]
|
||||
}
|
||||
// If there are no more wallets or the device is before the next, wrap new wallet
|
||||
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
||||
wallet := &ledgerWallet{hub: hub, url: &url, info: ledger, log: log.New("url", url)}
|
||||
logger := log.New("url", url)
|
||||
wallet := &wallet{hub: hub, driver: hub.makeDriver(logger), url: &url, info: device, log: logger}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
|
||||
wallets = append(wallets, wallet)
|
||||
continue
|
||||
}
|
||||
@ -160,7 +181,7 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
}
|
||||
// Drop any leftover wallets and set the new batch
|
||||
for _, wallet := range hub.wallets {
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
|
||||
}
|
||||
hub.refreshed = time.Now()
|
||||
hub.wallets = wallets
|
||||
@ -173,8 +194,8 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
}
|
||||
|
||||
// Subscribe implements accounts.Backend, creating an async subscription to
|
||||
// receive notifications on the addition or removal of Ledger wallets.
|
||||
func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// receive notifications on the addition or removal of USB wallets.
|
||||
func (hub *Hub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// We need the mutex to reliably start/stop the update loop
|
||||
hub.stateLock.Lock()
|
||||
defer hub.stateLock.Unlock()
|
||||
@ -190,18 +211,14 @@ func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscrip
|
||||
return sub
|
||||
}
|
||||
|
||||
// updater is responsible for maintaining an up-to-date list of wallets stored in
|
||||
// the keystore, and for firing wallet addition/removal events. It listens for
|
||||
// account change events from the underlying account cache, and also periodically
|
||||
// forces a manual refresh (only triggers for systems where the filesystem notifier
|
||||
// is not running).
|
||||
func (hub *LedgerHub) updater() {
|
||||
// updater is responsible for maintaining an up-to-date list of wallets managed
|
||||
// by the USB hub, and for firing wallet addition/removal events.
|
||||
func (hub *Hub) updater() {
|
||||
for {
|
||||
// Wait for a USB hotplug event (not supported yet) or a refresh timeout
|
||||
select {
|
||||
//case <-hub.changes: // reenable on hutplug implementation
|
||||
case <-time.After(ledgerRefreshCycle):
|
||||
}
|
||||
// TODO: Wait for a USB hotplug event (not supported yet) or a refresh timeout
|
||||
// <-hub.changes
|
||||
time.Sleep(refreshCycle)
|
||||
|
||||
// Run the wallet refresher
|
||||
hub.refreshWallets()
|
||||
|
3081 accounts/usbwallet/internal/trezor/messages.pb.go Normal file
File diff suppressed because it is too large
903 accounts/usbwallet/internal/trezor/messages.proto Normal file
@ -0,0 +1,903 @@
|
||||
// This file originates from the SatoshiLabs Trezor `common` repository at:
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/messages.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
/**
|
||||
* Messages for TREZOR communication
|
||||
*/
|
||||
|
||||
// Sugar for easier handling in Java
|
||||
option java_package = "com.satoshilabs.trezor.lib.protobuf";
|
||||
option java_outer_classname = "TrezorMessage";
|
||||
|
||||
import "types.proto";
|
||||
|
||||
/**
|
||||
* Mapping between Trezor wire identifier (uint) and a protobuf message
|
||||
*/
|
||||
enum MessageType {
|
||||
MessageType_Initialize = 0 [(wire_in) = true];
|
||||
MessageType_Ping = 1 [(wire_in) = true];
|
||||
MessageType_Success = 2 [(wire_out) = true];
|
||||
MessageType_Failure = 3 [(wire_out) = true];
|
||||
MessageType_ChangePin = 4 [(wire_in) = true];
|
||||
MessageType_WipeDevice = 5 [(wire_in) = true];
|
||||
MessageType_FirmwareErase = 6 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_FirmwareUpload = 7 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_FirmwareRequest = 8 [(wire_out) = true, (wire_bootloader) = true];
|
||||
MessageType_GetEntropy = 9 [(wire_in) = true];
|
||||
MessageType_Entropy = 10 [(wire_out) = true];
|
||||
MessageType_GetPublicKey = 11 [(wire_in) = true];
|
||||
MessageType_PublicKey = 12 [(wire_out) = true];
|
||||
MessageType_LoadDevice = 13 [(wire_in) = true];
|
||||
MessageType_ResetDevice = 14 [(wire_in) = true];
|
||||
MessageType_SignTx = 15 [(wire_in) = true];
|
||||
MessageType_SimpleSignTx = 16 [(wire_in) = true, deprecated = true];
|
||||
MessageType_Features = 17 [(wire_out) = true];
|
||||
MessageType_PinMatrixRequest = 18 [(wire_out) = true];
|
||||
MessageType_PinMatrixAck = 19 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_Cancel = 20 [(wire_in) = true];
|
||||
MessageType_TxRequest = 21 [(wire_out) = true];
|
||||
MessageType_TxAck = 22 [(wire_in) = true];
|
||||
MessageType_CipherKeyValue = 23 [(wire_in) = true];
|
||||
MessageType_ClearSession = 24 [(wire_in) = true];
|
||||
MessageType_ApplySettings = 25 [(wire_in) = true];
|
||||
MessageType_ButtonRequest = 26 [(wire_out) = true];
|
||||
MessageType_ButtonAck = 27 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_ApplyFlags = 28 [(wire_in) = true];
|
||||
MessageType_GetAddress = 29 [(wire_in) = true];
|
||||
MessageType_Address = 30 [(wire_out) = true];
|
||||
MessageType_SelfTest = 32 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_BackupDevice = 34 [(wire_in) = true];
|
||||
MessageType_EntropyRequest = 35 [(wire_out) = true];
|
||||
MessageType_EntropyAck = 36 [(wire_in) = true];
|
||||
MessageType_SignMessage = 38 [(wire_in) = true];
|
||||
MessageType_VerifyMessage = 39 [(wire_in) = true];
|
||||
MessageType_MessageSignature = 40 [(wire_out) = true];
|
||||
MessageType_PassphraseRequest = 41 [(wire_out) = true];
|
||||
MessageType_PassphraseAck = 42 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_EstimateTxSize = 43 [(wire_in) = true, deprecated = true];
|
||||
MessageType_TxSize = 44 [(wire_out) = true, deprecated = true];
|
||||
MessageType_RecoveryDevice = 45 [(wire_in) = true];
|
||||
MessageType_WordRequest = 46 [(wire_out) = true];
|
||||
MessageType_WordAck = 47 [(wire_in) = true];
|
||||
MessageType_CipheredKeyValue = 48 [(wire_out) = true];
|
||||
MessageType_EncryptMessage = 49 [(wire_in) = true, deprecated = true];
|
||||
MessageType_EncryptedMessage = 50 [(wire_out) = true, deprecated = true];
|
||||
MessageType_DecryptMessage = 51 [(wire_in) = true, deprecated = true];
|
||||
MessageType_DecryptedMessage = 52 [(wire_out) = true, deprecated = true];
|
||||
MessageType_SignIdentity = 53 [(wire_in) = true];
|
||||
MessageType_SignedIdentity = 54 [(wire_out) = true];
|
||||
MessageType_GetFeatures = 55 [(wire_in) = true];
|
||||
MessageType_EthereumGetAddress = 56 [(wire_in) = true];
|
||||
MessageType_EthereumAddress = 57 [(wire_out) = true];
|
||||
MessageType_EthereumSignTx = 58 [(wire_in) = true];
|
||||
MessageType_EthereumTxRequest = 59 [(wire_out) = true];
|
||||
MessageType_EthereumTxAck = 60 [(wire_in) = true];
|
||||
MessageType_GetECDHSessionKey = 61 [(wire_in) = true];
|
||||
MessageType_ECDHSessionKey = 62 [(wire_out) = true];
|
||||
MessageType_SetU2FCounter = 63 [(wire_in) = true];
|
||||
MessageType_EthereumSignMessage = 64 [(wire_in) = true];
|
||||
MessageType_EthereumVerifyMessage = 65 [(wire_in) = true];
|
||||
MessageType_EthereumMessageSignature = 66 [(wire_out) = true];
|
||||
MessageType_DebugLinkDecision = 100 [(wire_debug_in) = true, (wire_tiny) = true];
|
||||
MessageType_DebugLinkGetState = 101 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkState = 102 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkStop = 103 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkLog = 104 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkMemoryRead = 110 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkMemory = 111 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkMemoryWrite = 112 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkFlashErase = 113 [(wire_debug_in) = true];
|
||||
}
|
||||
|
||||
////////////////////
|
||||
// Basic messages //
|
||||
////////////////////
|
||||
|
||||
/**
|
||||
* Request: Reset device to default state and ask for device details
|
||||
* @next Features
|
||||
*/
|
||||
message Initialize {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask for device details (no device reset)
|
||||
* @next Features
|
||||
*/
|
||||
message GetFeatures {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Reports various information about the device
|
||||
* @prev Initialize
|
||||
* @prev GetFeatures
|
||||
*/
|
||||
message Features {
|
||||
optional string vendor = 1; // name of the manufacturer, e.g. "bitcointrezor.com"
|
||||
optional uint32 major_version = 2; // major version of the device, e.g. 1
|
||||
optional uint32 minor_version = 3; // minor version of the device, e.g. 0
|
||||
optional uint32 patch_version = 4; // patch version of the device, e.g. 0
|
||||
optional bool bootloader_mode = 5; // is device in bootloader mode?
|
||||
optional string device_id = 6; // device's unique identifier
|
||||
optional bool pin_protection = 7; // is device protected by PIN?
|
||||
optional bool passphrase_protection = 8; // is node/mnemonic encrypted using passphrase?
|
||||
optional string language = 9; // device language
|
||||
optional string label = 10; // device description label
|
||||
repeated CoinType coins = 11; // supported coins
|
||||
optional bool initialized = 12; // does device contain seed?
|
||||
optional bytes revision = 13; // SCM revision of firmware
|
||||
optional bytes bootloader_hash = 14; // hash of the bootloader
|
||||
optional bool imported = 15; // was storage imported from an external source?
|
||||
optional bool pin_cached = 16; // is PIN already cached in session?
|
||||
optional bool passphrase_cached = 17; // is passphrase already cached in session?
|
||||
optional bool firmware_present = 18; // is valid firmware loaded?
|
||||
optional bool needs_backup = 19; // does storage need backup? (equals to Storage.needs_backup)
|
||||
optional uint32 flags = 20; // device flags (equals to Storage.flags)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: clear session (removes cached PIN, passphrase, etc).
|
||||
* @next Success
|
||||
*/
|
||||
message ClearSession {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: change language and/or label of the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
* @next ButtonRequest
|
||||
* @next PinMatrixRequest
|
||||
*/
|
||||
message ApplySettings {
|
||||
optional string language = 1;
|
||||
optional string label = 2;
|
||||
optional bool use_passphrase = 3;
|
||||
optional bytes homescreen = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: set flags of the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message ApplyFlags {
|
||||
optional uint32 flags = 1; // bitmask, can only set bits, not unset
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Starts workflow for setting/changing/removing the PIN
|
||||
* @next ButtonRequest
|
||||
* @next PinMatrixRequest
|
||||
*/
|
||||
message ChangePin {
|
||||
optional bool remove = 1; // is PIN removal requested?
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Test if the device is alive, device sends back the message in Success response
|
||||
* @next Success
|
||||
*/
|
||||
message Ping {
|
||||
optional string message = 1; // message to send back in Success message
|
||||
optional bool button_protection = 2; // ask for button press
|
||||
optional bool pin_protection = 3; // ask for PIN if set in device
|
||||
optional bool passphrase_protection = 4; // ask for passphrase if set in device
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Success of the previous request
|
||||
*/
|
||||
message Success {
|
||||
optional string message = 1; // human readable description of action or request-specific payload
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Failure of the previous request
|
||||
*/
|
||||
message Failure {
|
||||
optional FailureType code = 1; // computer-readable definition of the error state
|
||||
optional string message = 2; // human-readable message of the error state
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is waiting for HW button press.
|
||||
* @next ButtonAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message ButtonRequest {
|
||||
optional ButtonRequestType code = 1;
|
||||
optional string data = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer agrees to wait for HW button press
|
||||
* @prev ButtonRequest
|
||||
*/
|
||||
message ButtonAck {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
|
||||
* @next PinMatrixAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message PinMatrixRequest {
|
||||
optional PinMatrixRequestType type = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer responds with encoded PIN
|
||||
* @prev PinMatrixRequest
|
||||
*/
|
||||
message PinMatrixAck {
|
||||
required string pin = 1; // matrix encoded PIN entered by user
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Abort last operation that required user interaction
|
||||
* @prev ButtonRequest
|
||||
* @prev PinMatrixRequest
|
||||
* @prev PassphraseRequest
|
||||
*/
|
||||
message Cancel {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device awaits encryption passphrase
|
||||
* @next PassphraseAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message PassphraseRequest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Send passphrase back
|
||||
* @prev PassphraseRequest
|
||||
*/
|
||||
message PassphraseAck {
|
||||
required string passphrase = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Request a sample of random data generated by hardware RNG. May be used for testing.
|
||||
* @next ButtonRequest
|
||||
* @next Entropy
|
||||
* @next Failure
|
||||
*/
|
||||
message GetEntropy {
|
||||
required uint32 size = 1; // size of requested entropy
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Reply with random data generated by internal RNG
|
||||
* @prev GetEntropy
|
||||
*/
|
||||
message Entropy {
|
||||
required bytes entropy = 1; // stream of random generated bytes
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for public key corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next PublicKey
|
||||
* @next Failure
|
||||
*/
|
||||
message GetPublicKey {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string ecdsa_curve_name = 2; // ECDSA curve name to use
|
||||
optional bool show_display = 3; // optionally show on display before sending the result
|
||||
optional string coin_name = 4 [default='Bitcoin'];
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains public key derived from device private seed
|
||||
* @prev GetPublicKey
|
||||
*/
|
||||
message PublicKey {
|
||||
required HDNodeType node = 1; // BIP32 public node
|
||||
optional string xpub = 2; // serialized form of public node
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for address corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next Address
|
||||
* @next Failure
|
||||
*/
|
||||
message GetAddress {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string coin_name = 2 [default='Bitcoin'];
|
||||
optional bool show_display = 3; // optionally show on display before sending the result
|
||||
optional MultisigRedeemScriptType multisig = 4; // filled if we are showing a multisig address
|
||||
optional InputScriptType script_type = 5 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for Ethereum address corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next EthereumAddress
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumGetAddress {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional bool show_display = 2; // optionally show on display before sending the result
|
||||
}
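// Illustrative note (not part of the upstream protocol definition): address_n
// is the raw BIP-32 derivation path, with hardened components encoded by
// setting the top bit of the index. A minimal host-side sketch in Go (the
// helper name is hypothetical):
//
//   // ethereumPath returns the address_n values for m/44'/60'/0'/0/0.
//   func ethereumPath() []uint32 {
//       const hardened = 0x80000000
//       return []uint32{hardened | 44, hardened | 60, hardened | 0, 0, 0}
//   }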
|
||||
|
||||
/**
|
||||
* Response: Contains address derived from device private seed
|
||||
* @prev GetAddress
|
||||
*/
|
||||
message Address {
|
||||
required string address = 1; // Coin address in Base58 encoding
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains an Ethereum address derived from device private seed
|
||||
* @prev EthereumGetAddress
|
||||
*/
|
||||
message EthereumAddress {
|
||||
required bytes address = 1; // Coin address as an Ethereum 160 bit hash
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Request device to wipe all sensitive data and settings
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message WipeDevice {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Load seed and related internal settings from the computer
|
||||
* @next ButtonRequest
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message LoadDevice {
|
||||
optional string mnemonic = 1; // seed encoded as BIP-39 mnemonic (12, 18 or 24 words)
|
||||
optional HDNodeType node = 2; // BIP-32 node
|
||||
optional string pin = 3; // set PIN protection
|
||||
optional bool passphrase_protection = 4; // enable master node encryption using passphrase
|
||||
optional string language = 5 [default='english']; // device language
|
||||
optional string label = 6; // device label
|
||||
optional bool skip_checksum = 7; // do not test mnemonic for valid BIP-39 checksum
|
||||
optional uint32 u2f_counter = 8; // U2F counter
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to do initialization involving user interaction
|
||||
* @next EntropyRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message ResetDevice {
|
||||
optional bool display_random = 1; // display entropy generated by the device before asking for additional entropy
|
||||
optional uint32 strength = 2 [default=256]; // strength of seed in bits
|
||||
optional bool passphrase_protection = 3; // enable master node encryption using passphrase
|
||||
optional bool pin_protection = 4; // enable PIN protection
|
||||
optional string language = 5 [default='english']; // device language
|
||||
optional string label = 6; // device label
|
||||
optional uint32 u2f_counter = 7; // U2F counter
|
||||
optional bool skip_backup = 8; // postpone seed backup to BackupDevice workflow
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Perform backup of the device seed if not backed up using ResetDevice
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message BackupDevice {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Ask for additional entropy from host computer
|
||||
* @prev ResetDevice
|
||||
* @next EntropyAck
|
||||
*/
|
||||
message EntropyRequest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Provide additional entropy for seed generation function
|
||||
* @prev EntropyRequest
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message EntropyAck {
|
||||
optional bytes entropy = 1; // 256 bits (32 bytes) of random data
|
||||
}
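// Illustrative note (not part of the upstream protocol definition): the host
// is expected to contribute its own randomness here. A minimal sketch in Go
// using only the standard library (the helper name is hypothetical):
//
//   import "crypto/rand"
//
//   // hostEntropy returns the 32 bytes of host randomness sent in EntropyAck.
//   func hostEntropy() ([]byte, error) {
//       buf := make([]byte, 32)
//       _, err := rand.Read(buf)
//       return buf, err
//   }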
|
||||
|
||||
/**
|
||||
* Request: Start recovery workflow asking user for specific words of mnemonic
|
||||
 * Used to recover the device safely even on an untrusted computer.
|
||||
* @next WordRequest
|
||||
*/
|
||||
message RecoveryDevice {
|
||||
optional uint32 word_count = 1; // number of words in BIP-39 mnemonic
|
||||
optional bool passphrase_protection = 2; // enable master node encryption using passphrase
|
||||
optional bool pin_protection = 3; // enable PIN protection
|
||||
optional string language = 4 [default='english']; // device language
|
||||
optional string label = 5; // device label
|
||||
optional bool enforce_wordlist = 6; // enforce BIP-39 wordlist during the process
|
||||
// 7 reserved for unused recovery method
|
||||
optional uint32 type = 8; // supported recovery type (see RecoveryDeviceType)
|
||||
optional uint32 u2f_counter = 9; // U2F counter
|
||||
optional bool dry_run = 10; // perform dry-run recovery workflow (for safe mnemonic validation)
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is waiting for user to enter word of the mnemonic
|
||||
* Its position is shown only on device's internal display.
|
||||
* @prev RecoveryDevice
|
||||
* @prev WordAck
|
||||
*/
|
||||
message WordRequest {
|
||||
optional WordRequestType type = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer replies with word from the mnemonic
|
||||
* @prev WordRequest
|
||||
* @next WordRequest
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message WordAck {
|
||||
required string word = 1; // one word of the mnemonic at the requested position
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
// Message signing messages //
|
||||
//////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign message
|
||||
* @next MessageSignature
|
||||
* @next Failure
|
||||
*/
|
||||
message SignMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes message = 2; // message to be signed
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use for signing
|
||||
optional InputScriptType script_type = 4 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to verify message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message VerifyMessage {
|
||||
optional string address = 1; // address to verify
|
||||
optional bytes signature = 2; // signature to verify
|
||||
optional bytes message = 3; // message to verify
|
||||
optional string coin_name = 4 [default='Bitcoin']; // coin to use for verifying
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Signed message
|
||||
* @prev SignMessage
|
||||
*/
|
||||
message MessageSignature {
|
||||
optional string address = 1; // address used to sign the message
|
||||
optional bytes signature = 2; // signature of the message
|
||||
}
|
||||
|
||||
///////////////////////////
|
||||
// Encryption/decryption //
|
||||
///////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to encrypt message
|
||||
* @next EncryptedMessage
|
||||
* @next Failure
|
||||
*/
|
||||
message EncryptMessage {
|
||||
optional bytes pubkey = 1; // public key
|
||||
optional bytes message = 2; // message to encrypt
|
||||
optional bool display_only = 3; // show just on display? (don't send back via wire)
|
||||
repeated uint32 address_n = 4; // BIP-32 path to derive the signing key from master node
|
||||
optional string coin_name = 5 [default='Bitcoin']; // coin to use for signing
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Encrypted message
|
||||
* @prev EncryptMessage
|
||||
*/
|
||||
message EncryptedMessage {
|
||||
optional bytes nonce = 1; // nonce used during encryption
|
||||
optional bytes message = 2; // encrypted message
|
||||
optional bytes hmac = 3; // message hmac
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to decrypt message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message DecryptMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the decryption key from master node
|
||||
optional bytes nonce = 2; // nonce used during encryption
|
||||
optional bytes message = 3; // message to decrypt
|
||||
optional bytes hmac = 4; // message hmac
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Decrypted message
|
||||
 * @prev DecryptMessage
|
||||
*/
|
||||
message DecryptedMessage {
|
||||
optional bytes message = 1; // decrypted message
|
||||
optional string address = 2; // address used to sign the message (if used)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to encrypt or decrypt value of given key
|
||||
* @next CipheredKeyValue
|
||||
* @next Failure
|
||||
*/
|
||||
message CipherKeyValue {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string key = 2; // key component of key:value
|
||||
optional bytes value = 3; // value component of key:value
|
||||
optional bool encrypt = 4; // are we encrypting (True) or decrypting (False)?
|
||||
optional bool ask_on_encrypt = 5; // should we ask on encrypt operation?
|
||||
optional bool ask_on_decrypt = 6; // should we ask on decrypt operation?
|
||||
optional bytes iv = 7; // initialization vector (will be computed if not set)
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Return ciphered/deciphered value
|
||||
* @prev CipherKeyValue
|
||||
*/
|
||||
message CipheredKeyValue {
|
||||
optional bytes value = 1; // ciphered/deciphered value
|
||||
}
|
||||
|
||||
//////////////////////////////////
|
||||
// Transaction signing messages //
|
||||
//////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Estimated size of the transaction
|
||||
* This behaves exactly like SignTx, which means that it can ask using TxRequest
|
||||
* This call is non-blocking (except possible PassphraseRequest to unlock the seed)
|
||||
* @next TxSize
|
||||
* @next Failure
|
||||
*/
|
||||
message EstimateTxSize {
|
||||
required uint32 outputs_count = 1; // number of transaction outputs
|
||||
required uint32 inputs_count = 2; // number of transaction inputs
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Estimated size of the transaction
|
||||
* @prev EstimateTxSize
|
||||
*/
|
||||
message TxSize {
|
||||
optional uint32 tx_size = 1; // estimated size of transaction in bytes
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign transaction
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next TxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message SignTx {
|
||||
required uint32 outputs_count = 1; // number of transaction outputs
|
||||
required uint32 inputs_count = 2; // number of transaction inputs
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use
|
||||
optional uint32 version = 4 [default=1]; // transaction version
|
||||
optional uint32 lock_time = 5 [default=0]; // transaction lock_time
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Simplified transaction signing
|
||||
 * This method doesn't support streaming, so there are hardware limits on the number of inputs and outputs.
|
||||
* In case of success, the result is returned using TxRequest message.
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next TxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message SimpleSignTx {
|
||||
repeated TxInputType inputs = 1; // transaction inputs
|
||||
repeated TxOutputType outputs = 2; // transaction outputs
|
||||
repeated TransactionType transactions = 3; // transactions whose outputs are used to build current inputs
|
||||
optional string coin_name = 4 [default='Bitcoin']; // coin to use
|
||||
optional uint32 version = 5 [default=1]; // transaction version
|
||||
optional uint32 lock_time = 6 [default=0]; // transaction lock_time
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device asks for information for signing transaction or returns the last result
|
||||
* If request_index is set, device awaits TxAck message (with fields filled in according to request_type)
|
||||
* If signature_index is set, 'signature' contains signed input of signature_index's input
|
||||
* @prev SignTx
|
||||
* @prev SimpleSignTx
|
||||
* @prev TxAck
|
||||
*/
|
||||
message TxRequest {
|
||||
optional RequestType request_type = 1; // what should be filled in TxAck message?
|
||||
optional TxRequestDetailsType details = 2; // request for tx details
|
||||
optional TxRequestSerializedType serialized = 3; // serialized data and request for next
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Reported transaction data
|
||||
* @prev TxRequest
|
||||
* @next TxRequest
|
||||
*/
|
||||
message TxAck {
|
||||
optional TransactionType tx = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign transaction
|
||||
* All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
|
||||
 * Note: at most the first 1024 bytes of data MUST be transmitted as part of this message.
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next EthereumTxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumSignTx {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional bytes nonce = 2; // <=256 bit unsigned big endian
|
||||
optional bytes gas_price = 3; // <=256 bit unsigned big endian (in wei)
|
||||
optional bytes gas_limit = 4; // <=256 bit unsigned big endian
|
||||
optional bytes to = 5; // 160 bit address hash
|
||||
optional bytes value = 6; // <=256 bit unsigned big endian (in wei)
|
||||
optional bytes data_initial_chunk = 7; // The initial data chunk (<= 1024 bytes)
|
||||
optional uint32 data_length = 8; // Length of transaction payload
|
||||
optional uint32 chain_id = 9; // Chain Id for EIP 155
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device asks for more data from transaction payload, or returns the signature.
|
||||
* If data_length is set, device awaits that many more bytes of payload.
|
||||
* Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
|
||||
* @prev EthereumSignTx
|
||||
* @next EthereumTxAck
|
||||
*/
|
||||
message EthereumTxRequest {
|
||||
optional uint32 data_length = 1; // Number of bytes being requested (<= 1024)
|
||||
optional uint32 signature_v = 2; // Computed signature (recovery parameter, limited to 27 or 28)
|
||||
optional bytes signature_r = 3; // Computed signature R component (256 bit)
|
||||
optional bytes signature_s = 4; // Computed signature S component (256 bit)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Transaction payload data.
|
||||
* @prev EthereumTxRequest
|
||||
* @next EthereumTxRequest
|
||||
*/
|
||||
message EthereumTxAck {
|
||||
optional bytes data_chunk = 1; // Bytes from transaction payload (<= 1024 bytes)
|
||||
}
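// Illustrative note (not part of the upstream protocol definition): the
// EthereumSignTx / EthereumTxRequest / EthereumTxAck exchange is a pull loop
// driven by the device. A rough host-side sketch in Go, assuming a
// hypothetical exchange() helper that performs one wire round trip and field
// names following the usual protoc-gen-go conventions:
//
//   // streamTxData feeds the remaining payload to the device in the chunk
//   // sizes it asks for (at most 1024 bytes each) until it stops asking.
//   func streamTxData(data []byte, req *EthereumTxRequest) (*EthereumTxRequest, error) {
//       for req.GetDataLength() > 0 {
//           n := int(req.GetDataLength())
//           if n > len(data) {
//               n = len(data)
//           }
//           next, err := exchange(&EthereumTxAck{DataChunk: data[:n]})
//           if err != nil {
//               return nil, err
//           }
//           data, req = data[n:], next
//       }
//       return req, nil // req now carries signature_v, signature_r, signature_s
//   }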
|
||||
|
||||
////////////////////////////////////////
|
||||
// Ethereum: Message signing messages //
|
||||
////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign message
|
||||
* @next EthereumMessageSignature
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumSignMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes message = 2; // message to be signed
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to verify message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumVerifyMessage {
|
||||
optional bytes address = 1; // address to verify
|
||||
optional bytes signature = 2; // signature to verify
|
||||
optional bytes message = 3; // message to verify
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Signed message
|
||||
* @prev EthereumSignMessage
|
||||
*/
|
||||
message EthereumMessageSignature {
|
||||
optional bytes address = 1; // address used to sign the message
|
||||
optional bytes signature = 2; // signature of the message
|
||||
}
|
||||
|
||||
///////////////////////
|
||||
// Identity messages //
|
||||
///////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign identity
|
||||
* @next SignedIdentity
|
||||
* @next Failure
|
||||
*/
|
||||
message SignIdentity {
|
||||
optional IdentityType identity = 1; // identity
|
||||
optional bytes challenge_hidden = 2; // non-visible challenge
|
||||
optional string challenge_visual = 3; // challenge shown on display (e.g. date+time)
|
||||
optional string ecdsa_curve_name = 4; // ECDSA curve name to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device provides signed identity
|
||||
* @prev SignIdentity
|
||||
*/
|
||||
message SignedIdentity {
|
||||
optional string address = 1; // identity address
|
||||
optional bytes public_key = 2; // identity public key
|
||||
optional bytes signature = 3; // signature of the identity data
|
||||
}
|
||||
|
||||
///////////////////
|
||||
// ECDH messages //
|
||||
///////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to generate ECDH session key
|
||||
* @next ECDHSessionKey
|
||||
* @next Failure
|
||||
*/
|
||||
message GetECDHSessionKey {
|
||||
optional IdentityType identity = 1; // identity
|
||||
optional bytes peer_public_key = 2; // peer's public key
|
||||
optional string ecdsa_curve_name = 3; // ECDSA curve name to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device provides ECDH session key
|
||||
* @prev GetECDHSessionKey
|
||||
*/
|
||||
message ECDHSessionKey {
|
||||
optional bytes session_key = 1; // ECDH session key
|
||||
}
|
||||
|
||||
///////////////////
|
||||
// U2F messages //
|
||||
///////////////////
|
||||
|
||||
/**
|
||||
* Request: Set U2F counter
|
||||
* @next Success
|
||||
*/
|
||||
message SetU2FCounter {
|
||||
optional uint32 u2f_counter = 1; // counter
|
||||
}
|
||||
|
||||
/////////////////////////
|
||||
// Bootloader messages //
|
||||
/////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to erase its firmware (so it can be replaced via FirmwareUpload)
|
||||
* @next Success
|
||||
* @next FirmwareRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message FirmwareErase {
|
||||
optional uint32 length = 1; // length of new firmware
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Ask for firmware chunk
|
||||
* @next FirmwareUpload
|
||||
*/
|
||||
message FirmwareRequest {
|
||||
optional uint32 offset = 1; // offset of requested firmware chunk
|
||||
optional uint32 length = 2; // length of requested firmware chunk
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Send firmware in binary form to the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message FirmwareUpload {
|
||||
required bytes payload = 1; // firmware to be loaded into device
|
||||
optional bytes hash = 2; // hash of the payload
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Request: Perform a device self-test
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message SelfTest {
|
||||
optional bytes payload = 1; // payload to be used in self-test
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Debug messages (only available if DebugLink is enabled) //
|
||||
/////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: "Press" the button on the device
|
||||
* @next Success
|
||||
*/
|
||||
message DebugLinkDecision {
|
||||
required bool yes_no = 1; // true for "Confirm", false for "Cancel"
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer asks for device state
|
||||
* @next DebugLinkState
|
||||
*/
|
||||
message DebugLinkGetState {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device current state
|
||||
* @prev DebugLinkGetState
|
||||
*/
|
||||
message DebugLinkState {
|
||||
optional bytes layout = 1; // raw buffer of display
|
||||
optional string pin = 2; // current PIN, blank if PIN is not set/enabled
|
||||
optional string matrix = 3; // current PIN matrix
|
||||
optional string mnemonic = 4; // current BIP-39 mnemonic
|
||||
optional HDNodeType node = 5; // current BIP-32 node
|
||||
optional bool passphrase_protection = 6; // is node/mnemonic encrypted using passphrase?
|
||||
optional string reset_word = 7; // word on device display during ResetDevice workflow
|
||||
optional bytes reset_entropy = 8; // current entropy during ResetDevice workflow
|
||||
optional string recovery_fake_word = 9; // (fake) word on display during RecoveryDevice workflow
|
||||
optional uint32 recovery_word_pos = 10; // index of mnemonic word the device is expecting during RecoveryDevice workflow
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to restart
|
||||
*/
|
||||
message DebugLinkStop {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device wants host to log event
|
||||
*/
|
||||
message DebugLinkLog {
|
||||
optional uint32 level = 1;
|
||||
optional string bucket = 2;
|
||||
optional string text = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Read memory from device
|
||||
* @next DebugLinkMemory
|
||||
*/
|
||||
message DebugLinkMemoryRead {
|
||||
optional uint32 address = 1;
|
||||
optional uint32 length = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device sends memory back
|
||||
* @prev DebugLinkMemoryRead
|
||||
*/
|
||||
message DebugLinkMemory {
|
||||
optional bytes memory = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Write memory to device.
|
||||
* WARNING: Writing to the wrong location can irreparably break the device.
|
||||
*/
|
||||
message DebugLinkMemoryWrite {
|
||||
optional uint32 address = 1;
|
||||
optional bytes memory = 2;
|
||||
optional bool flash = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Erase block of flash on device
|
||||
* WARNING: Writing to the wrong location can irreparably break the device.
|
||||
*/
|
||||
message DebugLinkFlashErase {
|
||||
optional uint32 sector = 1;
|
||||
}
|
accounts/usbwallet/internal/trezor/trezor.go (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Trezor hardware
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
//go:generate protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor,import_path=trezor:. types.proto messages.proto
|
||||
|
||||
// Package trezor contains the wire protocol wrapper in Go.
|
||||
package trezor
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// Type returns the protocol buffer type number of a specific message. If the
|
||||
// message is nil, this method panics!
|
||||
func Type(msg proto.Message) uint16 {
|
||||
return uint16(MessageType_value["MessageType_"+reflect.TypeOf(msg).Elem().Name()])
|
||||
}
|
||||
|
||||
// Name returns the friendly message type name of a specific protocol buffer
|
||||
// type number.
|
||||
func Name(kind uint16) string {
|
||||
name := MessageType_name[int32(kind)]
|
||||
if len(name) < 12 {
|
||||
return name
|
||||
}
|
||||
return name[12:]
|
||||
}
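// Illustrative note (not part of the upstream file): Type and Name are
// typically used together when framing messages for the wire. A minimal
// sketch, assuming the generated Ping message from messages.proto:
//
//	func exampleRoundTrip() {
//		msg := new(Ping)  // any protocol buffer message from this package
//		kind := Type(msg) // numeric message type for the wire header
//		_ = Name(kind)    // "Ping", useful for logging
//	}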
|
accounts/usbwallet/internal/trezor/types.pb.go (new file, 1333 lines)
File diff suppressed because it is too large

accounts/usbwallet/internal/trezor/types.proto (new file, 276 lines)
@@ -0,0 +1,276 @@
|
||||
// This file originates from the SatoshiLabs Trezor `common` repository at:
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/types.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
/**
|
||||
* Types for TREZOR communication
|
||||
*
|
||||
* @author Marek Palatinus <slush@satoshilabs.com>
|
||||
* @version 1.2
|
||||
*/
|
||||
|
||||
// Sugar for easier handling in Java
|
||||
option java_package = "com.satoshilabs.trezor.lib.protobuf";
|
||||
option java_outer_classname = "TrezorType";
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
/**
|
||||
* Options for specifying message direction and type of wire (normal/debug)
|
||||
*/
|
||||
extend google.protobuf.EnumValueOptions {
|
||||
optional bool wire_in = 50002; // message can be transmitted via wire from PC to TREZOR
|
||||
optional bool wire_out = 50003; // message can be transmitted via wire from TREZOR to PC
|
||||
optional bool wire_debug_in = 50004; // message can be transmitted via debug wire from PC to TREZOR
|
||||
optional bool wire_debug_out = 50005; // message can be transmitted via debug wire from TREZOR to PC
|
||||
optional bool wire_tiny = 50006; // message is handled by TREZOR when the USB stack is in tiny mode
|
||||
optional bool wire_bootloader = 50007; // message is only handled by TREZOR Bootloader
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of failures returned by Failure message
|
||||
* @used_in Failure
|
||||
*/
|
||||
enum FailureType {
|
||||
Failure_UnexpectedMessage = 1;
|
||||
Failure_ButtonExpected = 2;
|
||||
Failure_DataError = 3;
|
||||
Failure_ActionCancelled = 4;
|
||||
Failure_PinExpected = 5;
|
||||
Failure_PinCancelled = 6;
|
||||
Failure_PinInvalid = 7;
|
||||
Failure_InvalidSignature = 8;
|
||||
Failure_ProcessError = 9;
|
||||
Failure_NotEnoughFunds = 10;
|
||||
Failure_NotInitialized = 11;
|
||||
Failure_FirmwareError = 99;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of script which will be used for transaction output
|
||||
* @used_in TxOutputType
|
||||
*/
|
||||
enum OutputScriptType {
|
||||
PAYTOADDRESS = 0; // used for all addresses (bitcoin, p2sh, witness)
|
||||
PAYTOSCRIPTHASH = 1; // p2sh address (deprecated; use PAYTOADDRESS)
|
||||
PAYTOMULTISIG = 2; // only for change output
|
||||
PAYTOOPRETURN = 3; // op_return
|
||||
PAYTOWITNESS = 4; // only for change output
|
||||
PAYTOP2SHWITNESS = 5; // only for change output
|
||||
}
|
||||
|
||||
/**
|
||||
 * Type of script which will be used for transaction input
|
||||
* @used_in TxInputType
|
||||
*/
|
||||
enum InputScriptType {
|
||||
SPENDADDRESS = 0; // standard p2pkh address
|
||||
SPENDMULTISIG = 1; // p2sh multisig address
|
||||
EXTERNAL = 2; // reserved for external inputs (coinjoin)
|
||||
SPENDWITNESS = 3; // native segwit
|
||||
SPENDP2SHWITNESS = 4; // segwit over p2sh (backward compatible)
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of information required by transaction signing process
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
enum RequestType {
|
||||
TXINPUT = 0;
|
||||
TXOUTPUT = 1;
|
||||
TXMETA = 2;
|
||||
TXFINISHED = 3;
|
||||
TXEXTRADATA = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of button request
|
||||
* @used_in ButtonRequest
|
||||
*/
|
||||
enum ButtonRequestType {
|
||||
ButtonRequest_Other = 1;
|
||||
ButtonRequest_FeeOverThreshold = 2;
|
||||
ButtonRequest_ConfirmOutput = 3;
|
||||
ButtonRequest_ResetDevice = 4;
|
||||
ButtonRequest_ConfirmWord = 5;
|
||||
ButtonRequest_WipeDevice = 6;
|
||||
ButtonRequest_ProtectCall = 7;
|
||||
ButtonRequest_SignTx = 8;
|
||||
ButtonRequest_FirmwareCheck = 9;
|
||||
ButtonRequest_Address = 10;
|
||||
ButtonRequest_PublicKey = 11;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of PIN request
|
||||
* @used_in PinMatrixRequest
|
||||
*/
|
||||
enum PinMatrixRequestType {
|
||||
PinMatrixRequestType_Current = 1;
|
||||
PinMatrixRequestType_NewFirst = 2;
|
||||
PinMatrixRequestType_NewSecond = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
 * Type of recovery procedure. These should be used as a bitmask, e.g.,
|
||||
* `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
|
||||
* listing every method supported by the host computer.
|
||||
*
|
||||
* Note that ScrambledWords must be supported by every implementation
|
||||
* for backward compatibility; there is no way to not support it.
|
||||
*
|
||||
* @used_in RecoveryDevice
|
||||
*/
|
||||
enum RecoveryDeviceType {
|
||||
// use powers of two when extending this field
|
||||
RecoveryDeviceType_ScrambledWords = 0; // words in scrambled order
|
||||
RecoveryDeviceType_Matrix = 1; // matrix recovery type
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of Recovery Word request
|
||||
* @used_in WordRequest
|
||||
*/
|
||||
enum WordRequestType {
|
||||
WordRequestType_Plain = 0;
|
||||
WordRequestType_Matrix9 = 1;
|
||||
WordRequestType_Matrix6 = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing BIP32 (hierarchical deterministic) node
|
||||
 * Used for importing a private key into the device and exporting a public key out of the device
|
||||
* @used_in PublicKey
|
||||
* @used_in LoadDevice
|
||||
* @used_in DebugLinkState
|
||||
* @used_in Storage
|
||||
*/
|
||||
message HDNodeType {
|
||||
required uint32 depth = 1;
|
||||
required uint32 fingerprint = 2;
|
||||
required uint32 child_num = 3;
|
||||
required bytes chain_code = 4;
|
||||
optional bytes private_key = 5;
|
||||
optional bytes public_key = 6;
|
||||
}
|
||||
|
||||
message HDNodePathType {
|
||||
required HDNodeType node = 1; // BIP-32 node in deserialized form
|
||||
repeated uint32 address_n = 2; // BIP-32 path to derive the key from node
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing Coin
|
||||
* @used_in Features
|
||||
*/
|
||||
message CoinType {
|
||||
optional string coin_name = 1;
|
||||
optional string coin_shortcut = 2;
|
||||
optional uint32 address_type = 3 [default=0];
|
||||
optional uint64 maxfee_kb = 4;
|
||||
optional uint32 address_type_p2sh = 5 [default=5];
|
||||
optional string signed_message_header = 8;
|
||||
optional uint32 xpub_magic = 9 [default=76067358]; // default=0x0488b21e
|
||||
optional uint32 xprv_magic = 10 [default=76066276]; // default=0x0488ade4
|
||||
optional bool segwit = 11;
|
||||
optional uint32 forkid = 12;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of redeem script used in input
|
||||
* @used_in TxInputType
|
||||
*/
|
||||
message MultisigRedeemScriptType {
|
||||
repeated HDNodePathType pubkeys = 1; // pubkeys from multisig address (sorted lexicographically)
|
||||
repeated bytes signatures = 2; // existing signatures for partially signed input
|
||||
optional uint32 m = 3; // "m" from n, how many valid signatures are necessary for spending
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing transaction input
|
||||
* @used_in SimpleSignTx
|
||||
* @used_in TransactionType
|
||||
*/
|
||||
message TxInputType {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes prev_hash = 2; // hash of previous transaction output to spend by this input
|
||||
required uint32 prev_index = 3; // index of previous output to spend
|
||||
optional bytes script_sig = 4; // script signature, unset for tx to sign
|
||||
optional uint32 sequence = 5 [default=4294967295]; // sequence (default=0xffffffff)
|
||||
optional InputScriptType script_type = 6 [default=SPENDADDRESS]; // defines template of input script
|
||||
optional MultisigRedeemScriptType multisig = 7; // Filled if input is going to spend multisig tx
|
||||
optional uint64 amount = 8; // amount of previous transaction output (for segwit only)
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing transaction output
|
||||
* @used_in SimpleSignTx
|
||||
* @used_in TransactionType
|
||||
*/
|
||||
message TxOutputType {
|
||||
optional string address = 1; // target coin address in Base58 encoding
|
||||
repeated uint32 address_n = 2; // BIP-32 path to derive the key from master node; has higher priority than "address"
|
||||
required uint64 amount = 3; // amount to spend in satoshis
|
||||
required OutputScriptType script_type = 4; // output script type
|
||||
optional MultisigRedeemScriptType multisig = 5; // defines multisig address; script_type must be PAYTOMULTISIG
|
||||
optional bytes op_return_data = 6; // defines op_return data; script_type must be PAYTOOPRETURN, amount must be 0
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing compiled transaction output
|
||||
* @used_in TransactionType
|
||||
*/
|
||||
message TxOutputBinType {
|
||||
required uint64 amount = 1;
|
||||
required bytes script_pubkey = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing transaction
|
||||
* @used_in SimpleSignTx
|
||||
*/
|
||||
message TransactionType {
|
||||
optional uint32 version = 1;
|
||||
repeated TxInputType inputs = 2;
|
||||
repeated TxOutputBinType bin_outputs = 3;
|
||||
repeated TxOutputType outputs = 5;
|
||||
optional uint32 lock_time = 4;
|
||||
optional uint32 inputs_cnt = 6;
|
||||
optional uint32 outputs_cnt = 7;
|
||||
optional bytes extra_data = 8;
|
||||
optional uint32 extra_data_len = 9;
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing request details
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
message TxRequestDetailsType {
|
||||
optional uint32 request_index = 1; // device expects TxAck message from the computer
|
||||
optional bytes tx_hash = 2; // tx_hash of requested transaction
|
||||
optional uint32 extra_data_len = 3; // length of requested extra data
|
||||
optional uint32 extra_data_offset = 4; // offset of requested extra data
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing serialized data
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
message TxRequestSerializedType {
|
||||
optional uint32 signature_index = 1; // 'signature' field contains signed input of this index
|
||||
optional bytes signature = 2; // signature of the signature_index input
|
||||
optional bytes serialized_tx = 3; // part of serialized and signed transaction
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing identity data
|
||||
* @used_in IdentityType
|
||||
*/
|
||||
message IdentityType {
|
||||
optional string proto = 1; // proto part of URI
|
||||
optional string user = 2; // user part of URI
|
||||
optional string host = 3; // host part of URI
|
||||
optional string port = 4; // port part of URI
|
||||
optional string path = 5; // path part of URI
|
||||
optional uint32 index = 6 [default=0]; // identity index
|
||||
}
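// Illustrative note (not part of the upstream type definition): an identity
// URI such as ssh://user@example.com maps onto these fields roughly as
// proto="ssh", user="user", host="example.com". A Go sketch, assuming the
// usual protoc-gen-go field names:
//
//   id := &IdentityType{
//       Proto: proto.String("ssh"),
//       User:  proto.String("user"),
//       Host:  proto.String("example.com"),
//   }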
|
accounts/usbwallet/ledger.go (new file, 464 lines)
@@ -0,0 +1,464 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
|
||||
type ledgerOpcode byte
|
||||
|
||||
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam1 byte
|
||||
|
||||
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam2 byte
|
||||
|
||||
const (
|
||||
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
|
||||
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
|
||||
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
|
||||
|
||||
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
|
||||
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
|
||||
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
|
||||
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
|
||||
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
|
||||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
|
||||
)
|
||||
|
||||
// errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errLedgerReplyInvalidHeader = errors.New("ledger: invalid reply header")
|
||||
|
||||
// errLedgerInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errLedgerInvalidVersionReply = errors.New("ledger: invalid version reply")
|
||||
|
||||
// ledgerDriver implements the communication with a Ledger hardware wallet.
|
||||
type ledgerDriver struct {
|
||||
device io.ReadWriter // USB device connection to communicate through
|
||||
version [3]byte // Current version of the Ledger firmware (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
failure error // Any failure that would make the device unusable
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
|
||||
|
||||
// newLedgerDriver creates a new instance of a Ledger USB protocol driver.
|
||||
func newLedgerDriver(logger log.Logger) driver {
|
||||
return &ledgerDriver{
|
||||
log: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Status implements usbwallet.driver, returning various states the Ledger can
|
||||
// currently be in.
|
||||
func (w *ledgerDriver) Status() (string, error) {
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure), w.failure
|
||||
}
|
||||
if w.browser {
|
||||
return "Ethereum app in browser mode", w.failure
|
||||
}
|
||||
if w.offline() {
|
||||
return "Ethereum app offline", w.failure
|
||||
}
|
||||
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2]), w.failure
|
||||
}
|
||||
|
||||
// offline returns whether the wallet and the Ethereum app are offline or not.
|
||||
//
|
||||
// The method assumes that the state lock is held!
|
||||
func (w *ledgerDriver) offline() bool {
|
||||
return w.version == [3]byte{0, 0, 0}
|
||||
}
|
||||
|
||||
// Open implements usbwallet.driver, attempting to initialize the connection to the
|
||||
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
|
||||
// parameter is silently discarded.
|
||||
func (w *ledgerDriver) Open(device io.ReadWriter, passphrase string) error {
|
||||
w.device, w.failure = device, nil
|
||||
|
||||
_, err := w.ledgerDerive(accounts.DefaultBaseDerivationPath)
|
||||
if err != nil {
|
||||
// Ethereum app is not running or in browser mode, nothing more to do, return
|
||||
if err == errLedgerReplyInvalidHeader {
|
||||
w.browser = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
|
||||
if w.version, err = w.ledgerVersion(); err != nil {
|
||||
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements usbwallet.driver, cleaning up any metadata maintained within
|
||||
// the Ledger driver.
|
||||
func (w *ledgerDriver) Close() error {
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat implements usbwallet.driver, performing a sanity check against the
|
||||
// Ledger to see if it's still online.
|
||||
func (w *ledgerDriver) Heartbeat() error {
|
||||
if _, err := w.ledgerVersion(); err != nil && err != errLedgerInvalidVersionReply {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Derive implements usbwallet.driver, sending a derivation request to the Ledger
|
||||
// and returning the Ethereum address located on that derivation path.
|
||||
func (w *ledgerDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
|
||||
return w.ledgerDerive(path)
|
||||
}
|
||||
|
||||
// SignTx implements usbwallet.driver, sending the transaction to the Ledger and
|
||||
// waiting for the user to confirm or deny the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned as opposed to silently signing in Homestead mode.
|
||||
func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// If the Ethereum app doesn't run, abort
|
||||
if w.offline() {
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All information gathered and metadata checks out, request signing
|
||||
return w.ledgerSign(path, tx, chainID)
|
||||
}
|
||||
|
||||
// ledgerVersion retrieves the current version of the Ethereum wallet app running
|
||||
// on the Ledger wallet.
|
||||
//
|
||||
// The version retrieval protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+----+---
|
||||
// E0 | 06 | 00 | 00 | 00 | 04
|
||||
//
|
||||
// With no input data, and the output data being:
|
||||
//
|
||||
// Description | Length
|
||||
// ---------------------------------------------------+--------
|
||||
// Flags 01: arbitrary data signature enabled by user | 1 byte
|
||||
// Application major version | 1 byte
|
||||
// Application minor version | 1 byte
|
||||
// Application patch version | 1 byte
|
||||
func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
|
||||
if err != nil {
|
||||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errLedgerInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
copy(version[:], reply[1:])
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
|
||||
// wallet at the specified derivation path.
|
||||
//
|
||||
// The address derivation protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 02 | 00 return address
|
||||
// 01 display address and confirm before returning
|
||||
// | 00: do not return the chain code
|
||||
// | 01: return the chain code
|
||||
// | var | 00
|
||||
//
|
||||
// Where the input data is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+--------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------------------+-------------------
|
||||
// Public Key length | 1 byte
|
||||
// Uncompressed Public Key | arbitrary
|
||||
// Ethereum address length | 1 byte
|
||||
// Ethereum address | 40 bytes hex ascii
|
||||
// Chain code if requested | 32 bytes
|
||||
func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
// Discard the public key, we don't need that for now
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks public key entry")
|
||||
}
|
||||
reply = reply[1+int(reply[0]):]
|
||||
|
||||
// Extract the Ethereum hex address string
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks address entry")
|
||||
}
|
||||
hexstr := reply[1 : 1+int(reply[0])]
|
||||
|
||||
// Decode the hex string into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
return address, nil
|
||||
}
|
||||
|
||||
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
//
|
||||
// The transaction signing protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 04 | 00: first transaction data block
|
||||
// 80: subsequent transaction data block
|
||||
// | 00 | variable | variable
|
||||
//
|
||||
// Where the input for the first transaction block (first 255 bytes) is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+----------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the input for subsequent transaction blocks (first 255 bytes) are:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------+----------
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------+---------
|
||||
// signature V | 1 byte
|
||||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
|
||||
var (
|
||||
txrlp []byte
|
||||
err error
|
||||
)
|
||||
if chainID == nil {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
||||
// Send the request and wait for the response
|
||||
var (
|
||||
op = ledgerP1InitTransactionData
|
||||
reply []byte
|
||||
)
|
||||
for len(payload) > 0 {
|
||||
// Calculate the size of the next data chunk
|
||||
chunk := 255
|
||||
if chunk > len(payload) {
|
||||
chunk = len(payload)
|
||||
}
|
||||
// Send the chunk over, ensuring it's processed correctly
|
||||
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
// Shift the payload and ensure subsequent chunks are marked as such
|
||||
payload = payload[chunk:]
|
||||
op = ledgerP1ContTransactionData
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(reply) != 65 {
|
||||
return common.Address{}, nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(reply[1:], reply[0])
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
return sender, signed, nil
|
||||
}
|
||||
|
||||
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
|
||||
// message and retrieving the response.
|
||||
//
|
||||
// The common transport header is defined as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// Communication channel ID (big endian) | 2 bytes
|
||||
// Command tag | 1 byte
|
||||
// Packet sequence index (big endian) | 2 bytes
|
||||
// Payload | arbitrary
|
||||
//
|
||||
// The Communication channel ID allows commands multiplexing over the same
|
||||
// physical link. It is not used for the time being, and should be set to 0101
|
||||
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
|
||||
//
|
||||
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
|
||||
// APDU payloads, or TAG_PING (0x02) for a simple link test.
|
||||
//
|
||||
// The Packet sequence index describes the current sequence for fragmented payloads.
|
||||
// The first fragment index is 0x00.
|
||||
//
|
||||
// APDU Command payloads are encoded as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// -----------------------------------
|
||||
// APDU length (big endian) | 2 bytes
|
||||
// APDU CLA | 1 byte
|
||||
// APDU INS | 1 byte
|
||||
// APDU P1 | 1 byte
|
||||
// APDU P2 | 1 byte
|
||||
// APDU length | 1 byte
|
||||
// Optional APDU data | arbitrary
|
||||
func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
|
||||
// Construct the message payload, possibly split into multiple chunks
|
||||
apdu := make([]byte, 2, 7+len(data))
|
||||
|
||||
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
|
||||
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
|
||||
apdu = append(apdu, data...)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
|
||||
chunk := make([]byte, 64)
|
||||
space := len(chunk) - len(header)
|
||||
|
||||
for i := 0; len(apdu) > 0; i++ {
|
||||
// Construct the new message to stream
|
||||
chunk = append(chunk[:0], header...)
|
||||
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
|
||||
|
||||
if len(apdu) > space {
|
||||
chunk = append(chunk, apdu[:space]...)
|
||||
apdu = apdu[space:]
|
||||
} else {
|
||||
chunk = append(chunk, apdu...)
|
||||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var reply []byte
|
||||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errLedgerReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the total message length
|
||||
var payload []byte
|
||||
|
||||
if chunk[3] == 0x00 && chunk[4] == 0x00 {
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
|
||||
payload = chunk[7:]
|
||||
} else {
|
||||
payload = chunk[5:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
return reply[:len(reply)-2], nil
|
||||
}
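As a rough illustration of the framing documented above, the standalone sketch below (not part of this change; frameAPDU and the sample GET CONFIGURATION opcode are placeholders) wraps the 5-byte APDU header plus payload into 64-byte HID reports carrying the 0x0101 channel ID, the 0x05 APDU tag and a big-endian sequence index:

package main

import (
	"encoding/binary"
	"fmt"
)

// frameAPDU mirrors the framing documented above: a length-prefixed APDU,
// cut into HID reports of at most 64 bytes each.
func frameAPDU(opcode, p1, p2 byte, data []byte) [][]byte {
	apdu := make([]byte, 2, 7+len(data))
	binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
	apdu = append(apdu, 0xe0, opcode, p1, p2, byte(len(data)))
	apdu = append(apdu, data...)

	header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // channel ID, APDU tag, sequence index
	var frames [][]byte
	for i := 0; len(apdu) > 0; i++ {
		chunk := append([]byte{}, header...)
		binary.BigEndian.PutUint16(chunk[3:], uint16(i))
		if space := 64 - len(chunk); len(apdu) > space {
			chunk = append(chunk, apdu[:space]...)
			apdu = apdu[space:]
		} else {
			chunk = append(chunk, apdu...)
			apdu = nil
		}
		frames = append(frames, chunk)
	}
	return frames
}

func main() {
	// GET CONFIGURATION (0xe0 0x06 0x00 0x00 0x00) carries no data and fits in one report.
	fmt.Printf("% x\n", frameAPDU(0x06, 0x00, 0x00, nil)[0])
}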
@@ -1,903 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
const ledgerHeartbeatCycle = time.Second
|
||||
|
||||
// Minimum time to wait between self derivation attempts, even if the user is
|
||||
// requesting accounts like crazy.
|
||||
const ledgerSelfDeriveThrottling = time.Second
|
||||
|
||||
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
|
||||
type ledgerOpcode byte
|
||||
|
||||
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam1 byte
|
||||
|
||||
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam2 byte
|
||||
|
||||
const (
|
||||
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
|
||||
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
|
||||
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
|
||||
|
||||
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
|
||||
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
|
||||
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
|
||||
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
|
||||
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
|
||||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
|
||||
)
|
||||
|
||||
// errReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errReplyInvalidHeader = errors.New("invalid reply header")
|
||||
|
||||
// errInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errInvalidVersionReply = errors.New("invalid version reply")
|
||||
|
||||
// ledgerWallet represents a live USB Ledger hardware wallet.
|
||||
type ledgerWallet struct {
|
||||
hub *LedgerHub // USB hub the device originates from (TODO(karalabe): remove if hotplug lands on Windows)
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a Ledger wallet
|
||||
failure error // Any failure that would make the device unusable
|
||||
|
||||
version [3]byte // Current version of the Ledger Ethereum app (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
accounts []accounts.Account // List of derived accounts pinned on the Ledger
|
||||
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
|
||||
|
||||
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
|
||||
deriveNextAddr common.Address // Next derived account address for auto-discovery
|
||||
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used account with
|
||||
deriveReq chan chan struct{} // Channel to request a self-derivation on
|
||||
deriveQuit chan chan error // Channel to terminate the self-deriver with
|
||||
|
||||
healthQuit chan chan error
|
||||
|
||||
// Locking a hardware wallet is a bit special. Since hardware devices are lower
|
||||
// performing, any communication with them might take a non negligible amount of
|
||||
// time. Worse still, waiting for user confirmation can take arbitrarily long,
|
||||
// but exclusive communication must be upheld during. Locking the entire wallet
|
||||
// in the mean time however would stall any parts of the system that don't want
|
||||
// to communicate, just read some state (e.g. list the accounts).
|
||||
//
|
||||
// As such, a hardware wallet needs two locks to function correctly. A state
|
||||
// lock can be used to protect the wallet's software-side internal state, which
|
||||
// must not be held exclusively during hardware communication. A communication
|
||||
// lock can be used to achieve exclusive access to the device itself, this one
|
||||
// however should allow "skipping" waiting for operations that might want to
|
||||
// use the device, but can live without too (e.g. account self-derivation).
|
||||
//
|
||||
// Since we have two locks, it's important to know how to properly use them:
|
||||
// - Communication requires the `device` to not change, so obtaining the
|
||||
// commsLock should be done after having a stateLock.
|
||||
// - Communication must not disable read access to the wallet state, so it
|
||||
// must only ever hold a *read* lock to stateLock.
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
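A minimal sketch of the two-lock discipline described in the comment above, using hypothetical stand-ins for the struct fields; the real wallet code inlines this pattern rather than going through a helper:

// tryExchange is a hypothetical helper (not in the original code) showing the
// lock ordering described above: hold only a read lock on the state, and take
// the buffered comms "mutex" non-blockingly so optional work can simply skip.
func tryExchange(stateLock *sync.RWMutex, commsLock chan struct{}, fn func() error) error {
	stateLock.RLock() // keeps the device pointer stable without blocking readers
	defer stateLock.RUnlock()

	select {
	case <-commsLock: // exclusive device access obtained
	default:
		return errors.New("device busy") // optional callers may just skip this round
	}
	defer func() { commsLock <- struct{}{} }()

	return fn()
}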
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
||||
func (w *ledgerWallet) URL() accounts.URL {
|
||||
return *w.url // Immutable, no need for a lock
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning whether the Ledger is opened, closed
|
||||
// or whether the Ethereum app was not started on it.
|
||||
func (w *ledgerWallet) Status() string {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure)
|
||||
}
|
||||
if w.device == nil {
|
||||
return "Closed"
|
||||
}
|
||||
if w.browser {
|
||||
return "Ethereum app in browser mode"
|
||||
}
|
||||
if w.offline() {
|
||||
return "Ethereum app offline"
|
||||
}
|
||||
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
|
||||
// offline returns whether the wallet and the Ethereum app is offline or not.
|
||||
//
|
||||
// The method assumes that the state lock is held!
|
||||
func (w *ledgerWallet) offline() bool {
|
||||
return w.version == [3]byte{0, 0, 0}
|
||||
}
|
||||
|
||||
// failed returns if the USB device wrapped by the wallet failed for some reason.
|
||||
// This is used by the device scanner to report failed wallets as departed.
|
||||
//
|
||||
// The method assumes that the state lock is *not* held!
|
||||
func (w *ledgerWallet) failed() bool {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
return w.failure != nil
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, attempting to open a USB connection to the
|
||||
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
|
||||
// parameter is silently discarded.
|
||||
func (w *ledgerWallet) Open(passphrase string) error {
|
||||
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
// If the wallet was already opened, don't try to open again
|
||||
if w.device != nil {
|
||||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Otherwise iterate over all USB devices and find this again (no way to directly do this)
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Wallet seems to be successfully opened, guess if the Ethereum app is running
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
|
||||
w.paths = make(map[common.Address]accounts.DerivationPath)
|
||||
|
||||
w.deriveReq = make(chan chan struct{})
|
||||
w.deriveQuit = make(chan chan error)
|
||||
w.healthQuit = make(chan chan error)
|
||||
|
||||
defer func() {
|
||||
go w.heartbeat()
|
||||
go w.selfDerive()
|
||||
}()
|
||||
|
||||
if _, err = w.ledgerDerive(accounts.DefaultBaseDerivationPath); err != nil {
|
||||
// Ethereum app is not running or in browser mode, nothing more to do, return
|
||||
if err == errReplyInvalidHeader {
|
||||
w.browser = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
|
||||
if w.version, err = w.ledgerVersion(); err != nil {
|
||||
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// heartbeat is a health check loop for the Ledger wallets to periodically verify
|
||||
// whether they are still present or if they malfunctioned. It is needed because:
|
||||
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
||||
// - communication timeout on the Ledger requires a device power cycle to fix
|
||||
func (w *ledgerWallet) heartbeat() {
|
||||
w.log.Debug("Ledger health-check started")
|
||||
defer w.log.Debug("Ledger health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until termination is requested or the heartbeat cycle arrives
|
||||
select {
|
||||
case errc = <-w.healthQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case <-time.After(ledgerHeartbeatCycle):
|
||||
// Heartbeat time
|
||||
}
|
||||
// Execute a tiny data exchange to see responsiveness
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil {
|
||||
// Terminated while waiting for the lock
|
||||
w.stateLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
<-w.commsLock // Don't lock state while resolving version
|
||||
_, err = w.ledgerVersion()
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err != nil && err != errInvalidVersionReply {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.failure = err
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore non hardware related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("Ledger health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Close implements accounts.Wallet, closing the USB connection to the Ledger.
|
||||
func (w *ledgerWallet) Close() error {
|
||||
// Ensure the wallet was opened
|
||||
w.stateLock.RLock()
|
||||
hQuit, dQuit := w.healthQuit, w.deriveQuit
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Terminate the health checks
|
||||
var herr error
|
||||
if hQuit != nil {
|
||||
errc := make(chan error)
|
||||
hQuit <- errc
|
||||
herr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the self-derivations
|
||||
var derr error
|
||||
if dQuit != nil {
|
||||
errc := make(chan error)
|
||||
dQuit <- errc
|
||||
derr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the device connection
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.healthQuit = nil
|
||||
w.deriveQuit = nil
|
||||
w.deriveReq = nil
|
||||
|
||||
if err := w.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
return derr
|
||||
}
|
||||
|
||||
// close is the internal wallet closer that terminates the USB connection and
|
||||
// resets all the fields to their defaults.
|
||||
//
|
||||
// Note, close assumes the state lock is held!
|
||||
func (w *ledgerWallet) close() error {
|
||||
// Allow duplicate closes, especially for health-check failures
|
||||
if w.device == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
w.accounts, w.paths = nil, nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
// the Ledger hardware wallet. If self-derivation was enabled, the account list
|
||||
// is periodically expanded based on current chain state.
|
||||
func (w *ledgerWallet) Accounts() []accounts.Account {
|
||||
// Attempt self-derivation if it's running
|
||||
reqc := make(chan struct{}, 1)
|
||||
select {
|
||||
case w.deriveReq <- reqc:
|
||||
// Self-derivation request accepted, wait for it
|
||||
<-reqc
|
||||
default:
|
||||
// Self-derivation offline, throttled or busy, skip
|
||||
}
|
||||
// Return whatever account list we ended up with
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Account, len(w.accounts))
|
||||
copy(cpy, w.accounts)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *ledgerWallet) selfDerive() {
|
||||
w.log.Debug("Ledger self-derivation started")
|
||||
defer w.log.Debug("Ledger self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
reqc chan struct{}
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until either derivation or termination is requested
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case reqc = <-w.deriveReq:
|
||||
// Account discovery requested
|
||||
}
|
||||
// Derivation needs a chain and device access, skip if either unavailable
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil || w.deriveChain == nil || w.offline() {
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case <-w.commsLock:
|
||||
default:
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
// Device lock obtained, derive the next batch of accounts
|
||||
var (
|
||||
accs []accounts.Account
|
||||
paths []accounts.DerivationPath
|
||||
|
||||
nextAddr = w.deriveNextAddr
|
||||
nextPath = w.deriveNextPath
|
||||
|
||||
context = context.Background()
|
||||
)
|
||||
for empty := false; !empty; {
|
||||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
||||
w.log.Warn("Ledger account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check the account's status against the current chain state
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("Ledger balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("Ledger nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPath))
|
||||
copy(path[:], nextPath[:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddr,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
accs = append(accs, account)
|
||||
|
||||
// Display a log message to the user for new (or previously empty) accounts
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
nextAddr = common.Address{}
|
||||
nextPath[len(nextPath)-1]++
|
||||
}
|
||||
}
|
||||
// Self derivation complete, release device lock
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Insert any accounts successfully derived
|
||||
w.stateLock.Lock()
|
||||
for i := 0; i < len(accs); i++ {
|
||||
if _, ok := w.paths[accs[i].Address]; !ok {
|
||||
w.accounts = append(w.accounts, accs[i])
|
||||
w.paths[accs[i].Address] = paths[i]
|
||||
}
|
||||
}
|
||||
// Shift the self-derivation forward
|
||||
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
|
||||
w.deriveNextAddr = nextAddr
|
||||
w.deriveNextPath = nextPath
|
||||
w.stateLock.Unlock()
|
||||
|
||||
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
|
||||
reqc <- struct{}{}
|
||||
if err == nil {
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested, abort
|
||||
case <-time.After(ledgerSelfDeriveThrottling):
|
||||
// Waited enough, willing to self-derive again
|
||||
}
|
||||
}
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("Ledger self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Contains implements accounts.Wallet, returning whether a particular account is
|
||||
// or is not pinned into this Ledger instance. Although we could attempt to resolve
|
||||
// unpinned accounts, that would be a non-negligible hardware operation.
|
||||
func (w *ledgerWallet) Contains(account accounts.Account) bool {
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
_, exists := w.paths[account.Address]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Derive implements accounts.Wallet, deriving a new account at the specific
|
||||
// derivation path. If pin is set to true, the account will be added to the list
|
||||
// of tracked accounts.
|
||||
func (w *ledgerWallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
|
||||
// Try to derive the actual account and update its URL if successful
|
||||
w.stateLock.RLock() // Avoid device disappearing during derivation
|
||||
|
||||
if w.device == nil || w.offline() {
|
||||
w.stateLock.RUnlock()
|
||||
return accounts.Account{}, accounts.ErrWalletClosed
|
||||
}
|
||||
<-w.commsLock // Avoid concurrent hardware access
|
||||
address, err := w.ledgerDerive(path)
|
||||
w.commsLock <- struct{}{}
|
||||
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// If an error occurred or no pinning was requested, return
|
||||
if err != nil {
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
account := accounts.Account{
|
||||
Address: address,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
if !pin {
|
||||
return account, nil
|
||||
}
|
||||
// Pinning needs to modify the state
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
if _, ok := w.paths[address]; !ok {
|
||||
w.accounts = append(w.accounts, account)
|
||||
w.paths[address] = path
|
||||
}
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
|
||||
// user used previously (based on the chain state), but ones that he/she did not
|
||||
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
|
||||
// derivation only runs during account listing (and even then throttled).
|
||||
func (w *ledgerWallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.deriveNextPath = make(accounts.DerivationPath, len(base))
|
||||
copy(w.deriveNextPath[:], base[:])
|
||||
|
||||
w.deriveNextAddr = common.Address{}
|
||||
w.deriveChain = chain
|
||||
}
|
||||
|
||||
// SignHash implements accounts.Wallet, however signing arbitrary data is not
|
||||
// supported for Ledger wallets, so this method will always return an error.
|
||||
func (w *ledgerWallet) SignHash(acc accounts.Account, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
|
||||
// wallet to request a confirmation from the user. It returns either the signed
|
||||
// transaction or a failure if the user denied the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned as opposed to silently signing in Homestead mode.
|
||||
func (w *ledgerWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
// If the wallet is closed, or the Ethereum app doesn't run, abort
|
||||
if w.device == nil || w.offline() {
|
||||
return nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Make sure the requested account is contained within
|
||||
path, ok := w.paths[account.Address]
|
||||
if !ok {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
return nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All infos gathered and metadata checks out, request signing
|
||||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
return w.ledgerSign(path, account.Address, tx, chainID)
|
||||
}
|
||||
|
||||
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
|
||||
// data is not supported for Ledger wallets, so this method will always return
|
||||
// an error.
|
||||
func (w *ledgerWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
// Since the Ledger does not support extra passphrases, it is silently ignored.
|
||||
func (w *ledgerWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
return w.SignTx(account, tx, chainID)
|
||||
}
|
||||
|
||||
// ledgerVersion retrieves the current version of the Ethereum wallet app running
|
||||
// on the Ledger wallet.
|
||||
//
|
||||
// The version retrieval protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+----+---
|
||||
// E0 | 06 | 00 | 00 | 00 | 04
|
||||
//
|
||||
// With no input data, and the output data being:
|
||||
//
|
||||
// Description | Length
|
||||
// ---------------------------------------------------+--------
|
||||
// Flags 01: arbitrary data signature enabled by user | 1 byte
|
||||
// Application major version | 1 byte
|
||||
// Application minor version | 1 byte
|
||||
// Application patch version | 1 byte
|
||||
func (w *ledgerWallet) ledgerVersion() ([3]byte, error) {
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
|
||||
if err != nil {
|
||||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
copy(version[:], reply[1:])
|
||||
return version, nil
|
||||
}
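A worked example of the reply layout in the table above, using an assumed 4-byte response; only the last three bytes carry the version:

package main

import "fmt"

func main() {
	// Assumed reply per the table above: flags byte, then major/minor/patch.
	reply := []byte{0x01, 0x01, 0x00, 0x02}

	var version [3]byte
	copy(version[:], reply[1:]) // drop the flags byte
	fmt.Printf("v%d.%d.%d\n", version[0], version[1], version[2]) // prints v1.0.2
}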
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
|
||||
// wallet at the specified derivation path.
|
||||
//
|
||||
// The address derivation protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 02 | 00 return address
|
||||
// 01 display address and confirm before returning
|
||||
// | 00: do not return the chain code
|
||||
// | 01: return the chain code
|
||||
// | var | 00
|
||||
//
|
||||
// Where the input data is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+--------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------------------+-------------------
|
||||
// Public Key length | 1 byte
|
||||
// Uncompressed Public Key | arbitrary
|
||||
// Ethereum address length | 1 byte
|
||||
// Ethereum address | 40 bytes hex ascii
|
||||
// Chain code if requested | 32 bytes
|
||||
func (w *ledgerWallet) ledgerDerive(derivationPath []uint32) (common.Address, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
// Discard the public key, we don't need that for now
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks public key entry")
|
||||
}
|
||||
reply = reply[1+int(reply[0]):]
|
||||
|
||||
// Extract the Ethereum hex address string
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks address entry")
|
||||
}
|
||||
hexstr := reply[1 : 1+int(reply[0])]
|
||||
|
||||
// Decode the hex string into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
return address, nil
|
||||
}
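To make the request layout concrete, here is an illustrative, standalone encoding of a typical path such as m/44'/60'/0'/0 (hardened components carry the 0x80000000 bit); it is a sketch, not part of the wallet code:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Illustrative path m/44'/60'/0'/0: hardened indices carry the 0x80000000 bit.
	path := []uint32{0x8000002c, 0x8000003c, 0x80000000, 0x00000000}

	// One count byte, then each index as a big-endian uint32, as documented above.
	buf := make([]byte, 1+4*len(path))
	buf[0] = byte(len(path))
	for i, component := range path {
		binary.BigEndian.PutUint32(buf[1+4*i:], component)
	}
	fmt.Printf("% x\n", buf) // 04 80 00 00 2c 80 00 00 3c 80 00 00 00 00 00 00 00
}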
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
//
|
||||
// The transaction signing protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 04 | 00: first transaction data block
|
||||
// 80: subsequent transaction data block
|
||||
// | 00 | variable | variable
|
||||
//
|
||||
// Where the input for the first transaction block (first 255 bytes) is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+----------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the input for subsequent transaction blocks (first 255 bytes) are:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------+----------
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------+---------
|
||||
// signature V | 1 byte
|
||||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerWallet) ledgerSign(derivationPath []uint32, address common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
|
||||
var (
|
||||
txrlp []byte
|
||||
err error
|
||||
)
|
||||
if chainID == nil {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
||||
// Send the request and wait for the response
|
||||
var (
|
||||
op = ledgerP1InitTransactionData
|
||||
reply []byte
|
||||
)
|
||||
for len(payload) > 0 {
|
||||
// Calculate the size of the next data chunk
|
||||
chunk := 255
|
||||
if chunk > len(payload) {
|
||||
chunk = len(payload)
|
||||
}
|
||||
// Send the chunk over, ensuring it's processed correctly
|
||||
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Shift the payload and ensure subsequent chunks are marked as such
|
||||
payload = payload[chunk:]
|
||||
op = ledgerP1ContTransactionData
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(reply) != 65 {
|
||||
return nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(reply[1:], reply[0])
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sender != address {
|
||||
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", address.Hex(), sender.Hex())
|
||||
}
|
||||
return signed, nil
|
||||
}
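A short worked example of the V adjustment above, assuming chain ID 1: the device reports V as chainID*2 + 35 plus the recovery parity, and the subtraction restores the plain 0/1 value expected at signature[64]:

package main

import "fmt"

func main() {
	chainID := uint64(1)
	for _, v := range []byte{37, 38} { // raw V values reported for chain ID 1
		fmt.Println(v - byte(chainID*2+35)) // prints 0, then 1
	}
}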
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
|
||||
// message and retrieving the response.
|
||||
//
|
||||
// The common transport header is defined as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// Communication channel ID (big endian) | 2 bytes
|
||||
// Command tag | 1 byte
|
||||
// Packet sequence index (big endian) | 2 bytes
|
||||
// Payload | arbitrary
|
||||
//
|
||||
// The Communication channel ID allows commands multiplexing over the same
|
||||
// physical link. It is not used for the time being, and should be set to 0101
|
||||
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
|
||||
//
|
||||
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
|
||||
// APDU payloads, or TAG_PING (0x02) for a simple link test.
|
||||
//
|
||||
// The Packet sequence index describes the current sequence for fragmented payloads.
|
||||
// The first fragment index is 0x00.
|
||||
//
|
||||
// APDU Command payloads are encoded as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------------------+----------
|
||||
// APDU length (big endian) | 2 bytes
|
||||
// APDU CLA | 1 byte
|
||||
// APDU INS | 1 byte
|
||||
// APDU P1 | 1 byte
|
||||
// APDU P2 | 1 byte
|
||||
// APDU length | 1 byte
|
||||
// Optional APDU data | arbitrary
|
||||
func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
|
||||
// Construct the message payload, possibly split into multiple chunks
|
||||
apdu := make([]byte, 2, 7+len(data))
|
||||
|
||||
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
|
||||
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
|
||||
apdu = append(apdu, data...)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
|
||||
chunk := make([]byte, 64)
|
||||
space := len(chunk) - len(header)
|
||||
|
||||
for i := 0; len(apdu) > 0; i++ {
|
||||
// Construct the new message to stream
|
||||
chunk = append(chunk[:0], header...)
|
||||
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
|
||||
|
||||
if len(apdu) > space {
|
||||
chunk = append(chunk, apdu[:space]...)
|
||||
apdu = apdu[space:]
|
||||
} else {
|
||||
chunk = append(chunk, apdu...)
|
||||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var reply []byte
|
||||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the total message length
|
||||
var payload []byte
|
||||
|
||||
if chunk[3] == 0x00 && chunk[4] == 0x00 {
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
|
||||
payload = chunk[7:]
|
||||
} else {
|
||||
payload = chunk[5:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
return reply[:len(reply)-2], nil
|
||||
}
|
330 accounts/usbwallet/trezor.go Normal file
@@ -0,0 +1,330 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Trezor hardware
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In
|
||||
// this case, the calling application should display a pinpad and send back the
|
||||
// encoded passphrase.
|
||||
var ErrTrezorPINNeeded = errors.New("trezor: pin needed")
|
||||
|
||||
// errTrezorReplyInvalidHeader is the error message returned by a Trezor data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errTrezorReplyInvalidHeader = errors.New("trezor: invalid reply header")
|
||||
|
||||
// trezorDriver implements the communication with a Trezor hardware wallet.
|
||||
type trezorDriver struct {
|
||||
device io.ReadWriter // USB device connection to communicate through
|
||||
version [3]uint32 // Current version of the Trezor firmware
|
||||
label string // Current textual label of the Trezor device
|
||||
pinwait bool // Flags whether the device is waiting for PIN entry
|
||||
failure error // Any failure that would make the device unusable
|
||||
log log.Logger // Contextual logger to tag the trezor with its id
|
||||
}
|
||||
|
||||
// newTrezorDriver creates a new instance of a Trezor USB protocol driver.
|
||||
func newTrezorDriver(logger log.Logger) driver {
|
||||
return &trezorDriver{
|
||||
log: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Status implements usbwallet.driver, returning whether the Trezor is opened,
// closed, or still waiting for its PIN code to be entered.
|
||||
func (w *trezorDriver) Status() (string, error) {
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure), w.failure
|
||||
}
|
||||
if w.device == nil {
|
||||
return "Closed", w.failure
|
||||
}
|
||||
if w.pinwait {
|
||||
return fmt.Sprintf("Trezor v%d.%d.%d '%s' waiting for PIN", w.version[0], w.version[1], w.version[2], w.label), w.failure
|
||||
}
|
||||
return fmt.Sprintf("Trezor v%d.%d.%d '%s' online", w.version[0], w.version[1], w.version[2], w.label), w.failure
|
||||
}
|
||||
|
||||
// Open implements usbwallet.driver, attempting to initialize the connection to
|
||||
// the Trezor hardware wallet. Initializing the Trezor is a two phase operation:
|
||||
// * The first phase is to initialize the connection and read the wallet's
|
||||
// features. This phase is invoked if the provided passphrase is empty. The
|
||||
// device will display the pinpad as a result and will return an appropriate
|
||||
// error to notify the user that a second open phase is needed.
|
||||
// * The second phase is to unlock access to the Trezor, which is done by the
|
||||
// user actually providing a passphrase mapping a keyboard keypad to the pin
|
||||
// number of the user (shuffled according to the pinpad displayed).
|
||||
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
|
||||
w.device, w.failure = device, nil
|
||||
|
||||
// If phase 1 is requested, init the connection and wait for user callback
|
||||
if passphrase == "" {
|
||||
// If we're already waiting for a PIN entry, insta-return
|
||||
if w.pinwait {
|
||||
return ErrTrezorPINNeeded
|
||||
}
|
||||
// Initialize a connection to the device
|
||||
features := new(trezor.Features)
|
||||
if _, err := w.trezorExchange(&trezor.Initialize{}, features); err != nil {
|
||||
return err
|
||||
}
|
||||
w.version = [3]uint32{features.GetMajorVersion(), features.GetMinorVersion(), features.GetPatchVersion()}
|
||||
w.label = features.GetLabel()
|
||||
|
||||
// Do a manual ping, forcing the device to ask for its PIN
|
||||
askPin := true
|
||||
res, err := w.trezorExchange(&trezor.Ping{PinProtection: &askPin}, new(trezor.PinMatrixRequest), new(trezor.Success))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Only return the PIN request if the device wasn't unlocked until now
|
||||
if res == 1 {
|
||||
return nil // Device responded with trezor.Success
|
||||
}
|
||||
w.pinwait = true
|
||||
return ErrTrezorPINNeeded
|
||||
}
|
||||
// Phase 2 requested with actual PIN entry
|
||||
w.pinwait = false
|
||||
|
||||
if _, err := w.trezorExchange(&trezor.PinMatrixAck{Pin: &passphrase}, new(trezor.Success)); err != nil {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
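The two-phase flow described above could be driven from calling code roughly as follows; unlockTrezor is a hypothetical helper (not part of this change) and the PIN literal is only an example of the keypad-position encoding:

// unlockTrezor drives the two-phase open: an empty passphrase triggers the
// pinpad, the second call delivers the PIN collected from the user.
func unlockTrezor(drv driver, dev io.ReadWriter, pin string) error {
	err := drv.Open(dev, "")
	if err == ErrTrezorPINNeeded {
		return drv.Open(dev, pin) // e.g. "583697", matching the shuffled pinpad
	}
	return err
}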
// Close implements usbwallet.driver, cleaning up any metadata maintained within
|
||||
// the Trezor driver.
|
||||
func (w *trezorDriver) Close() error {
|
||||
w.version, w.label, w.pinwait = [3]uint32{}, "", false
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat implements usbwallet.driver, performing a sanity check against the
|
||||
// Trezor to see if it's still online.
|
||||
func (w *trezorDriver) Heartbeat() error {
|
||||
if _, err := w.trezorExchange(&trezor.Ping{}, new(trezor.Success)); err != nil {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Derive implements usbwallet.driver, sending a derivation request to the Trezor
|
||||
// and returning the Ethereum address located on that derivation path.
|
||||
func (w *trezorDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
|
||||
return w.trezorDerive(path)
|
||||
}
|
||||
|
||||
// SignTx implements usbwallet.driver, sending the transaction to the Trezor and
|
||||
// waiting for the user to confirm or deny the transaction.
|
||||
func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
if w.device == nil {
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
return w.trezorSign(path, tx, chainID)
|
||||
}
|
||||
|
||||
// trezorDerive sends a derivation request to the Trezor device and returns the
|
||||
// Ethereum address located on that path.
|
||||
func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) {
|
||||
address := new(trezor.EthereumAddress)
|
||||
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
return common.BytesToAddress(address.GetAddress()), nil
|
||||
}
|
||||
|
||||
// trezorSign sends the transaction to the Trezor wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// Create the transaction initiation message
|
||||
data := tx.Data()
|
||||
length := uint32(len(data))
|
||||
|
||||
request := &trezor.EthereumSignTx{
|
||||
AddressN: derivationPath,
|
||||
Nonce: new(big.Int).SetUint64(tx.Nonce()).Bytes(),
|
||||
GasPrice: tx.GasPrice().Bytes(),
|
||||
GasLimit: tx.Gas().Bytes(),
|
||||
Value: tx.Value().Bytes(),
|
||||
DataLength: &length,
|
||||
}
|
||||
if to := tx.To(); to != nil {
|
||||
request.To = (*to)[:] // Non contract deploy, set recipient explicitly
|
||||
}
|
||||
if length > 1024 { // Send the data chunked if that was requested
|
||||
request.DataInitialChunk, data = data[:1024], data[1024:]
|
||||
} else {
|
||||
request.DataInitialChunk, data = data, nil
|
||||
}
|
||||
if chainID != nil { // EIP-155 transaction, set chain ID explicitly (only 32 bit is supported!?)
|
||||
id := uint32(chainID.Int64())
|
||||
request.ChainId = &id
|
||||
}
|
||||
// Send the initiation message and stream content until a signature is returned
|
||||
response := new(trezor.EthereumTxRequest)
|
||||
if _, err := w.trezorExchange(request, response); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
for response.DataLength != nil && int(*response.DataLength) <= len(data) {
|
||||
chunk := data[:*response.DataLength]
|
||||
data = data[*response.DataLength:]
|
||||
|
||||
if _, err := w.trezorExchange(&trezor.EthereumTxAck{DataChunk: chunk}, response); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(response.GetSignatureR()) == 0 || len(response.GetSignatureS()) == 0 || response.GetSignatureV() == 0 {
|
||||
return common.Address{}, nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(append(response.GetSignatureR(), response.GetSignatureS()...), byte(response.GetSignatureV()))
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
return sender, signed, nil
|
||||
}
|
||||
|
||||
// trezorExchange performs a data exchange with the Trezor wallet, sending it a
|
||||
// message and retrieving the response. If multiple responses are possible, the
|
||||
// method will also return the index of the destination object used.
|
||||
func (w *trezorDriver) trezorExchange(req proto.Message, results ...proto.Message) (int, error) {
|
||||
// Construct the original message payload to chunk up
|
||||
data, err := proto.Marshal(req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
payload := make([]byte, 8+len(data))
|
||||
copy(payload, []byte{0x23, 0x23})
|
||||
binary.BigEndian.PutUint16(payload[2:], trezor.Type(req))
|
||||
binary.BigEndian.PutUint32(payload[4:], uint32(len(data)))
|
||||
copy(payload[8:], data)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
chunk := make([]byte, 64)
|
||||
chunk[0] = 0x3f // Report ID magic number
|
||||
|
||||
for len(payload) > 0 {
|
||||
// Construct the new message to stream, padding with zeroes if needed
|
||||
if len(payload) > 63 {
|
||||
copy(chunk[1:], payload[:63])
|
||||
payload = payload[63:]
|
||||
} else {
|
||||
copy(chunk[1:], payload)
|
||||
copy(chunk[1+len(payload):], make([]byte, 63-len(payload)))
|
||||
payload = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Trezor", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var (
|
||||
kind uint16
|
||||
reply []byte
|
||||
)
|
||||
for {
|
||||
// Read the next chunk from the Trezor wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Trezor", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x3f || (len(reply) == 0 && (chunk[1] != 0x23 || chunk[2] != 0x23)) {
|
||||
return 0, errTrezorReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the reply message type and total message length
|
||||
var payload []byte
|
||||
|
||||
if len(reply) == 0 {
|
||||
kind = binary.BigEndian.Uint16(chunk[3:5])
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint32(chunk[5:9])))
|
||||
payload = chunk[9:]
|
||||
} else {
|
||||
payload = chunk[1:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Try to parse the reply into the requested reply message
|
||||
if kind == uint16(trezor.MessageType_MessageType_Failure) {
|
||||
// Trezor returned a failure, extract and return the message
|
||||
failure := new(trezor.Failure)
|
||||
if err := proto.Unmarshal(reply, failure); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return 0, errors.New("trezor: " + failure.GetMessage())
|
||||
}
|
||||
if kind == uint16(trezor.MessageType_MessageType_ButtonRequest) {
|
||||
// Trezor is waiting for user confirmation, ack and wait for the next message
|
||||
return w.trezorExchange(&trezor.ButtonAck{}, results...)
|
||||
}
|
||||
for i, res := range results {
|
||||
if trezor.Type(res) == kind {
|
||||
return i, proto.Unmarshal(reply, res)
|
||||
}
|
||||
}
|
||||
expected := make([]string, len(results))
|
||||
for i, res := range results {
|
||||
expected[i] = trezor.Name(trezor.Type(res))
|
||||
}
|
||||
return 0, fmt.Errorf("trezor: expected reply types %s, got %s", expected, trezor.Name(kind))
|
||||
}
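For reference, the standalone sketch below (illustrative only, not part of the change) lays out the first 64-byte report exactly as the loop above does: a 0x3f report ID, the '##' magic, the big-endian message type and length, then the marshalled protobuf data:

package main

import (
	"encoding/binary"
	"fmt"
)

// firstTrezorReport lays out the first 64-byte USB report for an already
// marshalled message, mirroring the framing used above.
func firstTrezorReport(msgType uint16, data []byte) []byte {
	payload := make([]byte, 8+len(data))
	copy(payload, []byte{0x23, 0x23})                          // '##' magic
	binary.BigEndian.PutUint16(payload[2:], msgType)           // message type
	binary.BigEndian.PutUint32(payload[4:], uint32(len(data))) // payload length
	copy(payload[8:], data)

	report := make([]byte, 64)
	report[0] = 0x3f // HID report ID magic number
	copy(report[1:], payload)
	return report
}

func main() {
	fmt.Printf("% x\n", firstTrezorReport(1, nil)) // 1 is assumed here to stand for Ping
}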
562 accounts/usbwallet/wallet.go Normal file
@@ -0,0 +1,562 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package usbwallet implements support for USB hardware wallets.
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
const heartbeatCycle = time.Second
|
||||
|
||||
// Minimum time to wait between self derivation attempts, even if the user is
|
||||
// requesting accounts like crazy.
|
||||
const selfDeriveThrottling = time.Second
|
||||
|
||||
// driver defines the vendor-specific functionality hardware wallet instances
|
||||
// must implement to allow using them with the wallet lifecycle management.
|
||||
type driver interface {
|
||||
// Status returns a textual status to aid the user in the current state of the
|
||||
// wallet. It also returns an error indicating any failure the wallet might have
|
||||
// encountered.
|
||||
Status() (string, error)
|
||||
|
||||
// Open initializes access to a wallet instance. The passphrase parameter may
|
||||
// or may not be used by the implementation of a particular wallet instance.
|
||||
Open(device io.ReadWriter, passphrase string) error
|
||||
|
||||
// Close releases any resources held by an open wallet instance.
|
||||
Close() error
|
||||
|
||||
// Heartbeat performs a sanity check against the hardware wallet to see if it
|
||||
// is still online and healthy.
|
||||
Heartbeat() error
|
||||
|
||||
// Derive sends a derivation request to the USB device and returns the Ethereum
|
||||
// address located on that path.
|
||||
Derive(path accounts.DerivationPath) (common.Address, error)
|
||||
|
||||
// SignTx sends the transaction to the USB device and waits for the user to confirm
|
||||
// or deny the transaction.
|
||||
SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error)
|
||||
}
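To show the shape a vendor implementation has to take, here is a hypothetical no-op driver satisfying the interface above; it is not part of this change, and its derivation and signing methods simply report that the operation is unsupported:

// nullDriver is an illustrative stub showing the method set a vendor driver
// needs in order to plug into the shared wallet life-cycle management below.
type nullDriver struct{}

func (d *nullDriver) Status() (string, error)                             { return "Stub online", nil }
func (d *nullDriver) Open(device io.ReadWriter, passphrase string) error { return nil }
func (d *nullDriver) Close() error                                       { return nil }
func (d *nullDriver) Heartbeat() error                                   { return nil }
func (d *nullDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
	return common.Address{}, accounts.ErrNotSupported
}
func (d *nullDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
	return common.Address{}, nil, accounts.ErrNotSupported
}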
// wallet represents the common functionality shared by all USB hardware
|
||||
// wallets to prevent reimplementing the same complex maintenance mechanisms
|
||||
// for different vendors.
|
||||
type wallet struct {
|
||||
hub *Hub // USB hub scanning
|
||||
driver driver // Hardware implementation of the low level device operations
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a hardware wallet
|
||||
|
||||
accounts []accounts.Account // List of derived accounts pinned on the hardware wallet
|
||||
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
|
||||
|
||||
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
|
||||
deriveNextAddr common.Address // Next derived account address for auto-discovery
|
||||
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used account with
|
||||
deriveReq chan chan struct{} // Channel to request a self-derivation on
|
||||
deriveQuit chan chan error // Channel to terminate the self-deriver with
|
||||
|
||||
healthQuit chan chan error
|
||||
|
||||
// Locking a hardware wallet is a bit special. Since hardware devices are lower
|
||||
// performing, any communication with them might take a non negligible amount of
|
||||
// time. Worse still, waiting for user confirmation can take arbitrarily long,
|
||||
// but exclusive communication must be upheld during. Locking the entire wallet
|
||||
// in the mean time however would stall any parts of the system that don't want
|
||||
// to communicate, just read some state (e.g. list the accounts).
|
||||
//
|
||||
// As such, a hardware wallet needs two locks to function correctly. A state
|
||||
// lock can be used to protect the wallet's software-side internal state, which
|
||||
// must not be held exlusively during hardware communication. A communication
|
||||
// lock can be used to achieve exclusive access to the device itself, this one
|
||||
// however should allow "skipping" waiting for operations that might want to
|
||||
// use the device, but can live without too (e.g. account self-derivation).
|
||||
//
|
||||
// Since we have two locks, it's important to know how to properly use them:
|
||||
// - Communication requires the `device` to not change, so obtaining the
|
||||
// commsLock should be done after having a stateLock.
|
||||
// - Communication must not disable read access to the wallet state, so it
|
||||
// must only ever hold a *read* lock to stateLock.
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the base with its id
|
||||
}
|
||||
|
||||
// URL implements accounts.Wallet, returning the URL of the USB hardware device.
|
||||
func (w *wallet) URL() accounts.URL {
|
||||
return *w.url // Immutable, no need for a lock
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning a custom status message from the
|
||||
// underlying vendor-specific hardware wallet implementation.
|
||||
func (w *wallet) Status() (string, error) {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
status, failure := w.driver.Status()
|
||||
if w.device == nil {
|
||||
return "Closed", failure
|
||||
}
|
||||
return status, failure
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, attempting to open a USB connection to the
|
||||
// hardware wallet.
|
||||
func (w *wallet) Open(passphrase string) error {
|
||||
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
// If the device was already opened once, refuse to try again
|
||||
if w.paths != nil {
|
||||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Make sure the actual device connection is done only once
|
||||
if w.device == nil {
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
}
|
||||
// Delegate device initialization to the underlying driver
|
||||
if err := w.driver.Open(w.device, passphrase); err != nil {
|
||||
return err
|
||||
}
|
||||
// Connection successful, start life-cycle management
|
||||
w.paths = make(map[common.Address]accounts.DerivationPath)
|
||||
|
||||
w.deriveReq = make(chan chan struct{})
|
||||
w.deriveQuit = make(chan chan error)
|
||||
w.healthQuit = make(chan chan error)
|
||||
|
||||
go w.heartbeat()
|
||||
go w.selfDerive()
|
||||
|
||||
// Notify anyone listening for wallet events that a new device is accessible
|
||||
go w.hub.updateFeed.Send(accounts.WalletEvent{Wallet: w, Kind: accounts.WalletOpened})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// heartbeat is a health check loop for the USB wallets to periodically verify
|
||||
// whether they are still present or if they malfunctioned.
|
||||
func (w *wallet) heartbeat() {
|
||||
w.log.Debug("USB wallet health-check started")
|
||||
defer w.log.Debug("USB wallet health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until termination is requested or the heartbeat cycle arrives
|
||||
select {
|
||||
case errc = <-w.healthQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case <-time.After(heartbeatCycle):
|
||||
// Heartbeat time
|
||||
}
|
||||
// Execute a tiny data exchange to see responsiveness
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil {
|
||||
// Terminated while waiting for the lock
|
||||
w.stateLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
<-w.commsLock // Don't lock state while resolving version
|
||||
err = w.driver.Heartbeat()
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err != nil {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore non hardware related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("USB wallet health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Close implements accounts.Wallet, closing the USB connection to the device.
|
||||
func (w *wallet) Close() error {
|
||||
// Ensure the wallet was opened
|
||||
w.stateLock.RLock()
|
||||
hQuit, dQuit := w.healthQuit, w.deriveQuit
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Terminate the health checks
|
||||
var herr error
|
||||
if hQuit != nil {
|
||||
errc := make(chan error)
|
||||
hQuit <- errc
|
||||
herr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the self-derivations
|
||||
var derr error
|
||||
if dQuit != nil {
|
||||
errc := make(chan error)
|
||||
dQuit <- errc
|
||||
derr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the device connection
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.healthQuit = nil
|
||||
w.deriveQuit = nil
|
||||
w.deriveReq = nil
|
||||
|
||||
if err := w.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
return derr
|
||||
}
|
||||
|
||||
// close is the internal wallet closer that terminates the USB connection and
|
||||
// resets all the fields to their defaults.
|
||||
//
|
||||
// Note, close assumes the state lock is held!
|
||||
func (w *wallet) close() error {
|
||||
// Allow duplicate closes, especially for health-check failures
|
||||
if w.device == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.accounts, w.paths = nil, nil
|
||||
w.driver.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
// the USB hardware wallet. If self-derivation was enabled, the account list is
|
||||
// periodically expanded based on current chain state.
|
||||
func (w *wallet) Accounts() []accounts.Account {
|
||||
// Attempt self-derivation if it's running
|
||||
reqc := make(chan struct{}, 1)
|
||||
select {
|
||||
case w.deriveReq <- reqc:
|
||||
// Self-derivation request accepted, wait for it
|
||||
<-reqc
|
||||
default:
|
||||
// Self-derivation offline, throttled or busy, skip
|
||||
}
|
||||
// Return whatever account list we ended up with
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Account, len(w.accounts))
|
||||
copy(cpy, w.accounts)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *wallet) selfDerive() {
|
||||
w.log.Debug("USB wallet self-derivation started")
|
||||
defer w.log.Debug("USB wallet self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
reqc chan struct{}
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until either derivation or termination is requested
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case reqc = <-w.deriveReq:
|
||||
// Account discovery requested
|
||||
}
|
||||
// Derivation needs a chain and device access, skip if either unavailable
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil || w.deriveChain == nil {
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case <-w.commsLock:
|
||||
default:
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
// Device lock obtained, derive the next batch of accounts
|
||||
var (
|
||||
accs []accounts.Account
|
||||
paths []accounts.DerivationPath
|
||||
|
||||
nextAddr = w.deriveNextAddr
|
||||
nextPath = w.deriveNextPath
|
||||
|
||||
context = context.Background()
|
||||
)
|
||||
for empty := false; !empty; {
|
||||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.driver.Derive(nextPath); err != nil {
|
||||
w.log.Warn("USB wallet account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check the account's status against the current chain state
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("USB wallet balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("USB wallet nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPath))
|
||||
copy(path[:], nextPath[:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddr,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
accs = append(accs, account)
|
||||
|
||||
// Display a log message to the user for new (or previously empty accounts)
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
w.log.Info("USB wallet discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
nextAddr = common.Address{}
|
||||
nextPath[len(nextPath)-1]++
|
||||
}
|
||||
}
|
||||
// Self derivation complete, release device lock
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Insert any accounts successfully derived
|
||||
w.stateLock.Lock()
|
||||
for i := 0; i < len(accs); i++ {
|
||||
if _, ok := w.paths[accs[i].Address]; !ok {
|
||||
w.accounts = append(w.accounts, accs[i])
|
||||
w.paths[accs[i].Address] = paths[i]
|
||||
}
|
||||
}
|
||||
// Shift the self-derivation forward
|
||||
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
|
||||
w.deriveNextAddr = nextAddr
|
||||
w.deriveNextPath = nextPath
|
||||
w.stateLock.Unlock()
|
||||
|
||||
// Notify the user of termination and loop after a bit of time (to avoid trashing)
|
||||
reqc <- struct{}{}
|
||||
if err == nil {
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested, abort
|
||||
case <-time.After(selfDeriveThrottling):
|
||||
// Waited enough, willing to self-derive again
|
||||
}
|
||||
}
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("USB wallet self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Contains implements accounts.Wallet, returning whether a particular account is
|
||||
// or is not pinned into this wallet instance. Although we could attempt to resolve
|
||||
// unpinned accounts, that would be an non-negligible hardware operation.
|
||||
func (w *wallet) Contains(account accounts.Account) bool {
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
_, exists := w.paths[account.Address]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Derive implements accounts.Wallet, deriving a new account at the specific
|
||||
// derivation path. If pin is set to true, the account will be added to the list
|
||||
// of tracked accounts.
|
||||
func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
|
||||
// Try to derive the actual account and update its URL if successful
|
||||
w.stateLock.RLock() // Avoid device disappearing during derivation
|
||||
|
||||
if w.device == nil {
|
||||
w.stateLock.RUnlock()
|
||||
return accounts.Account{}, accounts.ErrWalletClosed
|
||||
}
|
||||
<-w.commsLock // Avoid concurrent hardware access
|
||||
address, err := w.driver.Derive(path)
|
||||
w.commsLock <- struct{}{}
|
||||
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// If an error occurred or no pinning was requested, return
|
||||
if err != nil {
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
account := accounts.Account{
|
||||
Address: address,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
if !pin {
|
||||
return account, nil
|
||||
}
|
||||
// Pinning needs to modify the state
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
if _, ok := w.paths[address]; !ok {
|
||||
w.accounts = append(w.accounts, account)
|
||||
w.paths[address] = path
|
||||
}
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
|
||||
// user used previously (based on the chain state), but ones that he/she did not
|
||||
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
|
||||
// derivation only runs during account listing (and even then throttled).
|
||||
func (w *wallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.deriveNextPath = make(accounts.DerivationPath, len(base))
|
||||
copy(w.deriveNextPath[:], base[:])
|
||||
|
||||
w.deriveNextAddr = common.Address{}
|
||||
w.deriveChain = chain
|
||||
}
|
||||
|
||||
// SignHash implements accounts.Wallet, however signing arbitrary data is not
|
||||
// supported for hardware wallets, so this method will always return an error.
|
||||
func (w *wallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
|
||||
// wallet to request a confirmation from the user. It returns either the signed
|
||||
// transaction or a failure if the user denied the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned opposed to silently signing in Homestead mode.
|
||||
func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
// If the wallet is closed, abort
|
||||
if w.device == nil {
|
||||
return nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Make sure the requested account is contained within
|
||||
path, ok := w.paths[account.Address]
|
||||
if !ok {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// All infos gathered and metadata checks out, request signing
|
||||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
// Sign the transaction and verify the sender to avoid hardware fault surprises
|
||||
sender, signed, err := w.driver.SignTx(path, tx, chainID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sender != account.Address {
|
||||
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", account.Address.Hex(), sender.Hex())
|
||||
}
|
||||
return signed, nil
|
||||
}
|
||||
|
||||
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
|
||||
// data is not supported for Ledger wallets, so this method will always return
|
||||
// an error.
|
||||
func (w *wallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
return w.SignHash(account, hash)
|
||||
}
|
||||
|
||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
// Since USB wallets don't rely on passphrases, these are silently ignored.
|
||||
func (w *wallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
return w.SignTx(account, tx, chainID)
|
||||
}
|
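The two-lock scheme documented in the wallet struct above boils down to a buffered channel of size one acting as the communication mutex, paired with an RWMutex for the software-side state. A minimal, self-contained sketch of that pattern follows; the fakeDevice and demoWallet names are illustrative stand-ins and not part of the go-ethereum API.

package main

import (
	"fmt"
	"sync"
	"time"
)

// fakeDevice stands in for a slow hardware wallet (hypothetical, for illustration).
type fakeDevice struct{}

func (fakeDevice) Ping() error { time.Sleep(10 * time.Millisecond); return nil }

type demoWallet struct {
	stateLock sync.RWMutex  // protects the software-side state
	commsLock chan struct{} // buffered (size 1) mutex for device I/O
	device    *fakeDevice
}

func newDemoWallet() *demoWallet {
	w := &demoWallet{commsLock: make(chan struct{}, 1), device: &fakeDevice{}}
	w.commsLock <- struct{}{} // arm the comms "mutex"
	return w
}

// mustPing blocks until it has exclusive device access (like Heartbeat or SignTx).
func (w *demoWallet) mustPing() error {
	w.stateLock.RLock() // read lock only, so others can still list state
	defer w.stateLock.RUnlock()
	<-w.commsLock // acquire the device
	defer func() { w.commsLock <- struct{}{} }()
	return w.device.Ping()
}

// tryPing gives up immediately if the device is busy (like selfDerive).
func (w *demoWallet) tryPing() bool {
	w.stateLock.RLock()
	defer w.stateLock.RUnlock()
	select {
	case <-w.commsLock: // got the device
	default:
		return false // device busy, live without it
	}
	defer func() { w.commsLock <- struct{}{} }()
	return w.device.Ping() == nil
}

func main() {
	w := newDemoWallet()
	fmt.Println(w.mustPing() == nil, w.tryPing())
}

The key point the sketch tries to capture is that the channel-based lock supports both a blocking acquire and a non-blocking select/default "skip if busy" path, which a plain sync.Mutex does not offer directly.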
@@ -21,9 +21,10 @@ environment:
  PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH%

install:
  - git submodule update --init
  - rmdir C:\go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.1.windows-%GETH_ARCH%.zip
  - 7z x go1.8.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.windows-%GETH_ARCH%.zip
  - 7z x go1.9.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
  - go version
  - gcc --version
562	bmt/bmt.go	Normal file
@@ -0,0 +1,562 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt provides a binary merkle tree implementation
package bmt

import (
	"fmt"
	"hash"
	"io"
	"strings"
	"sync"
	"sync/atomic"
)

/*
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size.
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g. keccak 256 SHA3).

It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash

The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.

Two implementations are provided:

* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
  control structure to coordinate the concurrent routines.
  It implements the ChunkHash interface as well as the go standard hash.Hash interface.
*/

const (
	// DefaultSegmentCount is the maximum number of segments of the underlying chunk
	DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
	// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e.,
	// the maximum number of concurrent BMT hashing operations performed by the same hasher
	DefaultPoolSize = 8
)

// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash

// Hasher is a reusable hasher for fixed maximum size chunks representing a BMT.
// It implements the hash.Hash interface.
// It reuses a pool of Tree-s for amortised memory allocation and resource control,
// supports order-agnostic concurrent segment writes
// as well as sequential read and write,
// can not be called concurrently on more than one chunk,
// can be further appended after Sum.
// Reset gives back the Tree to the pool and is guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk.
type Hasher struct {
	pool        *TreePool   // BMT resource pool
	bmt         *Tree       // prebuilt BMT resource for flow control and proofs
	blocksize   int         // segment size (size of hash) also for hash.Hash
	count       int         // segment count
	size        int         // for hash.Hash same as hashsize
	cur         int         // cursor position for rightmost currently open chunk
	segment     []byte      // the rightmost open segment (not complete)
	depth       int         // index of last level
	result      chan []byte // result channel
	hash        []byte      // to record the result
	max         int32       // max segments for SegmentWriter interface
	blockLength []byte      // The block length that needs to be added in Sum
}

// New creates a reusable Hasher,
// implements the hash.Hash interface,
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
	return &Hasher{
		pool:      p,
		depth:     depth(p.SegmentCount),
		size:      p.SegmentSize,
		blocksize: p.SegmentSize,
		count:     p.SegmentCount,
		result:    make(chan []byte),
	}
}

// Node is a reusable segment hasher representing a node in a BMT.
// It allows for continued writes after a Sum
// and is left in completely reusable state after Reset.
type Node struct {
	level, index int   // position of node for information/logging only
	initial      bool  // first and last node
	root         bool  // whether the node is root to a smaller BMT
	isLeft       bool  // whether it is left side of the parent double segment
	unbalanced   bool  // indicates if a node has only the left segment
	parent       *Node // BMT connections
	state        int32 // atomic increment impl concurrent boolean toggle
	left, right  []byte
}

// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
	return &Node{
		parent:  parent,
		level:   level,
		index:   index,
		initial: index == 0,
		isLeft:  index%2 == 0,
	}
}

// TreePool provides a pool of Trees used as resources by Hasher.
// A Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk.
// Hasher Reset releases the Tree to the pool.
type TreePool struct {
	lock         sync.Mutex
	c            chan *Tree
	hasher       BaseHasher
	SegmentSize  int
	SegmentCount int
	Capacity     int
	count        int
}

// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity.
// On GetTree it reuses free Trees or creates a new one if size is not reached.
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
	return &TreePool{
		c:            make(chan *Tree, capacity),
		hasher:       hasher,
		SegmentSize:  hasher().Size(),
		SegmentCount: segmentCount,
		Capacity:     capacity,
	}
}

// Drain drains the pool until it has no more than n resources
func (self *TreePool) Drain(n int) {
	self.lock.Lock()
	defer self.lock.Unlock()
	for len(self.c) > n {
		<-self.c
		self.count--
	}
}

// Reserve is blocking until it returns an available Tree.
// It reuses free Trees or creates a new one if size is not reached.
func (self *TreePool) Reserve() *Tree {
	self.lock.Lock()
	defer self.lock.Unlock()
	var t *Tree
	if self.count == self.Capacity {
		return <-self.c
	}
	select {
	case t = <-self.c:
	default:
		t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
		self.count++
	}
	return t
}

// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in reusable state
// and does not need locking.
func (self *TreePool) Release(t *Tree) {
	self.c <- t // can never fail but...
}

// Tree is a reusable control structure representing a BMT
// organised in a binary tree.
// Hasher uses a TreePool to pick one for each chunk hash;
// the Tree is 'locked' while not in the pool.
type Tree struct {
	leaves []*Node
}

// Draw draws the BMT (badly)
func (self *Tree) Draw(hash []byte, d int) string {
	var left, right []string
	var anc []*Node
	for i, n := range self.leaves {
		left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
		if i%2 == 0 {
			anc = append(anc, n.parent)
		}
		right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
	}
	anc = self.leaves
	var hashes [][]string
	for l := 0; len(anc) > 0; l++ {
		var nodes []*Node
		hash := []string{""}
		for i, n := range anc {
			hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
			if i%2 == 0 && n.parent != nil {
				nodes = append(nodes, n.parent)
			}
		}
		hash = append(hash, "")
		hashes = append(hashes, hash)
		anc = nodes
	}
	hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
	total := 60
	del := " "
	var rows []string
	for i := len(hashes) - 1; i >= 0; i-- {
		var textlen int
		hash := hashes[i]
		for _, s := range hash {
			textlen += len(s)
		}
		if total < textlen {
			total = textlen + len(hash)
		}
		delsize := (total - textlen) / (len(hash) - 1)
		if delsize > len(del) {
			delsize = len(del)
		}
		row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
		rows = append(rows, row)

	}
	rows = append(rows, strings.Join(left, " "))
	rows = append(rows, strings.Join(right, " "))
	return strings.Join(rows, "\n") + "\n"
}

// NewTree initialises the Tree by building up the nodes of a BMT.
// Segment size is stipulated to be the size of the hash.
// segmentCount needs to be a positive integer and does not need to be
// a power of two and can even be an odd number.
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree.
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
	n := NewNode(0, 0, nil)
	n.root = true
	prevlevel := []*Node{n}
	// iterate over levels and creates 2^level nodes
	level := 1
	count := 2
	for d := 1; d <= depth(segmentCount); d++ {
		nodes := make([]*Node, count)
		for i := 0; i < len(nodes); i++ {
			var parent *Node
			parent = prevlevel[i/2]
			t := NewNode(level, i, parent)
			nodes[i] = t
		}
		prevlevel = nodes
		level++
		count *= 2
	}
	// the datanode level is the nodes on the last level where
	return &Tree{
		leaves: prevlevel,
	}
}

// methods needed by hash.Hash

// Size returns the size
func (self *Hasher) Size() int {
	return self.size
}

// BlockSize returns the block size
func (self *Hasher) BlockSize() int {
	return self.blocksize
}

// Sum returns the hash of the buffer.
// The hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk.
func (self *Hasher) Sum(b []byte) (r []byte) {
	t := self.bmt
	i := self.cur
	n := t.leaves[i]
	j := i
	// must run strictly before all nodes calculate
	// datanodes are guaranteed to have a parent
	if len(self.segment) > self.size && i > 0 && n.parent != nil {
		n = n.parent
	} else {
		i *= 2
	}
	d := self.finalise(n, i)
	self.writeSegment(j, self.segment, d)
	c := <-self.result
	self.releaseTree()

	// sha3(length + BMT(pure_chunk))
	if self.blockLength == nil {
		return c
	}
	res := self.pool.hasher()
	res.Reset()
	res.Write(self.blockLength)
	res.Write(c)
	return res.Sum(nil)
}

// Hasher implements the SwarmHash interface

// Hash waits for the hasher result and returns it.
// The caller must call this on a BMT Hasher being written to.
func (self *Hasher) Hash() []byte {
	return <-self.result
}

// Hasher implements the io.Writer interface

// Write fills the buffer to hash;
// with every full segment complete it launches a hasher go routine
// that shoots up the BMT
func (self *Hasher) Write(b []byte) (int, error) {
	l := len(b)
	if l <= 0 {
		return 0, nil
	}
	s := self.segment
	i := self.cur
	count := (self.count + 1) / 2
	need := self.count*self.size - self.cur*2*self.size
	size := self.size
	if need > size {
		size *= 2
	}
	if l < need {
		need = l
	}
	// calculate missing bit to complete current open segment
	rest := size - len(s)
	if need < rest {
		rest = need
	}
	s = append(s, b[:rest]...)
	need -= rest
	// read full segments and the last possibly partial segment
	for need > 0 && i < count-1 {
		// push all finished chunks we read
		self.writeSegment(i, s, self.depth)
		need -= size
		if need < 0 {
			size += need
		}
		s = b[rest : rest+size]
		rest += size
		i++
	}
	self.segment = s
	self.cur = i
	// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
	return l, nil
}

// Hasher implements the io.ReaderFrom interface

// ReadFrom reads from io.Reader and appends to the data to hash using Write.
// It reads so that the chunk to hash is maximum length or the reader reaches EOF.
// The caller must Reset the hasher prior to the call.
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
	bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
	buf := make([]byte, bufsize)
	var read int
	for {
		var n int
		n, err = r.Read(buf)
		read += n
		if err == io.EOF || read == len(buf) {
			hash := self.Sum(buf[:n])
			if read == len(buf) {
				err = NewEOC(hash)
			}
			break
		}
		if err != nil {
			break
		}
		n, err = self.Write(buf[:n])
		if err != nil {
			break
		}
	}
	return int64(read), err
}

// Reset needs to be called before writing to the hasher
func (self *Hasher) Reset() {
	self.getTree()
	self.blockLength = nil
}

// Hasher implements the SwarmHash interface

// ResetWithLength needs to be called before writing to the hasher;
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (self *Hasher) ResetWithLength(l []byte) {
	self.Reset()
	self.blockLength = l

}

// releaseTree gives back the Tree to the pool whereby it unlocks;
// it resets tree, segment and index
func (self *Hasher) releaseTree() {
	if self.bmt != nil {
		n := self.bmt.leaves[self.cur]
		for ; n != nil; n = n.parent {
			n.unbalanced = false
			if n.parent != nil {
				n.root = false
			}
		}
		self.pool.Release(self.bmt)
		self.bmt = nil

	}
	self.cur = 0
	self.segment = nil
}

func (self *Hasher) writeSegment(i int, s []byte, d int) {
	h := self.pool.hasher()
	n := self.bmt.leaves[i]

	if len(s) > self.size && n.parent != nil {
		go func() {
			h.Reset()
			h.Write(s)
			s = h.Sum(nil)

			if n.root {
				self.result <- s
				return
			}
			self.run(n.parent, h, d, n.index, s)
		}()
		return
	}
	go self.run(n, h, d, i*2, s)
}

func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
	isLeft := i%2 == 0
	for {
		if isLeft {
			n.left = s
		} else {
			n.right = s
		}
		if !n.unbalanced && n.toggle() {
			return
		}
		if !n.unbalanced || !isLeft || i == 0 && d == 0 {
			h.Reset()
			h.Write(n.left)
			h.Write(n.right)
			s = h.Sum(nil)

		} else {
			s = append(n.left, n.right...)
		}

		self.hash = s
		if n.root {
			self.result <- s
			return
		}

		isLeft = n.isLeft
		n = n.parent
		i++
	}
}

// getTree obtains a BMT resource by reserving one from the pool
func (self *Hasher) getTree() *Tree {
	if self.bmt != nil {
		return self.bmt
	}
	t := self.pool.Reserve()
	self.bmt = t
	return t
}

// atomic bool toggle implementing a concurrent reusable 2-state object;
// atomic addint with %2 implements atomic bool toggle;
// it returns true if the toggler just put it in the active/waiting state
func (self *Node) toggle() bool {
	return atomic.AddInt32(&self.state, 1)%2 == 1
}

func hashstr(b []byte) string {
	end := len(b)
	if end > 4 {
		end = 4
	}
	return fmt.Sprintf("%x", b[:end])
}

func depth(n int) (d int) {
	for l := (n - 1) / 2; l > 0; l /= 2 {
		d++
	}
	return d
}

// finalise is following the zigzags on the tree belonging
// to the final datasegment
func (self *Hasher) finalise(n *Node, i int) (d int) {
	isLeft := i%2 == 0
	for {
		// when the final segment's path is going via left segments
		// the incoming data is pushed to the parent upon pulling the left;
		// we do not need to toggle the state since this condition is
		// detectable
		n.unbalanced = isLeft
		n.right = nil
		if n.initial {
			n.root = true
			return d
		}
		isLeft = n.isLeft
		n = n.parent
		d++
	}
}

// EOC (end of chunk) implements the error interface
type EOC struct {
	Hash []byte // read the hash of the chunk off the error
}

// Error returns the error string
func (self *EOC) Error() string {
	return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
}

// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
	return &EOC{hash}
}
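A minimal usage sketch of the Hasher defined above, mirroring what the tests in this change do: build a TreePool over a base hash, Reset the Hasher, stream the chunk in with Write and read the BMT root out of Sum. The import path github.com/ethereum/go-ethereum/bmt is assumed from the location of the new file; the sha3 import is the one used by bmt_test.go below.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

func main() {
	// Pool of reusable BMT trees: 128 segments of 32 bytes (one 4096-byte chunk),
	// shared by at most DefaultPoolSize concurrent hashing operations.
	pool := bmt.NewTreePool(sha3.NewKeccak256, bmt.DefaultSegmentCount, bmt.DefaultPoolSize)
	defer pool.Drain(0)

	hasher := bmt.New(pool)

	chunk := make([]byte, 4096) // an all-zero chunk, just for illustration
	hasher.Reset()              // must be called before writing
	hasher.Write(chunk)
	root := hasher.Sum(nil)

	fmt.Printf("bmt root: %x\n", root)
}

Because Reset hands the Tree back to the pool, the same Hasher value can be reused for the next chunk without reallocating the tree, which is the point of the TreePool design.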
85	bmt/bmt_r.go	Normal file
@@ -0,0 +1,85 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt

import (
	"hash"
)

// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
	span    int
	section int
	cap     int
	h       hash.Hash
}

// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
	h := hasher()
	hashsize := h.Size()
	maxsize := hashsize * count
	c := 2
	for ; c < count; c *= 2 {
	}
	if c > 2 {
		c /= 2
	}
	return &RefHasher{
		section: 2 * hashsize,
		span:    c * hashsize,
		cap:     maxsize,
		h:       h,
	}
}

// Hash returns the BMT hash of the byte slice;
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
	if len(d) > rh.cap {
		d = d[:rh.cap]
	}

	return rh.hash(d, rh.span)
}

func (rh *RefHasher) hash(d []byte, s int) []byte {
	l := len(d)
	left := d
	var right []byte
	if l > rh.section {
		for ; s >= l; s /= 2 {
		}
		left = rh.hash(d[:s], s)
		right = d[s:]
		if l-s > rh.section/2 {
			right = rh.hash(right, s)
		}
	}
	defer rh.h.Reset()
	rh.h.Write(left)
	rh.h.Write(right)
	h := rh.h.Sum(nil)
	return h
}
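To make the recursion above concrete, here is a short sketch of what RefHasher computes for a 96-byte input with 64-byte sections, matching the [65,96] case spelled out in the test file below: the root is sha3(sha3(data[:64]), data[64:]). The import paths are the ones used elsewhere in this change; the keccak helper is purely illustrative.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

func main() {
	data := make([]byte, 96)
	for i := range data {
		data[i] = byte(i)
	}
	got := bmt.NewRefHasher(sha3.NewKeccak256, 128).Hash(data)

	// Manual reconstruction of the same value: hash the first full double
	// segment, then hash that digest together with the trailing 32 bytes.
	keccak := func(parts ...[]byte) []byte {
		h := sha3.NewKeccak256()
		for _, p := range parts {
			h.Write(p)
		}
		return h.Sum(nil)
	}
	want := keccak(keccak(data[:64]), data[64:])

	fmt.Println("match:", bytes.Equal(got, want))
}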
481	bmt/bmt_test.go	Normal file
@@ -0,0 +1,481 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bmt

import (
	"bytes"
	crand "crypto/rand"
	"fmt"
	"hash"
	"io"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto/sha3"
)

const (
	maxproccnt = 8
)

// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
	hashFunc := sha3.NewKeccak256

	sha3 := func(data ...[]byte) []byte {
		h := hashFunc()
		for _, v := range data {
			h.Write(v)
		}
		return h.Sum(nil)
	}

	// the test struct is used to specify the expected BMT hash for data
	// lengths between "from" and "to"
	type test struct {
		from     int64
		to       int64
		expected func([]byte) []byte
	}

	var tests []*test

	// all lengths in [0,64] should be:
	//
	//   sha3(data)
	//
	tests = append(tests, &test{
		from: 0,
		to:   64,
		expected: func(data []byte) []byte {
			return sha3(data)
		},
	})

	// all lengths in [65,96] should be:
	//
	//   sha3(
	//     sha3(data[:64])
	//     data[64:]
	//   )
	//
	tests = append(tests, &test{
		from: 65,
		to:   96,
		expected: func(data []byte) []byte {
			return sha3(sha3(data[:64]), data[64:])
		},
	})

	// all lengths in [97,128] should be:
	//
	//   sha3(
	//     sha3(data[:64])
	//     sha3(data[64:])
	//   )
	//
	tests = append(tests, &test{
		from: 97,
		to:   128,
		expected: func(data []byte) []byte {
			return sha3(sha3(data[:64]), sha3(data[64:]))
		},
	})

	// all lengths in [129,160] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     data[128:]
	//   )
	//
	tests = append(tests, &test{
		from: 129,
		to:   160,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
		},
	})

	// all lengths in [161,192] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(data[128:])
	//   )
	//
	tests = append(tests, &test{
		from: 161,
		to:   192,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
		},
	})

	// all lengths in [193,224] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(
	//       sha3(data[128:192])
	//       data[192:]
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 193,
		to:   224,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
		},
	})

	// all lengths in [225,256] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(
	//       sha3(data[128:192])
	//       sha3(data[192:])
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 225,
		to:   256,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
		},
	})

	// run the tests
	for _, x := range tests {
		for length := x.from; length <= x.to; length++ {
			t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
				data := make([]byte, length)
				if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
					t.Fatal(err)
				}
				expected := x.expected(data)
				actual := NewRefHasher(hashFunc, 128).Hash(data)
				if !bytes.Equal(actual, expected) {
					t.Fatalf("expected %x, got %x", expected, actual)
				}
			})
		}
	}
}

func testDataReader(l int) (r io.Reader) {
	return io.LimitReader(crand.Reader, int64(l))
}

func TestHasherCorrectness(t *testing.T) {
	err := testHasher(testBaseHasher)
	if err != nil {
		t.Fatal(err)
	}
}

func testHasher(f func(BaseHasher, []byte, int, int) error) error {
	tdata := testDataReader(4128)
	data := make([]byte, 4128)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	size := hasher().Size()
	counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}

	var err error
	for _, count := range counts {
		max := count * size
		incr := 1
		for n := 0; n <= max+incr; n += incr {
			err = f(hasher, data, n, count)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func TestHasherReuseWithoutRelease(t *testing.T) {
	testHasherReuse(1, t)
}

func TestHasherReuseWithRelease(t *testing.T) {
	testHasherReuse(maxproccnt, t)
}

func testHasherReuse(i int, t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, i)
	defer pool.Drain(0)
	bmt := New(pool)

	for i := 0; i < 500; i++ {
		n := rand.Intn(4096)
		tdata := testDataReader(n)
		data := make([]byte, n)
		tdata.Read(data)

		err := testHasherCorrectness(bmt, hasher, data, n, 128)
		if err != nil {
			t.Fatal(err)
		}
	}
}

func TestHasherConcurrency(t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, maxproccnt)
	defer pool.Drain(0)
	wg := sync.WaitGroup{}
	cycles := 100
	wg.Add(maxproccnt * cycles)
	errc := make(chan error)

	for p := 0; p < maxproccnt; p++ {
		for i := 0; i < cycles; i++ {
			go func() {
				bmt := New(pool)
				n := rand.Intn(4096)
				tdata := testDataReader(n)
				data := make([]byte, n)
				tdata.Read(data)
				err := testHasherCorrectness(bmt, hasher, data, n, 128)
				wg.Done()
				if err != nil {
					errc <- err
				}
			}()
		}
	}
	go func() {
		wg.Wait()
		close(errc)
	}()
	var err error
	select {
	case <-time.NewTimer(5 * time.Second).C:
		err = fmt.Errorf("timed out")
	case err = <-errc:
	}
	if err != nil {
		t.Fatal(err)
	}
}

func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
	pool := NewTreePool(hasher, count, 1)
	defer pool.Drain(0)
	bmt := New(pool)
	return testHasherCorrectness(bmt, hasher, d, n, count)
}

func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
	data := d[:n]
	rbmt := NewRefHasher(hasher, count)
	exp := rbmt.Hash(data)
	timeout := time.NewTimer(time.Second)
	c := make(chan error)

	go func() {
		bmt.Reset()
		bmt.Write(data)
		got := bmt.Sum(nil)
		if !bytes.Equal(got, exp) {
			c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
		}
		close(c)
	}()
	select {
	case <-timeout.C:
		err = fmt.Errorf("BMT hash calculation timed out")
	case err = <-c:
	}
	return err
}

func BenchmarkSHA3_4k(t *testing.B)   { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B)   { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B)   { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }

func BenchmarkBMTBaseline_4k(t *testing.B)   { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B)   { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B)   { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }

func BenchmarkRefHasher_4k(t *testing.B)   { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B)   { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B)   { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }

func BenchmarkHasher_4k(t *testing.B)   { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B)   { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B)   { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }

func BenchmarkHasherNoReuse_4k(t *testing.B)   { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B)   { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B)   { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }

func BenchmarkHasherReuse_4k(t *testing.B)   { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B)   { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B)   { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }

// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
// doing it on n maxproccnt each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
	tdata := testDataReader(64)
	data := make([]byte, 64)
	tdata.Read(data)
	hasher := sha3.NewKeccak256

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		count := int32((n-1)/hasher().Size() + 1)
		wg := sync.WaitGroup{}
		wg.Add(maxproccnt)
		var i int32
		for j := 0; j < maxproccnt; j++ {
			go func() {
				defer wg.Done()
				h := hasher()
				for atomic.AddInt32(&i, 1) < count {
					h.Reset()
					h.Write(data)
					h.Sum(nil)
				}
			}()
		}
		wg.Wait()
	}
}

func benchmarkHasher(n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	size := 1
	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, size)
	bmt := New(pool)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		bmt.Reset()
		bmt.Write(data)
		bmt.Sum(nil)
	}
}

func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, poolsize)
	cycles := 200

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		wg := sync.WaitGroup{}
		wg.Add(cycles)
		for j := 0; j < cycles; j++ {
			bmt := New(pool)
			go func() {
				defer wg.Done()
				bmt.Reset()
				bmt.Write(data)
				bmt.Sum(nil)
			}()
		}
		wg.Wait()
	}
}

func benchmarkSHA3(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	h := hasher()

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		h.Reset()
		h.Write(data)
		h.Sum(nil)
	}
}

func benchmarkRefHasher(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	rbmt := NewRefHasher(hasher, 128)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		rbmt.Hash(data)
	}
}
@@ -21,18 +21,18 @@ variable which Travis CI makes available to certain builds.
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
version that is available in the main Ubuntu repository. In order to make this possible,
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
golang-1.8, which is co-installable alongside the regular golang package. PPA dependencies
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies

## Building Packages Locally (for testing)

You need to run Ubuntu to do test packaging.

Add the gophers PPA and install Go 1.8 and Debian packaging tools:
Add the gophers PPA and install Go 1.9 and Debian packaging tools:

    $ sudo apt-add-repository ppa:gophers/ubuntu/archive
    $ sudo apt-get update
    $ sudo apt-get install build-essential golang-1.8 devscripts debhelper
    $ sudo apt-get install build-essential golang-1.9 devscripts debhelper

Create the source packages:
10	build/ci.go
@ -119,7 +119,8 @@ var (
|
||||
// Distros for which packages are created.
|
||||
// Note: vivid is unsupported because there is no golang-1.6 package for it.
|
||||
// Note: wily is unsupported because it was officially deprecated on lanchpad.
|
||||
debDistros = []string{"trusty", "xenial", "yakkety", "zesty"}
|
||||
// Note: yakkety is unsupported because it was officially deprecated on lanchpad.
|
||||
debDistros = []string{"trusty", "xenial", "zesty"}
|
||||
)
|
||||
|
||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||
@ -175,7 +176,7 @@ func doInstall(cmdline []string) {
|
||||
|
||||
// Check Go version. People regularly open issues about compilation
|
||||
// failure with outdated Go. This should save them the trouble.
|
||||
if runtime.Version() < "go1.7" && !strings.HasPrefix(runtime.Version(), "devel") {
|
||||
if runtime.Version() < "go1.7" && !strings.Contains(runtime.Version(), "devel") {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
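The guard above compares runtime.Version() lexically against "go1.7" and skips the check for development toolchains; switching from HasPrefix to Contains lets the "devel" marker be detected even when it is not at the start of the version string. A standalone sketch of the same kind of guard, using only the standard library; the threshold and messages are illustrative, not the exact ones in build/ci.go.

package main

import (
	"fmt"
	"os"
	"runtime"
	"strings"
)

// checkGoVersion warns when the toolchain looks older than the minimum,
// but lets development ("devel") builds through unconditionally.
func checkGoVersion(min string) {
	v := runtime.Version()
	if strings.Contains(v, "devel") {
		return // development toolchains embed "devel" in the version string
	}
	// Lexical comparison, the same simple trick as in build/ci.go.
	// Note its limit: "go1.10" sorts before "go1.7".
	if v < min {
		fmt.Fprintf(os.Stderr, "found %s, need at least %s\n", v, min)
		os.Exit(1)
	}
}

func main() { checkGoVersion("go1.7") }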
@ -249,10 +250,7 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
|
||||
}
|
||||
|
||||
func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
|
||||
cmd := exec.Command(gocmd, subcmd)
|
||||
cmd.Args = append(cmd.Args, args...)
|
||||
|
||||
cmd := build.GoTool(subcmd, args...)
|
||||
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
|
||||
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
|
||||
// Work around issue by allowing multiple definitions for <1.8 builds.
|
||||
|
@ -2,7 +2,7 @@ Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.8
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.9
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
|
@ -5,7 +5,7 @@
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.8/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
|
@ -45,13 +45,16 @@ var (
|
||||
// paths with any of these prefixes will be skipped
|
||||
skipPrefixes = []string{
|
||||
// boring stuff
|
||||
"vendor/", "tests/files/", "build/",
|
||||
"vendor/", "tests/testdata/", "build/",
|
||||
// don't relicense vendored sources
|
||||
"crypto/sha3/", "crypto/ecies/", "log/",
|
||||
"crypto/secp256k1/curve.go",
|
||||
"consensus/ethash/xor.go",
|
||||
"internal/jsre/deps",
|
||||
"cmd/internal/browser",
|
||||
"consensus/ethash/xor.go",
|
||||
"crypto/bn256/",
|
||||
"crypto/ecies/",
|
||||
"crypto/secp256k1/curve.go",
|
||||
"crypto/sha3/",
|
||||
"internal/jsre/deps",
|
||||
"log/",
|
||||
// don't license generated files
|
||||
"contracts/chequebook/contract/",
|
||||
"contracts/ens/contract/",
|
||||
|
@ -46,8 +46,5 @@ func disasmCmd(ctx *cli.Context) error {
|
||||
|
||||
code := strings.TrimSpace(string(in[:]))
|
||||
fmt.Printf("%v\n", code)
|
||||
if err = asm.PrintDisassembled(code); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return asm.PrintDisassembled(code)
|
||||
}
|
||||
|
71
cmd/evm/json_logger.go
Normal file
@ -0,0 +1,71 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"io"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/core/vm"
)

type JSONLogger struct {
	encoder *json.Encoder
	cfg     *vm.LogConfig
}

func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
	return &JSONLogger{json.NewEncoder(writer), cfg}
}

// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
	log := vm.StructLog{
		Pc:         pc,
		Op:         op,
		Gas:        gas + cost,
		GasCost:    cost,
		MemorySize: memory.Len(),
		Storage:    nil,
		Depth:      depth,
		Err:        err,
	}
	if !l.cfg.DisableMemory {
		log.Memory = memory.Data()
	}
	if !l.cfg.DisableStack {
		log.Stack = stack.Data()
	}
	return l.encoder.Encode(log)
}

// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
	type endLog struct {
		Output  string              `json:"output"`
		GasUsed math.HexOrDecimal64 `json:"gasUsed"`
		Time    time.Duration       `json:"time"`
		Err     string              `json:"error,omitempty"`
	}
	if err != nil {
		return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()})
	}
	return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
}
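Because the logger writes one JSON object per EVM step via CaptureState, plus a final summary object from CaptureEnd, the machine-readable trace is easy to post-process line by line. A minimal sketch of a consumer reading such a trace from stdin; the field names pc, gas, gasCost and depth are assumptions about StructLog's JSON tags and should be checked against real output before relying on them.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// step picks out a few numeric fields; unknown fields are ignored by
// encoding/json, and the final CaptureEnd summary simply decodes to zeros.
type step struct {
	Pc      uint64 `json:"pc"`      // assumed tag
	Gas     uint64 `json:"gas"`     // assumed tag
	GasCost uint64 `json:"gasCost"` // assumed tag
	Depth   int    `json:"depth"`   // assumed tag
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		var s step
		if err := json.Unmarshal(sc.Bytes(), &s); err != nil {
			continue // skip malformed lines
		}
		fmt.Printf("pc=%d depth=%d gas=%d cost=%d\n", s.Pc, s.Depth, s.Gas, s.GasCost)
	}
}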
|
@ -35,13 +35,25 @@ var (
|
||||
Name: "debug",
|
||||
Usage: "output full trace logs",
|
||||
}
|
||||
MemProfileFlag = cli.StringFlag{
|
||||
Name: "memprofile",
|
||||
Usage: "creates a memory profile at the given path",
|
||||
}
|
||||
CPUProfileFlag = cli.StringFlag{
|
||||
Name: "cpuprofile",
|
||||
Usage: "creates a CPU profile at the given path",
|
||||
}
|
||||
StatDumpFlag = cli.BoolFlag{
|
||||
Name: "statdump",
|
||||
Usage: "displays stack and heap memory information",
|
||||
}
|
||||
CodeFlag = cli.StringFlag{
|
||||
Name: "code",
|
||||
Usage: "EVM code",
|
||||
}
|
||||
CodeFileFlag = cli.StringFlag{
|
||||
Name: "codefile",
|
||||
Usage: "file containing EVM code",
|
||||
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
|
||||
}
|
||||
GasFlag = cli.Uint64Flag{
|
||||
Name: "gas",
|
||||
@ -78,6 +90,30 @@ var (
|
||||
Name: "nogasmetering",
|
||||
Usage: "disable gas metering",
|
||||
}
|
||||
GenesisFlag = cli.StringFlag{
|
||||
Name: "prestate",
|
||||
Usage: "JSON file with prestate (genesis) config",
|
||||
}
|
||||
MachineFlag = cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output trace logs in machine readable format (json)",
|
||||
}
|
||||
SenderFlag = cli.StringFlag{
|
||||
Name: "sender",
|
||||
Usage: "The transaction origin",
|
||||
}
|
||||
ReceiverFlag = cli.StringFlag{
|
||||
Name: "receiver",
|
||||
Usage: "The transaction receiver (execution context)",
|
||||
}
|
||||
DisableMemoryFlag = cli.BoolFlag{
|
||||
Name: "nomemory",
|
||||
Usage: "disable memory output",
|
||||
}
|
||||
DisableStackFlag = cli.BoolFlag{
|
||||
Name: "nostack",
|
||||
Usage: "disable stack output",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -93,11 +129,21 @@ func init() {
|
||||
DumpFlag,
|
||||
InputFlag,
|
||||
DisableGasMeteringFlag,
|
||||
MemProfileFlag,
|
||||
CPUProfileFlag,
|
||||
StatDumpFlag,
|
||||
GenesisFlag,
|
||||
MachineFlag,
|
||||
SenderFlag,
|
||||
ReceiverFlag,
|
||||
DisableMemoryFlag,
|
||||
DisableStackFlag,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
compileCommand,
|
||||
disasmCommand,
|
||||
runCommand,
|
||||
stateTestCommand,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -18,9 +18,11 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
goruntime "runtime"
|
||||
@ -28,11 +30,13 @@ import (
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/core/vm/runtime"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
@ -44,93 +48,180 @@ var runCommand = cli.Command{
|
||||
Description: `The run command runs arbitrary EVM code.`,
|
||||
}
|
||||
|
||||
// readGenesis will read the given JSON format genesis file and return
|
||||
// the initialized Genesis structure
|
||||
func readGenesis(genesisPath string) *core.Genesis {
|
||||
// Make sure we have a valid genesis JSON
|
||||
//genesisPath := ctx.Args().First()
|
||||
if len(genesisPath) == 0 {
|
||||
utils.Fatalf("Must supply path to genesis JSON file")
|
||||
}
|
||||
file, err := os.Open(genesisPath)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read genesis file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.NewDecoder(file).Decode(genesis); err != nil {
|
||||
utils.Fatalf("invalid genesis file: %v", err)
|
||||
}
|
||||
return genesis
|
||||
}
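For illustration, the file readGenesis expects is an ordinary go-ethereum genesis/prestate JSON. A small standard-library-only sketch that sanity-checks such a file before passing it to --prestate; the keys in the sample (config, difficulty, gasLimit, alloc) are the usual genesis keys, but core.Genesis remains the authoritative schema and the values here are made up.

package main

import (
	"encoding/json"
	"fmt"
)

// sample is an illustrative prestate; a real file would come from --prestate.
const sample = `{
  "config":     {"chainId": 1},
  "difficulty": "0x20000",
  "gasLimit":   "0x2fefd8",
  "alloc": {
    "0x0000000000000000000000000000000000000001": {"balance": "0x0de0b6b3a7640000"}
  }
}`

func main() {
	var genesis map[string]json.RawMessage
	if err := json.Unmarshal([]byte(sample), &genesis); err != nil {
		panic(err)
	}
	for _, key := range []string{"config", "difficulty", "gasLimit", "alloc"} {
		if _, ok := genesis[key]; !ok {
			fmt.Println("missing key:", key)
		}
	}
	fmt.Println("prestate parses with", len(genesis), "top-level keys")
}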
|
||||
|
||||
func runCmd(ctx *cli.Context) error {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
logconfig := &vm.LogConfig{
|
||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||
}
|
||||
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
statedb, _ = state.New(common.Hash{}, db)
|
||||
sender = common.StringToAddress("sender")
|
||||
logger = vm.NewStructLogger(nil)
|
||||
tracer vm.Tracer
|
||||
debugLogger *vm.StructLogger
|
||||
statedb *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.StringToAddress("sender")
|
||||
receiver = common.StringToAddress("receiver")
|
||||
)
|
||||
if ctx.GlobalBool(MachineFlag.Name) {
|
||||
tracer = NewJSONLogger(logconfig, os.Stdout)
|
||||
} else if ctx.GlobalBool(DebugFlag.Name) {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
tracer = debugLogger
|
||||
} else {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
}
|
||||
if ctx.GlobalString(GenesisFlag.Name) != "" {
|
||||
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
|
||||
_, statedb = gen.ToBlock()
|
||||
chainConfig = gen.Config
|
||||
} else {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ = state.New(common.Hash{}, state.NewDatabase(db))
|
||||
}
|
||||
if ctx.GlobalString(SenderFlag.Name) != "" {
|
||||
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
|
||||
}
|
||||
statedb.CreateAccount(sender)
|
||||
|
||||
if ctx.GlobalString(ReceiverFlag.Name) != "" {
|
||||
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
|
||||
}
|
||||
|
||||
var (
|
||||
code []byte
|
||||
ret []byte
|
||||
err error
|
||||
)
|
||||
if fn := ctx.Args().First(); len(fn) > 0 {
|
||||
// The '--code' or '--codefile' flag overrides code in state
|
||||
if ctx.GlobalString(CodeFileFlag.Name) != "" {
|
||||
var hexcode []byte
|
||||
var err error
|
||||
// If - is specified, it means that code comes from stdin
|
||||
if ctx.GlobalString(CodeFileFlag.Name) == "-" {
|
||||
//Try reading from stdin
|
||||
if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
// Codefile with hex assembly
|
||||
if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
|
||||
|
||||
} else if ctx.GlobalString(CodeFlag.Name) != "" {
|
||||
code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
|
||||
} else if fn := ctx.Args().First(); len(fn) > 0 {
|
||||
// EASM-file to compile
|
||||
src, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bin, err := compiler.Compile(fn, src, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
code = common.Hex2Bytes(bin)
|
||||
} else if ctx.GlobalString(CodeFlag.Name) != "" {
|
||||
code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
|
||||
} else {
|
||||
var hexcode []byte
|
||||
if ctx.GlobalString(CodeFileFlag.Name) != "" {
|
||||
var err error
|
||||
hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name))
|
||||
if err != nil {
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
hexcode, err = ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
|
||||
}
|
||||
|
||||
initialGas := ctx.GlobalUint64(GasFlag.Name)
|
||||
runtimeConfig := runtime.Config{
|
||||
Origin: sender,
|
||||
State: statedb,
|
||||
GasLimit: ctx.GlobalUint64(GasFlag.Name),
|
||||
GasLimit: initialGas,
|
||||
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
|
||||
Value: utils.GlobalBig(ctx, ValueFlag.Name),
|
||||
EVMConfig: vm.Config{
|
||||
Tracer: logger,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name),
|
||||
Tracer: tracer,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||
DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
|
||||
},
|
||||
}
|
||||
|
||||
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
|
||||
f, err := os.Create(cpuProfilePath)
|
||||
if err != nil {
|
||||
fmt.Println("could not create CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
fmt.Println("could not start CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
if chainConfig != nil {
|
||||
runtimeConfig.ChainConfig = chainConfig
|
||||
}
|
||||
tstart := time.Now()
|
||||
var leftOverGas uint64
|
||||
if ctx.GlobalBool(CreateFlag.Name) {
|
||||
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
|
||||
ret, _, err = runtime.Create(input, &runtimeConfig)
|
||||
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
|
||||
} else {
|
||||
receiver := common.StringToAddress("receiver")
|
||||
statedb.SetCode(receiver, code)
|
||||
|
||||
ret, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
if len(code) > 0 {
|
||||
statedb.SetCode(receiver, code)
|
||||
}
|
||||
ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
}
|
||||
execTime := time.Since(tstart)
|
||||
|
||||
if ctx.GlobalBool(DumpFlag.Name) {
|
||||
statedb.Commit(true)
|
||||
statedb.IntermediateRoot(true)
|
||||
fmt.Println(string(statedb.Dump()))
|
||||
}
|
||||
|
||||
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
|
||||
f, err := os.Create(memProfilePath)
|
||||
if err != nil {
|
||||
fmt.Println("could not create memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.WriteHeapProfile(f); err != nil {
|
||||
fmt.Println("could not write memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, logger.StructLogs())
|
||||
if debugLogger != nil {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||
vm.WriteLogs(os.Stderr, statedb.Logs())
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(StatDumpFlag.Name) {
|
||||
var mem goruntime.MemStats
|
||||
goruntime.ReadMemStats(&mem)
|
||||
fmt.Fprintf(os.Stderr, `evm execution time: %v
|
||||
@ -138,14 +229,18 @@ heap objects: %d
|
||||
allocations: %d
|
||||
total allocations: %d
|
||||
GC calls: %d
|
||||
Gas used: %d
|
||||
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC)
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
|
||||
}
|
||||
if tracer != nil {
|
||||
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime, err)
|
||||
} else {
|
||||
fmt.Printf("0x%x\n", ret)
|
||||
if err != nil {
|
||||
fmt.Printf(" error: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("0x%x", ret)
|
||||
if err != nil {
|
||||
fmt.Printf(" error: %v", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
}
|
||||
|
119
cmd/evm/staterunner.go
Normal file
@ -0,0 +1,119 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var stateTestCommand = cli.Command{
|
||||
Action: stateTestCmd,
|
||||
Name: "statetest",
|
||||
Usage: "executes the given state tests",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
type StatetestResult struct {
|
||||
Name string `json:"name"`
|
||||
Pass bool `json:"pass"`
|
||||
Fork string `json:"fork"`
|
||||
Error string `json:"error,omitempty"`
|
||||
State *state.Dump `json:"state,omitempty"`
|
||||
}
|
||||
|
||||
func stateTestCmd(ctx *cli.Context) error {
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("path-to-test argument required")
|
||||
}
|
||||
// Configure the go-ethereum logger
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
// Configure the EVM logger
|
||||
config := &vm.LogConfig{
|
||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||
}
|
||||
var (
|
||||
tracer vm.Tracer
|
||||
debugger *vm.StructLogger
|
||||
)
|
||||
switch {
|
||||
case ctx.GlobalBool(MachineFlag.Name):
|
||||
tracer = NewJSONLogger(config, os.Stderr)
|
||||
|
||||
case ctx.GlobalBool(DebugFlag.Name):
|
||||
debugger = vm.NewStructLogger(config)
|
||||
tracer = debugger
|
||||
|
||||
default:
|
||||
debugger = vm.NewStructLogger(config)
|
||||
}
|
||||
// Load the test content from the input file
|
||||
src, err := ioutil.ReadFile(ctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var tests map[string]tests.StateTest
|
||||
if err = json.Unmarshal(src, &tests); err != nil {
|
||||
return err
|
||||
}
|
||||
// Iterate over all the tests, run them and aggregate the results
|
||||
cfg := vm.Config{
|
||||
Tracer: tracer,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||
}
|
||||
results := make([]StatetestResult, 0, len(tests))
|
||||
for key, test := range tests {
|
||||
for _, st := range test.Subtests() {
|
||||
// Run the test and aggregate the result
|
||||
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
||||
if state, err := test.Run(st, cfg); err != nil {
|
||||
// Test failed, mark as so and dump any state to aid debugging
|
||||
result.Pass, result.Error = false, err.Error()
|
||||
if ctx.GlobalBool(DumpFlag.Name) && state != nil {
|
||||
dump := state.RawDump()
|
||||
result.State = &dump
|
||||
}
|
||||
}
|
||||
results = append(results, *result)
|
||||
|
||||
// Print any structured logs collected
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
if debugger != nil {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, debugger.StructLogs())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
out, _ := json.MarshalIndent(results, "", " ")
|
||||
fmt.Println(string(out))
|
||||
return nil
|
||||
}
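stateTestCmd prints the aggregated results as an indented JSON array of StatetestResult objects on stdout (the JSON tracer, when enabled, goes to stderr). A small sketch of post-processing that report to list only the failing cases; it relies solely on the JSON field names declared above (name, pass, fork, error) and reads the report from stdin.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// result mirrors the JSON shape of StatetestResult above;
// the optional state dump is ignored here.
type result struct {
	Name  string `json:"name"`
	Pass  bool   `json:"pass"`
	Fork  string `json:"fork"`
	Error string `json:"error,omitempty"`
}

func main() {
	var results []result
	if err := json.NewDecoder(os.Stdin).Decode(&results); err != nil {
		fmt.Fprintln(os.Stderr, "cannot decode report:", err)
		os.Exit(1)
	}
	for _, r := range results {
		if !r.Pass {
			fmt.Printf("FAIL %s (%s): %s\n", r.Name, r.Fork, r.Error)
		}
	}
}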
|
@ -102,7 +102,7 @@ func main() {
|
||||
if amount == 1 {
|
||||
amounts[i] = strings.TrimSuffix(amounts[i], "s")
|
||||
}
|
||||
// Calcualte the period for th enext tier and format it
|
||||
// Calculate the period for the next tier and format it
|
||||
period := *minutesFlag * int(math.Pow(3, float64(i)))
|
||||
periods[i] = fmt.Sprintf("%d mins", period)
|
||||
if period%60 == 0 {
|
||||
@ -413,8 +413,9 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
var address common.Address
|
||||
for _, file := range gist.Files {
|
||||
if len(file.Content) == 2+common.AddressLength*2 {
|
||||
address = common.HexToAddress(file.Content)
|
||||
content := strings.TrimSpace(file.Content)
|
||||
if len(content) == 2+common.AddressLength*2 {
|
||||
address = common.HexToAddress(content)
|
||||
}
|
||||
}
|
||||
if address == (common.Address{}) {
|
||||
|
@ -31,25 +31,40 @@ import (
|
||||
|
||||
var (
|
||||
walletCommand = cli.Command{
|
||||
Name: "wallet",
|
||||
Usage: "Import Ethereum presale wallets",
|
||||
Action: utils.MigrateFlags(importWallet),
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Name: "wallet",
|
||||
Usage: "Manage Ethereum presale wallets",
|
||||
ArgsUsage: "",
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Description: `
|
||||
geth wallet [options] /path/to/my/presale.wallet
|
||||
geth wallet import /path/to/my/presale.wallet
|
||||
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.`,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
|
||||
`,
|
||||
Name: "import",
|
||||
Usage: "Import Ethereum presale wallet",
|
||||
ArgsUsage: "<keyFile>",
|
||||
Action: utils.MigrateFlags(importWallet),
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
geth wallet [options] /path/to/my/presale.wallet
|
||||
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
accountCommand = cli.Command{
|
||||
Name: "account",
|
||||
Usage: "Manage accounts",
|
||||
@ -218,7 +233,7 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
|
||||
return accounts.Account{}, ""
|
||||
}
|
||||
|
||||
// getPassPhrase retrieves the passwor associated with an account, either fetched
|
||||
// getPassPhrase retrieves the password associated with an account, either fetched
|
||||
// from a list of preloaded passphrases, or requested interactively from the user.
|
||||
func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) string {
|
||||
// If a list of passwords was supplied, retrieve from them
|
||||
|
@ -44,21 +44,21 @@ func tmpDatadirWithKeystore(t *testing.T) string {
|
||||
|
||||
func TestAccountListEmpty(t *testing.T) {
|
||||
geth := runGeth(t, "account", "list")
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestAccountList(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t, "account", "list", "--datadir", datadir)
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
if runtime.GOOS == "windows" {
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa
|
||||
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz
|
||||
`)
|
||||
} else {
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa
|
||||
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz
|
||||
@ -68,20 +68,20 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/k
|
||||
|
||||
func TestAccountNew(t *testing.T) {
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Repeat passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
|
||||
geth.ExpectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
|
||||
}
|
||||
|
||||
func TestAccountNewBadRepeat(t *testing.T) {
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "something"}}
|
||||
@ -95,8 +95,8 @@ func TestAccountUpdate(t *testing.T) {
|
||||
geth := runGeth(t, "account", "update",
|
||||
"--datadir", datadir, "--lightkdf",
|
||||
"f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
@ -108,8 +108,8 @@ Repeat passphrase: {{.InputLine "foobar2"}}
|
||||
|
||||
func TestWalletImport(t *testing.T) {
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foo"}}
|
||||
Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
@ -123,8 +123,8 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
|
||||
func TestWalletImportBadPassword(t *testing.T) {
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong"}}
|
||||
Fatal: could not decrypt key with given passphrase
|
||||
@ -137,19 +137,19 @@ func TestUnlockFlag(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
"=0xf466859eAD1932D743d622CB74FC058882E8648A",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -160,8 +160,8 @@ func TestUnlockFlagWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong1"}}
|
||||
@ -180,22 +180,22 @@ func TestUnlockFlagMultiIndex(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account 0 | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Unlocking account 2 | Attempt 1/3
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
"=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
|
||||
"=0x289d485D9771714CCe91D3393D764E1311907ACc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -207,15 +207,15 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--password", "testdata/passwords.txt", "--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
"=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
|
||||
"=0x289d485D9771714CCe91D3393D764E1311907ACc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -226,8 +226,8 @@ func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--password", "testdata/wrong-passwords.txt", "--unlock", "0,2")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Fatal: Failed to unlock account 0 (could not decrypt key with given passphrase)
|
||||
`)
|
||||
}
|
||||
@ -238,14 +238,14 @@ func TestUnlockFlagAmbiguous(t *testing.T) {
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
|
||||
// Helper for the expect template, returns absolute keystore path.
|
||||
geth.setTemplateFunc("keypath", func(file string) string {
|
||||
geth.SetTemplateFunc("keypath", func(file string) string {
|
||||
abs, _ := filepath.Abs(filepath.Join(store, file))
|
||||
return abs
|
||||
})
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
@ -257,14 +257,14 @@ Your passphrase unlocked keystore://{{keypath "1"}}
|
||||
In order to avoid this warning, you need to remove the following duplicate key files:
|
||||
keystore://{{keypath "2"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
"=0xf466859eAD1932D743d622CB74FC058882E8648A",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -275,14 +275,14 @@ func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
|
||||
// Helper for the expect template, returns absolute keystore path.
|
||||
geth.setTemplateFunc("keypath", func(file string) string {
|
||||
geth.SetTemplateFunc("keypath", func(file string) string {
|
||||
abs, _ := filepath.Abs(filepath.Join(store, file))
|
||||
return abs
|
||||
})
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong"}}
|
||||
@ -292,5 +292,5 @@ Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
|
||||
Testing your passphrase against all of them...
|
||||
Fatal: None of the listed files could be unlocked.
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
@ -312,7 +312,7 @@ func dump(ctx *cli.Context) error {
|
||||
fmt.Println("{}")
|
||||
utils.Fatalf("block not found")
|
||||
} else {
|
||||
state, err := state.New(block.Root(), chainDb)
|
||||
state, err := state.New(block.Root(), state.NewDatabase(chainDb))
|
||||
if err != nil {
|
||||
utils.Fatalf("could not create new state: %v", err)
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
|
||||
"github.com/naoina/toml"
|
||||
)
|
||||
|
||||
@ -42,7 +43,7 @@ var (
|
||||
Name: "dumpconfig",
|
||||
Usage: "Show configuration values",
|
||||
ArgsUsage: "",
|
||||
Flags: append(nodeFlags, rpcFlags...),
|
||||
Flags: append(append(nodeFlags, rpcFlags...), whisperFlags...),
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `The dumpconfig command shows configuration values.`,
|
||||
}
|
||||
@ -76,6 +77,7 @@ type ethstatsConfig struct {
|
||||
|
||||
type gethConfig struct {
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
}
|
||||
@ -99,8 +101,8 @@ func defaultNodeConfig() node.Config {
|
||||
cfg := node.DefaultConfig
|
||||
cfg.Name = clientIdentifier
|
||||
cfg.Version = params.VersionWithCommit(gitCommit)
|
||||
cfg.HTTPModules = append(cfg.HTTPModules, "eth")
|
||||
cfg.WSModules = append(cfg.WSModules, "eth")
|
||||
cfg.HTTPModules = append(cfg.HTTPModules, "eth", "shh")
|
||||
cfg.WSModules = append(cfg.WSModules, "eth", "shh")
|
||||
cfg.IPCPath = "geth.ipc"
|
||||
return cfg
|
||||
}
|
||||
@ -109,6 +111,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
// Load defaults.
|
||||
cfg := gethConfig{
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
}
|
||||
|
||||
@ -130,19 +133,37 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
|
||||
}
|
||||
|
||||
utils.SetShhConfig(ctx, stack, &cfg.Shh)
|
||||
|
||||
return stack, cfg
|
||||
}
|
||||
|
||||
// enableWhisper returns true in case one of the whisper flags is set.
|
||||
func enableWhisper(ctx *cli.Context) bool {
|
||||
for _, flag := range whisperFlags {
|
||||
if ctx.GlobalIsSet(flag.GetName()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
stack, cfg := makeConfigNode(ctx)
|
||||
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
// Whisper must be explicitly enabled, but is auto-enabled in --dev mode.
|
||||
shhEnabled := ctx.GlobalBool(utils.WhisperEnabledFlag.Name)
|
||||
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
|
||||
shhEnabled := enableWhisper(ctx)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name)
|
||||
if shhEnabled || shhAutoEnabled {
|
||||
utils.RegisterShhService(stack)
|
||||
if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) {
|
||||
cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name)
|
||||
}
|
||||
utils.RegisterShhService(stack, &cfg.Shh)
|
||||
}
|
||||
|
||||
// Add the Ethereum Stats daemon if requested.
|
||||
|
@ -30,14 +30,12 @@ import (
|
||||
|
||||
var (
|
||||
consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag}
|
||||
)
|
||||
|
||||
var (
|
||||
consoleCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(localConsole),
|
||||
Name: "console",
|
||||
Usage: "Start an interactive JavaScript environment",
|
||||
Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...),
|
||||
Flags: append(append(append(nodeFlags, rpcFlags...), consoleFlags...), whisperFlags...),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
|
@ -47,15 +47,15 @@ func TestConsoleWelcome(t *testing.T) {
|
||||
"console")
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
geth.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.setTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.setTemplateFunc("gover", runtime.Version)
|
||||
geth.setTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.setTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.SetTemplateFunc("gover", runtime.Version)
|
||||
geth.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}}
|
||||
@ -66,7 +66,7 @@ at block: 0 ({{niltime}})
|
||||
|
||||
> {{.InputLine "exit"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
// Tests that a console can be attached to a running node via various means.
|
||||
@ -90,8 +90,8 @@ func TestIPCAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestHTTPAttachWelcome(t *testing.T) {
|
||||
@ -104,8 +104,8 @@ func TestHTTPAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "http://localhost:"+port, httpAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestWSAttachWelcome(t *testing.T) {
|
||||
@ -119,29 +119,29 @@ func TestWSAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ws://localhost:"+port, httpAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
|
||||
// Attach to a running geth note and terminate immediately
|
||||
attach := runGeth(t, "attach", endpoint)
|
||||
defer attach.expectExit()
|
||||
attach.stdin.Close()
|
||||
defer attach.ExpectExit()
|
||||
attach.CloseStdin()
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
attach.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.setTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.setTemplateFunc("gover", runtime.Version)
|
||||
attach.setTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.setTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.setTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.setTemplateFunc("apis", func() string { return apis })
|
||||
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.SetTemplateFunc("gover", runtime.Version)
|
||||
attach.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.SetTemplateFunc("apis", func() string { return apis })
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
attach.expect(`
|
||||
attach.Expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}}
|
||||
@ -152,7 +152,7 @@ at block: 0 ({{niltime}}){{if ipc}}
|
||||
|
||||
> {{.InputLine "exit" }}
|
||||
`)
|
||||
attach.expectExit()
|
||||
attach.ExpectExit()
|
||||
}
|
||||
|
||||
// trulyRandInt generates a crypto random integer used by the console tests to
|
||||
|
@ -89,7 +89,7 @@ func TestDAOForkBlockNewChain(t *testing.T) {
|
||||
expectVote bool
|
||||
}{
|
||||
// Test DAO Default Mainnet
|
||||
{"", params.MainNetDAOForkBlock, true},
|
||||
{"", params.MainnetChainConfig.DAOForkBlock, true},
|
||||
// test DAO Init Old Privnet
|
||||
{daoOldGenesis, nil, false},
|
||||
// test DAO Default No Fork Privnet
|
||||
@ -112,12 +112,12 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
|
||||
if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil {
|
||||
t.Fatalf("test %d: failed to write genesis file: %v", test, err)
|
||||
}
|
||||
runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
|
||||
runGeth(t, "--datadir", datadir, "init", json).WaitExit()
|
||||
} else {
|
||||
// Force chain initialization
|
||||
args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir}
|
||||
geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...)
|
||||
geth.cmd.Wait()
|
||||
geth.WaitExit()
|
||||
}
|
||||
// Retrieve the DAO config flag from the database
|
||||
path := filepath.Join(datadir, "geth", "chaindata")
|
||||
|
@ -97,14 +97,14 @@ func TestCustomGenesis(t *testing.T) {
|
||||
if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil {
|
||||
t.Fatalf("test %d: failed to write genesis file: %v", i, err)
|
||||
}
|
||||
runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
|
||||
runGeth(t, "--datadir", datadir, "init", json).WaitExit()
|
||||
|
||||
// Query the custom genesis block
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--maxpeers", "0", "--port", "0",
|
||||
"--nodiscover", "--nat", "none", "--ipcdisable",
|
||||
"--exec", tt.query, "console")
|
||||
geth.expectRegexp(tt.result)
|
||||
geth.expectExit()
|
||||
geth.ExpectRegexp(tt.result)
|
||||
geth.ExpectExit()
|
||||
}
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -55,6 +56,8 @@ var (
|
||||
utils.UnlockedAccountFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.BootnodesFlag,
|
||||
utils.BootnodesV4Flag,
|
||||
utils.BootnodesV5Flag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
@ -64,6 +67,16 @@ var (
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
utils.TxPoolGlobalSlotsFlag,
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
@ -86,7 +99,6 @@ var (
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
@ -116,6 +128,12 @@ var (
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
}
|
||||
|
||||
whisperFlags = []cli.Flag{
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.WhisperMaxMessageSizeFlag,
|
||||
utils.WhisperMinPOWFlag,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -140,6 +158,7 @@ func init() {
|
||||
attachCommand,
|
||||
javascriptCommand,
|
||||
// See misccmd.go:
|
||||
makecacheCommand,
|
||||
makedagCommand,
|
||||
versionCommand,
|
||||
bugCommand,
|
||||
@ -147,11 +166,13 @@ func init() {
|
||||
// See config.go
|
||||
dumpConfigCommand,
|
||||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
app.Flags = append(app.Flags, nodeFlags...)
|
||||
app.Flags = append(app.Flags, rpcFlags...)
|
||||
app.Flags = append(app.Flags, consoleFlags...)
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
app.Flags = append(app.Flags, whisperFlags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
@ -218,24 +239,30 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}
|
||||
stateReader := ethclient.NewClient(rpcClient)
|
||||
|
||||
// Open and self derive any wallets already attached
|
||||
// Open any wallets already attached
|
||||
for _, wallet := range stack.AccountManager().Wallets() {
|
||||
if err := wallet.Open(""); err != nil {
|
||||
log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err)
|
||||
} else {
|
||||
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||
}
|
||||
}
|
||||
// Listen for wallet event till termination
|
||||
for event := range events {
|
||||
if event.Arrive {
|
||||
switch event.Kind {
|
||||
case accounts.WalletArrived:
|
||||
if err := event.Wallet.Open(""); err != nil {
|
||||
log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err)
|
||||
}
|
||||
case accounts.WalletOpened:
|
||||
status, _ := event.Wallet.Status()
|
||||
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
|
||||
|
||||
if event.Wallet.URL().Scheme == "ledger" {
|
||||
event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
|
||||
} else {
|
||||
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", event.Wallet.Status())
|
||||
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||
}
|
||||
} else {
|
||||
|
||||
case accounts.WalletDropped:
|
||||
log.Info("Old wallet dropped", "url", event.Wallet.URL())
|
||||
event.Wallet.Close()
|
||||
}
|
||||
@ -243,10 +270,12 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}()
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("ethereum service not running: %v", err)
|
||||
}
|
||||
// Use a reduced number of threads if requested
|
||||
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
|
||||
type threaded interface {
|
||||
SetThreads(threads int)
|
||||
@ -255,6 +284,8 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
th.SetThreads(threads)
|
||||
}
|
||||
}
|
||||
// Set the gas price to the limits from the CLI and start mining
|
||||
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
|
||||
if err := ethereum.StartMining(true); err != nil {
|
||||
utils.Fatalf("Failed to start mining: %v", err)
|
||||
}
|
||||
|
@ -18,9 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -33,14 +31,27 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
makedagCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash DAG (for testing)",
|
||||
makecacheCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makecache),
|
||||
Name: "makecache",
|
||||
Usage: "Generate ethash verification cache (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `
|
||||
The makedag command generates an ethash DAG in /tmp/dag.
|
||||
The makecache command generates an ethash cache in <outputDir>.
|
||||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
`,
|
||||
}
|
||||
makedagCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash mining DAG (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `
|
||||
The makedag command generates an ethash DAG in <outputDir>.
|
||||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
@ -65,33 +76,33 @@ The output of this command is supposed to be machine-readable.
|
||||
}
|
||||
)
|
||||
|
||||
// makecache generates an ethash verification cache into the provided folder.
|
||||
func makecache(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
|
||||
}
|
||||
block, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
utils.Fatalf("Invalid block number: %v", err)
|
||||
}
|
||||
ethash.MakeCache(block, args[1])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makedag generates an ethash mining DAG into the provided folder.
|
||||
func makedag(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
wrongArgs := func() {
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
|
||||
}
|
||||
switch {
|
||||
case len(args) == 2:
|
||||
blockNum, err := strconv.ParseUint(args[0], 0, 64)
|
||||
dir := args[1]
|
||||
if err != nil {
|
||||
wrongArgs()
|
||||
} else {
|
||||
dir = filepath.Clean(dir)
|
||||
// seems to require a trailing slash
|
||||
if !strings.HasSuffix(dir, "/") {
|
||||
dir = dir + "/"
|
||||
}
|
||||
_, err = ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't find dir")
|
||||
}
|
||||
fmt.Println("making DAG, this could take awhile...")
|
||||
ethash.MakeDataset(blockNum, dir)
|
||||
}
|
||||
default:
|
||||
wrongArgs()
|
||||
block, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
utils.Fatalf("Invalid block number: %v", err)
|
||||
}
|
||||
ethash.MakeDataset(block, args[1])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -17,18 +17,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"sync"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
)
|
||||
|
||||
func tmpdir(t *testing.T) string {
|
||||
@ -40,36 +35,37 @@ func tmpdir(t *testing.T) string {
|
||||
}
|
||||
|
||||
type testgeth struct {
|
||||
// For total convenience, all testing methods are available.
|
||||
*testing.T
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Executable string
|
||||
Etherbase string
|
||||
Func template.FuncMap
|
||||
*cmdtest.TestCmd
|
||||
|
||||
removeDatadir bool
|
||||
cmd *exec.Cmd
|
||||
stdout *bufio.Reader
|
||||
stdin io.WriteCloser
|
||||
stderr *testlogger
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Etherbase string
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Run the app if we're the child process for runGeth.
|
||||
if os.Getenv("GETH_TEST_CHILD") != "" {
|
||||
// Run the app if we've been exec'd as "geth-test" in runGeth.
|
||||
reexec.Register("geth-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// spawns geth with the given command line args. If the args don't set --datadir, the
|
||||
// child g gets a temporary data directory.
|
||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
tt := &testgeth{T: t, Executable: os.Args[0]}
|
||||
tt := &testgeth{}
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, arg := range args {
|
||||
switch {
|
||||
case arg == "-datadir" || arg == "--datadir":
|
||||
@ -84,215 +80,19 @@ func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
}
|
||||
if tt.Datadir == "" {
|
||||
tt.Datadir = tmpdir(t)
|
||||
tt.removeDatadir = true
|
||||
tt.Cleanup = func() { os.RemoveAll(tt.Datadir) }
|
||||
args = append([]string{"-datadir", tt.Datadir}, args...)
|
||||
// Remove the temporary datadir if something fails below.
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
tt.Cleanup()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Boot "geth". This actually runs the test binary but the init function
|
||||
// will prevent any tests from running.
|
||||
tt.stderr = &testlogger{t: t}
|
||||
tt.cmd = exec.Command(os.Args[0], args...)
|
||||
tt.cmd.Env = append(os.Environ(), "GETH_TEST_CHILD=1")
|
||||
tt.cmd.Stderr = tt.stderr
|
||||
stdout, err := tt.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.stdout = bufio.NewReader(stdout)
|
||||
if tt.stdin, err = tt.cmd.StdinPipe(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tt.cmd.Start(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Boot "geth". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("geth-test", args...)
|
||||
|
||||
return tt
|
||||
}
|
||||
|
||||
// InputLine writes the given text to the child's stdin.
|
||||
// This method can also be called from an expect template, e.g.:
|
||||
//
|
||||
// geth.expect(`Passphrase: {{.InputLine "password"}}`)
|
||||
func (tt *testgeth) InputLine(s string) string {
|
||||
io.WriteString(tt.stdin, s+"\n")
|
||||
return ""
|
||||
}
|
||||
|
||||
func (tt *testgeth) setTemplateFunc(name string, fn interface{}) {
|
||||
if tt.Func == nil {
|
||||
tt.Func = make(map[string]interface{})
|
||||
}
|
||||
tt.Func[name] = fn
|
||||
}
|
||||
|
||||
// expect runs its argument as a template, then expects the
|
||||
// child process to output the result of the template within 5s.
|
||||
//
|
||||
// If the template starts with a newline, the newline is removed
|
||||
// before matching.
|
||||
func (tt *testgeth) expect(tplsource string) {
|
||||
// Generate the expected output by running the template.
|
||||
tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource))
|
||||
wantbuf := new(bytes.Buffer)
|
||||
if err := tpl.Execute(wantbuf, tt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Trim exactly one newline at the beginning. This makes tests look
|
||||
// much nicer because all expect strings are at column 0.
|
||||
want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n"))
|
||||
if err := tt.matchExactOutput(want); err != nil {
|
||||
tt.Fatal(err)
|
||||
}
|
||||
tt.Logf("Matched stdout text:\n%s", want)
|
||||
}
|
||||
|
||||
func (tt *testgeth) matchExactOutput(want []byte) error {
|
||||
buf := make([]byte, len(want))
|
||||
n := 0
|
||||
tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) })
|
||||
buf = buf[:n]
|
||||
if n < len(want) || !bytes.Equal(buf, want) {
|
||||
// Grab any additional buffered output in case of mismatch
|
||||
// because it might help with debugging.
|
||||
buf = append(buf, make([]byte, tt.stdout.Buffered())...)
|
||||
tt.stdout.Read(buf[n:])
|
||||
// Find the mismatch position.
|
||||
for i := 0; i < n; i++ {
|
||||
if want[i] != buf[i] {
|
||||
return fmt.Errorf("Output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
|
||||
buf[:i], buf[i:n], want)
|
||||
}
|
||||
}
|
||||
if n < len(want) {
|
||||
return fmt.Errorf("Not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
|
||||
buf, want[:n], want[n:])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// expectRegexp expects the child process to output text matching the
|
||||
// given regular expression within 5s.
|
||||
//
|
||||
// Note that an arbitrary amount of output may be consumed by the
|
||||
// regular expression. This usually means that expect cannot be used
|
||||
// after expectRegexp.
|
||||
func (tt *testgeth) expectRegexp(resource string) (*regexp.Regexp, []string) {
|
||||
var (
|
||||
re = regexp.MustCompile(resource)
|
||||
rtee = &runeTee{in: tt.stdout}
|
||||
matches []int
|
||||
)
|
||||
tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) })
|
||||
output := rtee.buf.Bytes()
|
||||
if matches == nil {
|
||||
tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s",
|
||||
output, resource)
|
||||
return re, nil
|
||||
}
|
||||
tt.Logf("Matched stdout text:\n%s", output)
|
||||
var submatch []string
|
||||
for i := 0; i < len(matches); i += 2 {
|
||||
submatch = append(submatch, string(output[i:i+1]))
|
||||
}
|
||||
return re, submatch
|
||||
}
|
||||
|
||||
// expectExit expects the child process to exit within 5s without
|
||||
// printing any additional text on stdout.
|
||||
func (tt *testgeth) expectExit() {
|
||||
var output []byte
|
||||
tt.withKillTimeout(func() {
|
||||
output, _ = ioutil.ReadAll(tt.stdout)
|
||||
})
|
||||
tt.cmd.Wait()
|
||||
if tt.removeDatadir {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
}
|
||||
if len(output) > 0 {
|
||||
tt.Errorf("Unmatched stdout text:\n%s", output)
|
||||
}
|
||||
}
|
||||
|
||||
func (tt *testgeth) interrupt() {
|
||||
tt.cmd.Process.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// stderrText returns any stderr output written so far.
|
||||
// The returned text holds all log lines after expectExit has
|
||||
// returned.
|
||||
func (tt *testgeth) stderrText() string {
|
||||
tt.stderr.mu.Lock()
|
||||
defer tt.stderr.mu.Unlock()
|
||||
return tt.stderr.buf.String()
|
||||
}
|
||||
|
||||
func (tt *testgeth) withKillTimeout(fn func()) {
|
||||
timeout := time.AfterFunc(5*time.Second, func() {
|
||||
tt.Log("killing the child process (timeout)")
|
||||
tt.cmd.Process.Kill()
|
||||
if tt.removeDatadir {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
}
|
||||
})
|
||||
defer timeout.Stop()
|
||||
fn()
|
||||
}
|
||||
|
||||
// testlogger logs all written lines via t.Log and also
|
||||
// collects them for later inspection.
|
||||
type testlogger struct {
|
||||
t *testing.T
|
||||
mu sync.Mutex
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (tl *testlogger) Write(b []byte) (n int, err error) {
|
||||
lines := bytes.Split(b, []byte("\n"))
|
||||
for _, line := range lines {
|
||||
if len(line) > 0 {
|
||||
tl.t.Logf("(stderr) %s", line)
|
||||
}
|
||||
}
|
||||
tl.mu.Lock()
|
||||
tl.buf.Write(b)
|
||||
tl.mu.Unlock()
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
// runeTee collects text read through it into buf.
|
||||
type runeTee struct {
|
||||
in interface {
|
||||
io.Reader
|
||||
io.ByteReader
|
||||
io.RuneReader
|
||||
}
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (rtee *runeTee) Read(b []byte) (n int, err error) {
|
||||
n, err = rtee.in.Read(b)
|
||||
rtee.buf.Write(b[:n])
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (rtee *runeTee) ReadRune() (r rune, size int, err error) {
|
||||
r, size, err = rtee.in.ReadRune()
|
||||
if err == nil {
|
||||
rtee.buf.WriteRune(r)
|
||||
}
|
||||
return r, size, err
|
||||
}
|
||||
|
||||
func (rtee *runeTee) ReadByte() (b byte, err error) {
|
||||
b, err = rtee.in.ReadByte()
|
||||
if err == nil {
|
||||
rtee.buf.WriteByte(b)
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
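For orientation, a test written against the refactored harness above would presumably look something like the following (the command, the regular expression and the test name are illustrative assumptions, not taken from the commit):
// Hypothetical example only: the Expect* helpers come from the embedded
// *cmdtest.TestCmd, and runGeth re-execs the test binary as "geth-test".
func TestVersionOutput(t *testing.T) {
	geth := runGeth(t, "version")
	geth.ExpectRegexp(`Version: .+`)
	geth.ExpectExit()
}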
@ -20,6 +20,7 @@ package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
@ -91,6 +92,21 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
utils.TxPoolGlobalSlotsFlag,
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "PERFORMANCE TUNING",
|
||||
Flags: []cli.Flag{
|
||||
@ -129,6 +145,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Name: "NETWORKING",
|
||||
Flags: []cli.Flag{
|
||||
utils.BootnodesFlag,
|
||||
utils.BootnodesV4Flag,
|
||||
utils.BootnodesV5Flag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
@ -172,6 +190,10 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.NoCompactionFlag,
|
||||
}, debug.Flags...),
|
||||
},
|
||||
{
|
||||
Name: "WHISPER (EXPERIMENTAL)",
|
||||
Flags: whisperFlags,
|
||||
},
|
||||
{
|
||||
Name: "DEPRECATED",
|
||||
Flags: []cli.Flag{
|
||||
@ -180,13 +202,43 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "EXPERIMENTAL",
|
||||
Flags: []cli.Flag{
|
||||
utils.WhisperEnabledFlag,
|
||||
},
|
||||
Name: "MISC",
|
||||
},
|
||||
}
|
||||
|
||||
// byCategory sorts an array of flagGroup by Name in the order
|
||||
// defined in AppHelpFlagGroups.
|
||||
type byCategory []flagGroup
|
||||
|
||||
func (a byCategory) Len() int { return len(a) }
|
||||
func (a byCategory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byCategory) Less(i, j int) bool {
|
||||
iCat, jCat := a[i].Name, a[j].Name
|
||||
iIdx, jIdx := len(AppHelpFlagGroups), len(AppHelpFlagGroups) // ensure non categorized flags come last
|
||||
|
||||
for i, group := range AppHelpFlagGroups {
|
||||
if iCat == group.Name {
|
||||
iIdx = i
|
||||
}
|
||||
if jCat == group.Name {
|
||||
jIdx = i
|
||||
}
|
||||
}
|
||||
|
||||
return iIdx < jIdx
|
||||
}
|
||||
|
||||
func flagCategory(flag cli.Flag) string {
|
||||
for _, category := range AppHelpFlagGroups {
|
||||
for _, flg := range category.Flags {
|
||||
if flg.GetName() == flag.GetName() {
|
||||
return category.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return "MISC"
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Override the default app help template
|
||||
cli.AppHelpTemplate = AppHelpTemplate
|
||||
@ -196,6 +248,7 @@ func init() {
|
||||
App interface{}
|
||||
FlagGroups []flagGroup
|
||||
}
|
||||
|
||||
// Override the default app help printer, but only for the global app help
|
||||
originalHelpPrinter := cli.HelpPrinter
|
||||
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
|
||||
@ -225,6 +278,27 @@ func init() {
|
||||
}
|
||||
// Render out custom usage screen
|
||||
originalHelpPrinter(w, tmpl, helpData{data, AppHelpFlagGroups})
|
||||
} else if tmpl == utils.CommandHelpTemplate {
|
||||
// Iterate over all command specific flags and categorize them
|
||||
categorized := make(map[string][]cli.Flag)
|
||||
for _, flag := range data.(cli.Command).Flags {
|
||||
if _, ok := categorized[flag.String()]; !ok {
|
||||
categorized[flagCategory(flag)] = append(categorized[flagCategory(flag)], flag)
|
||||
}
|
||||
}
|
||||
|
||||
// sort to get a stable ordering
|
||||
sorted := make([]flagGroup, 0, len(categorized))
|
||||
for cat, flgs := range categorized {
|
||||
sorted = append(sorted, flagGroup{cat, flgs})
|
||||
}
|
||||
sort.Sort(byCategory(sorted))
|
||||
|
||||
// add sorted array to data and render with default printer
|
||||
originalHelpPrinter(w, tmpl, map[string]interface{}{
|
||||
"cmd": data,
|
||||
"categorizedFlags": sorted,
|
||||
})
|
||||
} else {
|
||||
originalHelpPrinter(w, tmpl, data)
|
||||
}
|
||||
|
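The command-help branch above amounts to bucketing each of a command's flags by its category and then ordering the buckets with byCategory. As a compact restatement (a hypothetical helper reusing the identifiers defined above, not code from the commit):
// groupFlags buckets flags by their AppHelpFlagGroups category and returns
// the groups in canonical order; illustration only.
func groupFlags(flags []cli.Flag) []flagGroup {
	categorized := make(map[string][]cli.Flag)
	for _, flag := range flags {
		cat := flagCategory(flag)
		categorized[cat] = append(categorized[cat], flag)
	}
	sorted := make([]flagGroup, 0, len(categorized))
	for cat, flgs := range categorized {
		sorted = append(sorted, flagGroup{cat, flgs})
	}
	sort.Sort(byCategory(sorted))
	return sorted
}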
@ -425,6 +425,11 @@ services:
|
||||
- "{{.Port}}:80"{{else}}
|
||||
environment:
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
|
@ -42,7 +42,7 @@ RUN \
|
||||
WORKDIR /eth-netstats
|
||||
EXPOSE 3000
|
||||
|
||||
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: []};' > lib/utils/config.js
|
||||
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: [{{.Banned}}], reserved: ["yournode"]};' > lib/utils/config.js
|
||||
|
||||
CMD ["npm", "start"]
|
||||
`
|
||||
@ -59,25 +59,37 @@ services:
|
||||
- "{{.Port}}:3000"{{end}}
|
||||
environment:
|
||||
- WS_SECRET={{.Secret}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}{{if .Banned}}
|
||||
- BANNED={{.Banned}}{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployEthstats deploys a new ethstats container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string) ([]byte, error) {
|
||||
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string, banned []string) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
|
||||
trustedLabels := make([]string, len(trusted))
|
||||
for i, address := range trusted {
|
||||
trusted[i] = fmt.Sprintf("\"%s\"", address)
|
||||
trustedLabels[i] = fmt.Sprintf("\"%s\"", address)
|
||||
}
|
||||
bannedLabels := make([]string, len(banned))
|
||||
for i, address := range banned {
|
||||
bannedLabels[i] = fmt.Sprintf("\"%s\"", address)
|
||||
}
|
||||
|
||||
dockerfile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(ethstatsDockerfile)).Execute(dockerfile, map[string]interface{}{
|
||||
"Trusted": strings.Join(trusted, ", "),
|
||||
"Trusted": strings.Join(trustedLabels, ", "),
|
||||
"Banned": strings.Join(bannedLabels, ", "),
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
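The new label slices exist only to quote each address for the Node.js config template baked into the image; for example (addresses assumed):
// Illustration only: two trusted IPs become the literal `"10.0.0.1", "10.0.0.2"`
// that ends up inside lib/utils/config.js.
trusted := []string{"10.0.0.1", "10.0.0.2"}
trustedLabels := make([]string, len(trusted))
for i, address := range trusted {
	trustedLabels[i] = fmt.Sprintf("\"%s\"", address)
}
fmt.Println(strings.Join(trustedLabels, ", ")) // "10.0.0.1", "10.0.0.2"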
@ -87,6 +99,7 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
|
||||
"Port": port,
|
||||
"Secret": secret,
|
||||
"VHost": vhost,
|
||||
"Banned": strings.Join(banned, ","),
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -107,11 +120,12 @@ type ethstatsInfos struct {
|
||||
port int
|
||||
secret string
|
||||
config string
|
||||
banned []string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *ethstatsInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, port=%d, secret=%s", info.host, info.port, info.secret)
|
||||
return fmt.Sprintf("host=%s, port=%d, secret=%s, banned=%v", info.host, info.port, info.secret, info.banned)
|
||||
}
|
||||
|
||||
// checkEthstats does a health-check against an ethstats server to verify whether
|
||||
@ -145,6 +159,9 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
|
||||
if port != 80 && port != 443 {
|
||||
config += fmt.Sprintf(":%d", port)
|
||||
}
|
||||
// Retrieve the IP blacklist
|
||||
banned := strings.Split(infos.envvars["BANNED"], ",")
|
||||
|
||||
// Run a sanity check to see if the port is reachable
|
||||
if err = checkPort(host, port); err != nil {
|
||||
log.Warn("Ethstats service seems unreachable", "server", host, "port", port, "err", err)
|
||||
@ -155,5 +172,6 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
|
||||
port: port,
|
||||
secret: secret,
|
||||
config: config,
|
||||
banned: banned,
|
||||
}, nil
|
||||
}
|
||||
|
@ -82,6 +82,11 @@ services:
|
||||
- CAPTCHA_SECRET={{.CaptchaSecret}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=8080{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
|
@ -43,6 +43,11 @@ services:
|
||||
- "{{.Port}}:80"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/tmp/docker.sock:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
|
@ -30,7 +30,7 @@ import (
|
||||
|
||||
// nodeDockerfile is the Dockerfile required to run an Ethereum node.
|
||||
var nodeDockerfile = `
|
||||
FROM ethereum/client-go:alpine-develop
|
||||
FROM ethereum/client-go:latest
|
||||
|
||||
ADD genesis.json /genesis.json
|
||||
{{if .Unlock}}
|
||||
@ -38,9 +38,9 @@ ADD genesis.json /genesis.json
|
||||
ADD signer.pass /signer.pass
|
||||
{{end}}
|
||||
RUN \
|
||||
echo '/geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
|
||||
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}}' >> geth.sh
|
||||
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "geth.sh"]
|
||||
`
|
||||
@ -66,17 +66,25 @@ services:
|
||||
- LIGHT_PEERS={{.LightPeers}}
|
||||
- STATS_NAME={{.Ethstats}}
|
||||
- MINER_NAME={{.Etherbase}}
|
||||
- GAS_TARGET={{.GasTarget}}
|
||||
- GAS_PRICE={{.GasPrice}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployNode deploys a new Ethereum node container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployNode(client *sshClient, network string, bootnodes []string, config *nodeInfos) ([]byte, error) {
|
||||
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos) ([]byte, error) {
|
||||
kind := "sealnode"
|
||||
if config.keyJSON == "" && config.etherbase == "" {
|
||||
kind = "bootnode"
|
||||
bootnodes = make([]string, 0)
|
||||
bootv4 = make([]string, 0)
|
||||
bootv5 = make([]string, 0)
|
||||
}
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
@ -92,9 +100,12 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"Port": config.portFull,
|
||||
"Peers": config.peersTotal,
|
||||
"LightFlag": lightFlag,
|
||||
"Bootnodes": strings.Join(bootnodes, ","),
|
||||
"BootV4": strings.Join(bootv4, ","),
|
||||
"BootV5": strings.Join(bootv5, ","),
|
||||
"Ethstats": config.ethstats,
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": uint64(1000000 * config.gasTarget),
|
||||
"GasPrice": uint64(1000000000 * config.gasPrice),
|
||||
"Unlock": config.keyJSON != "",
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
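The wizard collects these two values in MGas and GWei, so the template data above converts them back to base units before they reach the geth command line; a quick worked example with the wizard defaults:
// With the default 4.7 MGas target and 18 GWei price:
gasTarget := uint64(1000000 * 4.7)    // 4,700,000 gas targeted per block
gasPrice := uint64(1000000000 * 18.0) // 18,000,000,000 wei (18 GWei) minimum gas price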
@ -111,6 +122,8 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"LightPeers": config.peersLight,
|
||||
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": config.gasTarget,
|
||||
"GasPrice": config.gasPrice,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -127,7 +140,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
}
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the bootnode service
|
||||
// Build and deploy the boot or seal node service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
}
|
||||
|
||||
@ -147,6 +160,8 @@ type nodeInfos struct {
|
||||
etherbase string
|
||||
keyJSON string
|
||||
keyPass string
|
||||
gasTarget float64
|
||||
gasPrice float64
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
@ -155,7 +170,8 @@ func (info *nodeInfos) String() string {
|
||||
if info.peersLight > 0 {
|
||||
discv5 = fmt.Sprintf(", portv5=%d", info.portLight)
|
||||
}
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s", info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats)
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s, gastarget=%0.3f MGas, gasprice=%0.3f GWei",
|
||||
info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats, info.gasTarget, info.gasPrice)
|
||||
}
|
||||
|
||||
// checkNode does a health-check against a boot or seal node server to verify
|
||||
@ -176,10 +192,12 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
// Resolve a few types from the environmental variables
|
||||
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
|
||||
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
|
||||
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
|
||||
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
|
||||
|
||||
// Container available, retrieve its node ID and its genesis json
|
||||
var out []byte
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 /geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
|
||||
return nil, ErrServiceUnreachable
|
||||
}
|
||||
id := bytes.Trim(bytes.TrimSpace(out), "\"")
|
||||
@ -213,6 +231,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
etherbase: infos.envvars["MINER_NAME"],
|
||||
keyJSON: keyJSON,
|
||||
keyPass: keyPass,
|
||||
gasTarget: gasTarget,
|
||||
gasPrice: gasPrice,
|
||||
}
|
||||
stats.enodeFull = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.portFull)
|
||||
if stats.portLight != 0 {
|
||||
|
@ -122,7 +122,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
}
|
||||
}
|
||||
// If a public key exists for this SSH server, check that it matches
|
||||
if bytes.Compare(pubkey, key.Marshal()) == 0 {
|
||||
if bytes.Equal(pubkey, key.Marshal()) {
|
||||
return nil
|
||||
}
|
||||
// We have a mismatch, forbid connecting
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@ -106,17 +107,15 @@ func (w *wizard) readString() string {
|
||||
// readDefaultString reads a single line from stdin, trimming it from spaces. If
|
||||
// an empty line is entered, the default value is returned.
|
||||
func (w *wizard) readDefaultString(def string) string {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text != "" {
|
||||
return text
|
||||
}
|
||||
return def
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text != "" {
|
||||
return text
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// readInt reads a single line from stdin, trimming it from spaces, enforcing it
|
||||
@ -162,18 +161,60 @@ func (w *wizard) readDefaultInt(def int) int {
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
// readFloat reads a single line from stdin, trimming it from spaces, enforcing it
|
||||
// to parse into a float.
|
||||
func (w *wizard) readFloat() float64 {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
continue
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
||||
if err != nil {
|
||||
log.Error("Invalid input, expected float", "err", err)
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// readDefaultFloat reads a single line from stdin, trimming it from spaces, enforcing
|
||||
// it to parse into a float. If an empty line is entered, the default value is returned.
|
||||
func (w *wizard) readDefaultFloat(def float64) float64 {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return def
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
||||
if err != nil {
|
||||
log.Error("Invalid input, expected float", "err", err)
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
// readPassword reads a single line from stdin, trimming it from the trailing new
|
||||
// line and returns it. The input will not be echoed.
|
||||
func (w *wizard) readPassword() string {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
if err != nil {
|
||||
log.Crit("Failed to read password", "err", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return string(text)
|
||||
fmt.Printf("> ")
|
||||
text, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
if err != nil {
|
||||
log.Crit("Failed to read password", "err", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return string(text)
|
||||
}
|
||||
|
||||
// readAddress reads a single line from stdin, trimming it from spaces and converts
|
||||
@ -237,3 +278,26 @@ func (w *wizard) readJSON() string {
|
||||
return string(blob)
|
||||
}
|
||||
}
|
||||
|
||||
// readIPAddress reads a single line from stdin, trimming it from spaces and
|
||||
// converts it to a network IP address.
|
||||
func (w *wizard) readIPAddress() net.IP {
|
||||
for {
|
||||
// Read the IP address from the user
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return nil
|
||||
}
|
||||
// Make sure it looks ok and return it if so
|
||||
ip := net.ParseIP(text)
|
||||
if ip == nil {
|
||||
log.Error("Invalid IP address, please retry")
|
||||
continue
|
||||
}
|
||||
return ip
|
||||
}
|
||||
}
|
||||
|
@ -60,6 +60,22 @@ func (w *wizard) deployEthstats() {
|
||||
fmt.Printf("What should be the secret password for the API? (default = %s)\n", infos.secret)
|
||||
infos.secret = w.readDefaultString(infos.secret)
|
||||
}
|
||||
// Gather any blacklists to ban from reporting
|
||||
fmt.Println()
|
||||
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.banned = nil
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Which IP addresses should be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != nil {
|
||||
infos.banned = append(infos.banned, ip.String())
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
// Try to deploy the ethstats server on the host
|
||||
trusted := make([]string, 0, len(w.servers))
|
||||
for _, client := range w.servers {
|
||||
@ -67,7 +83,7 @@ func (w *wizard) deployEthstats() {
|
||||
trusted = append(trusted, client.address)
|
||||
}
|
||||
}
|
||||
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted); err != nil {
|
||||
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted, infos.banned); err != nil {
|
||||
log.Error("Failed to deploy ethstats container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -165,8 +165,7 @@ func (w *wizard) deployFaucet() {
|
||||
}
|
||||
// Load up the credential needed to release funds
|
||||
if infos.node.keyJSON != "" {
|
||||
var key keystore.Key
|
||||
if err := json.Unmarshal([]byte(infos.node.keyJSON), &key); err != nil {
|
||||
if key, err := keystore.DecryptKey([]byte(infos.node.keyJSON), infos.node.keyPass); err != nil {
|
||||
infos.node.keyJSON, infos.node.keyPass = "", ""
|
||||
} else {
|
||||
fmt.Println()
|
||||
|
@ -39,7 +39,7 @@ func (w *wizard) networkStats(tips bool) {
|
||||
// Iterate over all the specified hosts and check their status
|
||||
stats := tablewriter.NewWriter(os.Stdout)
|
||||
stats.SetHeader([]string{"Server", "IP", "Status", "Service", "Details"})
|
||||
stats.SetColWidth(128)
|
||||
stats.SetColWidth(100)
|
||||
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
client := w.servers[server]
|
||||
|
@ -71,22 +71,20 @@ func (w *wizard) makeServer() string {
|
||||
fmt.Println()
|
||||
fmt.Println("Please enter remote server's address:")
|
||||
|
||||
for {
|
||||
// Read and dial the server to ensure docker is present
|
||||
input := w.readString()
|
||||
// Read and dial the server to ensure docker is present
|
||||
input := w.readString()
|
||||
|
||||
client, err := dial(input, nil)
|
||||
if err != nil {
|
||||
log.Error("Server not ready for puppeth", "err", err)
|
||||
return ""
|
||||
}
|
||||
// All checks passed, start tracking the server
|
||||
w.servers[input] = client
|
||||
w.conf.Servers[input] = client.pubkey
|
||||
w.conf.flush()
|
||||
|
||||
return input
|
||||
client, err := dial(input, nil)
|
||||
if err != nil {
|
||||
log.Error("Server not ready for puppeth", "err", err)
|
||||
return ""
|
||||
}
|
||||
// All checks passed, start tracking the server
|
||||
w.servers[input] = client
|
||||
w.conf.Servers[input] = client.pubkey
|
||||
w.conf.flush()
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
// selectServer lists the user all the currently known servers to choose from,
|
||||
|
@ -50,7 +50,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
if boot {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 512, peersLight: 256}
|
||||
} else {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0}
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
|
||||
}
|
||||
}
|
||||
infos.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
@ -109,8 +109,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
} else if w.conf.genesis.Config.Clique != nil {
|
||||
// If a previous signer was already set, offer to reuse it
|
||||
if infos.keyJSON != "" {
|
||||
var key keystore.Key
|
||||
if err := json.Unmarshal([]byte(infos.keyJSON), &key); err != nil {
|
||||
if key, err := keystore.DecryptKey([]byte(infos.keyJSON), infos.keyPass); err != nil {
|
||||
infos.keyJSON, infos.keyPass = "", ""
|
||||
} else {
|
||||
fmt.Println()
|
||||
@ -136,9 +135,17 @@ func (w *wizard) deployNode(boot bool) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Establish the gas dynamics to be enforced by the signer
|
||||
fmt.Println()
|
||||
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
|
||||
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
|
||||
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
|
||||
}
|
||||
// Try to deploy the full node on the host
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, infos); err != nil {
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos); err != nil {
|
||||
log.Error("Failed to deploy Ethereum node container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -1,38 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
func cleandb(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
utils.Fatalf("Need path to chunks database as the first and only argument")
|
||||
}
|
||||
|
||||
chunkDbPath := args[0]
|
||||
hash := storage.MakeHashFunc("SHA3")
|
||||
dbStore, err := storage.NewDbStore(chunkDbPath, hash, 10000000, 0)
|
||||
if err != nil {
|
||||
utils.Fatalf("Cannot initialise dbstore: %v", err)
|
||||
}
|
||||
dbStore.Cleanup()
|
||||
}
|
cmd/swarm/db.go (new file, 116 lines)
@ -0,0 +1,116 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
func dbExport(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
|
||||
}
|
||||
|
||||
store, err := openDbStore(args[0])
|
||||
if err != nil {
|
||||
utils.Fatalf("error opening local chunk database: %s", err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
var out io.Writer
|
||||
if args[1] == "-" {
|
||||
out = os.Stdout
|
||||
} else {
|
||||
f, err := os.Create(args[1])
|
||||
if err != nil {
|
||||
utils.Fatalf("error opening output file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
out = f
|
||||
}
|
||||
|
||||
count, err := store.Export(out)
|
||||
if err != nil {
|
||||
utils.Fatalf("error exporting local chunk database: %s", err)
|
||||
}
|
||||
|
||||
log.Info(fmt.Sprintf("successfully exported %d chunks", count))
|
||||
}
|
||||
|
||||
func dbImport(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
|
||||
}
|
||||
|
||||
store, err := openDbStore(args[0])
|
||||
if err != nil {
|
||||
utils.Fatalf("error opening local chunk database: %s", err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
var in io.Reader
|
||||
if args[1] == "-" {
|
||||
in = os.Stdin
|
||||
} else {
|
||||
f, err := os.Open(args[1])
|
||||
if err != nil {
|
||||
utils.Fatalf("error opening input file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
in = f
|
||||
}
|
||||
|
||||
count, err := store.Import(in)
|
||||
if err != nil {
|
||||
utils.Fatalf("error importing local chunk database: %s", err)
|
||||
}
|
||||
|
||||
log.Info(fmt.Sprintf("successfully imported %d chunks", count))
|
||||
}
|
||||
|
||||
func dbClean(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
|
||||
}
|
||||
|
||||
store, err := openDbStore(args[0])
|
||||
if err != nil {
|
||||
utils.Fatalf("error opening local chunk database: %s", err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
store.Cleanup()
|
||||
}
|
||||
|
||||
func openDbStore(path string) (*storage.DbStore, error) {
|
||||
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
|
||||
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
|
||||
}
|
||||
hash := storage.MakeHashFunc("SHA3")
|
||||
return storage.NewDbStore(path, hash, 10000000, 0)
|
||||
}
|
@ -17,21 +17,26 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/contracts/ens"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
@ -40,6 +45,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
@ -67,6 +73,10 @@ var (
|
||||
Name: "bzzaccount",
|
||||
Usage: "Swarm account key file",
|
||||
}
|
||||
SwarmListenAddrFlag = cli.StringFlag{
|
||||
Name: "httpaddr",
|
||||
Usage: "Swarm HTTP API listening interface",
|
||||
}
|
||||
SwarmPortFlag = cli.StringFlag{
|
||||
Name: "bzzport",
|
||||
Usage: "Swarm local http api port",
|
||||
@ -83,15 +93,23 @@ var (
|
||||
Name: "swap",
|
||||
Usage: "Swarm SWAP enabled (default false)",
|
||||
}
|
||||
SwarmSwapAPIFlag = cli.StringFlag{
|
||||
Name: "swap-api",
|
||||
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
|
||||
}
|
||||
SwarmSyncEnabledFlag = cli.BoolTFlag{
|
||||
Name: "sync",
|
||||
Usage: "Swarm Syncing enabled (default true)",
|
||||
}
|
||||
EthAPIFlag = cli.StringFlag{
|
||||
Name: "ethapi",
|
||||
Usage: "URL of the Ethereum API provider",
|
||||
EnsAPIFlag = cli.StringFlag{
|
||||
Name: "ens-api",
|
||||
Usage: "URL of the Ethereum API provider to use for ENS record lookups",
|
||||
Value: node.DefaultIPCEndpoint("geth"),
|
||||
}
|
||||
EnsAddrFlag = cli.StringFlag{
|
||||
Name: "ens-addr",
|
||||
Usage: "ENS contract address (default is detected as testnet or mainnet using --ens-api)",
|
||||
}
|
||||
SwarmApiFlag = cli.StringFlag{
|
||||
Name: "bzzapi",
|
||||
Usage: "Swarm HTTP endpoint",
|
||||
@ -121,6 +139,12 @@ var (
|
||||
Name: "corsdomain",
|
||||
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
|
||||
}
|
||||
|
||||
// the following flags are deprecated and should be removed in the future
|
||||
DeprecatedEthAPIFlag = cli.StringFlag{
|
||||
Name: "ethapi",
|
||||
Usage: "DEPRECATED: please use --ens-api and --swap-api",
|
||||
}
|
||||
)
|
||||
|
||||
var defaultNodeConfig = node.DefaultConfig
|
||||
@ -217,15 +241,69 @@ Removes a path from the manifest
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: cleandb,
|
||||
Name: "db",
|
||||
Usage: "manage the local chunk database",
|
||||
ArgsUsage: "db COMMAND",
|
||||
Description: `
|
||||
Manage the local chunk database.
|
||||
`,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: dbExport,
|
||||
Name: "export",
|
||||
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
|
||||
ArgsUsage: "<chunkdb> <file>",
|
||||
Description: `
|
||||
Export a local chunk database as a tar archive (use - to send to stdout).
|
||||
|
||||
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
|
||||
|
||||
The export may be quite large, consider piping the output through the Unix
|
||||
pv(1) tool to get a progress bar:
|
||||
|
||||
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: dbImport,
|
||||
Name: "import",
|
||||
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
|
||||
ArgsUsage: "<chunkdb> <file>",
|
||||
Description: `
|
||||
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
|
||||
|
||||
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
|
||||
|
||||
The import may be quite large, consider piping the input through the Unix
|
||||
pv(1) tool to get a progress bar:
|
||||
|
||||
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: dbClean,
|
||||
Name: "clean",
|
||||
Usage: "remove corrupt entries from a local chunk database",
|
||||
ArgsUsage: "<chunkdb>",
|
||||
Description: `
|
||||
Remove corrupt entries from a local chunk database.
|
||||
`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: func(ctx *cli.Context) {
|
||||
utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
|
||||
},
|
||||
Name: "cleandb",
|
||||
Usage: "Cleans database of corrupted entries",
|
||||
Usage: "DEPRECATED: use 'swarm db clean'",
|
||||
ArgsUsage: " ",
|
||||
Description: `
|
||||
Cleans database of corrupted entries.
|
||||
DEPRECATED: use 'swarm db clean'.
|
||||
`,
|
||||
},
|
||||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
app.Flags = []cli.Flag{
|
||||
utils.IdentityFlag,
|
||||
@ -245,10 +323,13 @@ Cleans database of corrupted entries.
|
||||
utils.PasswordFileFlag,
|
||||
// bzzd-specific flags
|
||||
CorsStringFlag,
|
||||
EthAPIFlag,
|
||||
EnsAPIFlag,
|
||||
EnsAddrFlag,
|
||||
SwarmConfigPathFlag,
|
||||
SwarmSwapEnabledFlag,
|
||||
SwarmSwapAPIFlag,
|
||||
SwarmSyncEnabledFlag,
|
||||
SwarmListenAddrFlag,
|
||||
SwarmPortFlag,
|
||||
SwarmAccountFlag,
|
||||
SwarmNetworkIdFlag,
|
||||
@ -260,6 +341,8 @@ Cleans database of corrupted entries.
|
||||
SwarmUploadDefaultPath,
|
||||
SwarmUpFromStdinFlag,
|
||||
SwarmUploadMimeType,
|
||||
//deprecated flags
|
||||
DeprecatedEthAPIFlag,
|
||||
}
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
@ -294,6 +377,11 @@ func version(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func bzzd(ctx *cli.Context) error {
|
||||
// exit if the deprecated --ethapi flag is set
|
||||
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
|
||||
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
|
||||
}
|
||||
|
||||
cfg := defaultNodeConfig
|
||||
utils.SetNodeConfig(ctx, &cfg)
|
||||
stack, err := node.New(&cfg)
|
||||
@ -328,6 +416,38 @@ func bzzd(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// detectEnsAddr determines the ENS contract address by getting both the
|
||||
// version and genesis hash using the client and matching them to either
|
||||
// mainnet or testnet addresses
|
||||
func detectEnsAddr(client *rpc.Client) (common.Address, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var version string
|
||||
if err := client.CallContext(ctx, &version, "net_version"); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
block, err := ethclient.NewClient(client).BlockByNumber(ctx, big.NewInt(0))
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
switch {
|
||||
|
||||
case version == "1" && block.Hash() == params.MainnetGenesisHash:
|
||||
log.Info("using Mainnet ENS contract address", "addr", ens.MainNetAddress)
|
||||
return ens.MainNetAddress, nil
|
||||
|
||||
case version == "3" && block.Hash() == params.TestnetGenesisHash:
|
||||
log.Info("using Testnet ENS contract address", "addr", ens.TestNetAddress)
|
||||
return ens.TestNetAddress, nil
|
||||
|
||||
default:
|
||||
return common.Address{}, fmt.Errorf("unknown version and genesis hash: %s %s", version, block.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
prvkey := getAccount(ctx, stack)
|
||||
|
||||
@ -345,23 +465,54 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
if len(bzzport) > 0 {
|
||||
bzzconfig.Port = bzzport
|
||||
}
|
||||
if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
|
||||
bzzconfig.ListenAddr = bzzaddr
|
||||
}
|
||||
swapEnabled := ctx.GlobalBool(SwarmSwapEnabledFlag.Name)
|
||||
syncEnabled := ctx.GlobalBoolT(SwarmSyncEnabledFlag.Name)
|
||||
|
||||
ethapi := ctx.GlobalString(EthAPIFlag.Name)
|
||||
swapapi := ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if swapEnabled && swapapi == "" {
|
||||
utils.Fatalf("SWAP is enabled but --swap-api is not set")
|
||||
}
|
||||
|
||||
ensapi := ctx.GlobalString(EnsAPIFlag.Name)
|
||||
ensAddr := ctx.GlobalString(EnsAddrFlag.Name)
|
||||
|
||||
cors := ctx.GlobalString(CorsStringFlag.Name)
|
||||
|
||||
boot := func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
var client *ethclient.Client
|
||||
if len(ethapi) > 0 {
|
||||
client, err = ethclient.Dial(ethapi)
|
||||
var swapClient *ethclient.Client
|
||||
if swapapi != "" {
|
||||
log.Info("connecting to SWAP API", "url", swapapi)
|
||||
swapClient, err = ethclient.Dial(swapapi)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't connect: %v", err)
|
||||
return nil, fmt.Errorf("error connecting to SWAP API %s: %s", swapapi, err)
|
||||
}
|
||||
} else {
|
||||
swapEnabled = false
|
||||
}
|
||||
return swarm.NewSwarm(ctx, client, bzzconfig, swapEnabled, syncEnabled, cors)
|
||||
|
||||
var ensClient *ethclient.Client
|
||||
if ensapi != "" {
|
||||
log.Info("connecting to ENS API", "url", ensapi)
|
||||
client, err := rpc.Dial(ensapi)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error connecting to ENS API %s: %s", ensapi, err)
|
||||
}
|
||||
ensClient = ethclient.NewClient(client)
|
||||
|
||||
if ensAddr != "" {
|
||||
bzzconfig.EnsRoot = common.HexToAddress(ensAddr)
|
||||
} else {
|
||||
ensAddr, err := detectEnsAddr(client)
|
||||
if err == nil {
|
||||
bzzconfig.EnsRoot = ensAddr
|
||||
} else {
|
||||
log.Warn(fmt.Sprintf("could not determine ENS contract address, using default %s", bzzconfig.EnsRoot), "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return swarm.NewSwarm(ctx, swapClient, ensClient, bzzconfig, swapEnabled, syncEnabled, cors)
|
||||
}
|
||||
if err := stack.Register(boot); err != nil {
|
||||
utils.Fatalf("Failed to register the Swarm service: %v", err)
|
||||
|
cmd/swarm/run_test.go (new file, 256 lines)
@ -0,0 +1,256 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Run the app if we've been exec'd as "swarm-test" in runSwarm.
|
||||
reexec.Register("swarm-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
|
||||
tt := cmdtest.NewTestCmd(t, nil)
|
||||
|
||||
// Boot "swarm". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("swarm-test", args...)
|
||||
|
||||
return tt
|
||||
}
|
||||
|
||||
type testCluster struct {
|
||||
Nodes []*testNode
|
||||
TmpDir string
|
||||
}
|
||||
|
||||
// newTestCluster starts a test swarm cluster of the given size.
|
||||
//
|
||||
// A temporary directory is created and each node gets a data directory inside
|
||||
// it.
|
||||
//
|
||||
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
|
||||
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
|
||||
// as flags).
|
||||
//
|
||||
// When starting more than one node, they are connected together using the
|
||||
// admin SetPeer RPC method.
|
||||
func newTestCluster(t *testing.T, size int) *testCluster {
|
||||
cluster := &testCluster{}
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
cluster.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "swarm-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cluster.TmpDir = tmpdir
|
||||
|
||||
// start the nodes
|
||||
cluster.Nodes = make([]*testNode, 0, size)
|
||||
for i := 0; i < size; i++ {
|
||||
dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
|
||||
if err := os.Mkdir(dir, 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := newTestNode(t, dir)
|
||||
node.Name = fmt.Sprintf("swarm%02d", i)
|
||||
|
||||
cluster.Nodes = append(cluster.Nodes, node)
|
||||
}
|
||||
|
||||
if size == 1 {
|
||||
return cluster
|
||||
}
|
||||
|
||||
// connect the nodes together
|
||||
for _, node := range cluster.Nodes {
|
||||
if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// wait until all nodes have the correct number of peers
|
||||
outer:
|
||||
for _, node := range cluster.Nodes {
|
||||
var peers []*p2p.PeerInfo
|
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
|
||||
if err := node.Client.Call(&peers, "admin_peers"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(peers) == len(cluster.Nodes)-1 {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
func (c *testCluster) Shutdown() {
|
||||
for _, node := range c.Nodes {
|
||||
node.Shutdown()
|
||||
}
|
||||
os.RemoveAll(c.TmpDir)
|
||||
}
|
||||
|
||||
type testNode struct {
|
||||
Name string
|
||||
Addr string
|
||||
URL string
|
||||
Enode string
|
||||
Dir string
|
||||
Client *rpc.Client
|
||||
Cmd *cmdtest.TestCmd
|
||||
}
|
||||
|
||||
const testPassphrase = "swarm-test-passphrase"
|
||||
|
||||
func newTestNode(t *testing.T, dir string) *testNode {
|
||||
// create key
|
||||
conf := &node.Config{
|
||||
DataDir: dir,
|
||||
IPCPath: "bzzd.ipc",
|
||||
NoUSB: true,
|
||||
}
|
||||
n, err := node.New(conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
account, err := n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
// use a unique IPCPath when running tests on Windows
|
||||
if runtime.GOOS == "windows" {
|
||||
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
|
||||
}
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p2pPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// start the node
|
||||
node.Cmd = runSwarm(t,
|
||||
"--port", p2pPort,
|
||||
"--nodiscover",
|
||||
"--datadir", dir,
|
||||
"--ipcpath", conf.IPCPath,
|
||||
"--ens-api", "",
|
||||
"--bzzaccount", account.Address.String(),
|
||||
"--bzznetworkid", "321",
|
||||
"--bzzport", httpPort,
|
||||
"--verbosity", "6",
|
||||
)
|
||||
node.Cmd.InputLine(testPassphrase)
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
|
||||
node.URL = "http://" + node.Addr
|
||||
|
||||
var nodeInfo p2p.NodeInfo
|
||||
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func (n *testNode) Shutdown() {
|
||||
if n.Cmd != nil {
|
||||
n.Cmd.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
func assignTCPPort() (string, error) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
l.Close()
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return port, nil
|
||||
}
|
@ -18,6 +18,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@ -87,24 +88,32 @@ func upload(ctx *cli.Context) {
|
||||
if err != nil {
|
||||
utils.Fatalf("Error opening file: %s", err)
|
||||
}
|
||||
var hash string
|
||||
|
||||
// define a function which either uploads a directory or single file
|
||||
// based on the type of the file being uploaded
|
||||
var doUpload func() (hash string, err error)
|
||||
if stat.IsDir() {
|
||||
if !recursive {
|
||||
utils.Fatalf("Argument is a directory and recursive upload is disabled")
|
||||
doUpload = func() (string, error) {
|
||||
if !recursive {
|
||||
return "", errors.New("Argument is a directory and recursive upload is disabled")
|
||||
}
|
||||
return client.UploadDirectory(file, defaultPath, "")
|
||||
}
|
||||
hash, err = client.UploadDirectory(file, defaultPath, "")
|
||||
} else {
|
||||
if mimeType == "" {
|
||||
mimeType = detectMimeType(file)
|
||||
doUpload = func() (string, error) {
|
||||
f, err := swarm.Open(file)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
if mimeType == "" {
|
||||
mimeType = detectMimeType(file)
|
||||
}
|
||||
f.ContentType = mimeType
|
||||
return client.Upload(f, "")
|
||||
}
|
||||
f, err := swarm.Open(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error opening file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
f.ContentType = mimeType
|
||||
hash, err = client.Upload(f, "")
|
||||
}
|
||||
hash, err := doUpload()
|
||||
if err != nil {
|
||||
utils.Fatalf("Upload failed: %s", err)
|
||||
}
|
||||
|
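The change above folds the directory and single-file branches into one doUpload closure, so the hash assignment and the failure path exist exactly once. A self-contained sketch of the same shape, with hypothetical stub uploaders standing in for the real Swarm client calls:

package main

import (
	"errors"
	"fmt"
	"os"
)

// Stubs standing in for client.UploadDirectory and client.Upload.
func uploadDirectory(path string) (string, error) { return "dir-hash", nil }
func uploadFile(path string) (string, error)      { return "file-hash", nil }

func upload(path string, recursive bool) (string, error) {
	stat, err := os.Stat(path)
	if err != nil {
		return "", fmt.Errorf("error opening file: %s", err)
	}
	// Pick the uploader once; run it once; report errors once.
	var doUpload func() (string, error)
	if stat.IsDir() {
		doUpload = func() (string, error) {
			if !recursive {
				return "", errors.New("argument is a directory and recursive upload is disabled")
			}
			return uploadDirectory(path)
		}
	} else {
		doUpload = func() (string, error) { return uploadFile(path) }
	}
	return doUpload()
}

func main() {
	hash, err := upload(".", true)
	fmt.Println(hash, err)
}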
76
cmd/swarm/upload_test.go
Normal file

@ -0,0 +1,76 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
|
||||
// available from all nodes via the HTTP API
|
||||
func TestCLISwarmUp(t *testing.T) {
|
||||
// start 3 node cluster
|
||||
t.Log("starting 3 node cluster")
|
||||
cluster := newTestCluster(t, 3)
|
||||
defer cluster.Shutdown()
|
||||
|
||||
// create a tmp file
|
||||
tmp, err := ioutil.TempFile("", "swarm-test")
|
||||
assertNil(t, err)
|
||||
defer tmp.Close()
|
||||
defer os.Remove(tmp.Name())
|
||||
_, err = io.WriteString(tmp, "data")
|
||||
assertNil(t, err)
|
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
t.Log("uploading file with 'swarm up'")
|
||||
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
|
||||
_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
|
||||
up.ExpectExit()
|
||||
hash := matches[0]
|
||||
t.Logf("file uploaded with hash %s", hash)
|
||||
|
||||
// get the file from the HTTP API of each node
|
||||
for _, node := range cluster.Nodes {
|
||||
t.Logf("getting file from %s", node.Name)
|
||||
res, err := http.Get(node.URL + "/bzz:/" + hash)
|
||||
assertNil(t, err)
|
||||
assertHTTPResponse(t, res, http.StatusOK, "data")
|
||||
}
|
||||
}
|
||||
|
||||
func assertNil(t *testing.T, err error) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != expectedStatus {
|
||||
t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
|
||||
}
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
assertNil(t, err)
|
||||
if string(data) != expectedBody {
|
||||
t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
|
||||
}
|
||||
}
|
@ -164,7 +164,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
||||
|
||||
func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
|
||||
for _, b := range bs {
|
||||
if !chain.HasBlock(b.Hash()) {
|
||||
if !chain.HasBlock(b.Hash(), b.NumberU64()) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -41,7 +41,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethstats"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
@ -56,6 +55,19 @@ import (
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .cmd.Description}}{{.cmd.Description}}
|
||||
{{end}}{{if .cmd.Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .cmd.Subcommands}}{{.cmd.Name}}{{with .cmd.ShortName}}, {{.cmd}}{{end}}{{ "\t" }}{{.cmd.Usage}}
|
||||
{{end}}{{end}}{{if .categorizedFlags}}
|
||||
{{range $idx, $categorized := .categorizedFlags}}{{$categorized.Name}} OPTIONS:
|
||||
{{range $categorized.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}{{end}}`
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.AppHelpTemplate = `{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
|
||||
@ -70,16 +82,7 @@ GLOBAL OPTIONS:
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
cli.CommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .Description}}{{.Description}}
|
||||
{{end}}{{if .Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
`
|
||||
cli.CommandHelpTemplate = CommandHelpTemplate
|
||||
}
|
||||
|
||||
// NewApp creates an app with sane defaults.
|
||||
@ -117,36 +120,7 @@ var (
|
||||
}
|
||||
NoUSBFlag = cli.BoolFlag{
|
||||
Name: "nousb",
|
||||
Usage: "Disables monitoring for and managine USB hardware wallets",
|
||||
}
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
|
||||
}
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
Usage: "Disables monitoring for and managing USB hardware wallets",
|
||||
}
|
||||
NetworkIdFlag = cli.Uint64Flag{
|
||||
Name: "networkid",
|
||||
@ -203,6 +177,86 @@ var (
|
||||
Name: "lightkdf",
|
||||
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
||||
}
|
||||
// Ethash settings
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
|
||||
}
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolNoLocalsFlag = cli.BoolFlag{
|
||||
Name: "txpool.nolocals",
|
||||
Usage: "Disables price exemptions for locally submitted transactions",
|
||||
}
|
||||
TxPoolJournalFlag = cli.StringFlag{
|
||||
Name: "txpool.journal",
|
||||
Usage: "Disk journal for local transaction to survive node restarts",
|
||||
Value: core.DefaultTxPoolConfig.Journal,
|
||||
}
|
||||
TxPoolRejournalFlag = cli.DurationFlag{
|
||||
Name: "txpool.rejournal",
|
||||
Usage: "Time interval to regenerate the local transaction journal",
|
||||
Value: core.DefaultTxPoolConfig.Rejournal,
|
||||
}
|
||||
TxPoolPriceLimitFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricelimit",
|
||||
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
|
||||
Value: eth.DefaultConfig.TxPool.PriceLimit,
|
||||
}
|
||||
TxPoolPriceBumpFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricebump",
|
||||
Usage: "Price bump percentage to replace an already existing transaction",
|
||||
Value: eth.DefaultConfig.TxPool.PriceBump,
|
||||
}
|
||||
TxPoolAccountSlotsFlag = cli.Uint64Flag{
|
||||
Name: "txpool.accountslots",
|
||||
Usage: "Minimum number of executable transaction slots guaranteed per account",
|
||||
Value: eth.DefaultConfig.TxPool.AccountSlots,
|
||||
}
|
||||
TxPoolGlobalSlotsFlag = cli.Uint64Flag{
|
||||
Name: "txpool.globalslots",
|
||||
Usage: "Maximum number of executable transaction slots for all accounts",
|
||||
Value: eth.DefaultConfig.TxPool.GlobalSlots,
|
||||
}
|
||||
TxPoolAccountQueueFlag = cli.Uint64Flag{
|
||||
Name: "txpool.accountqueue",
|
||||
Usage: "Maximum number of non-executable transaction slots permitted per account",
|
||||
Value: eth.DefaultConfig.TxPool.AccountQueue,
|
||||
}
|
||||
TxPoolGlobalQueueFlag = cli.Uint64Flag{
|
||||
Name: "txpool.globalqueue",
|
||||
Usage: "Maximum number of non-executable transaction slots for all accounts",
|
||||
Value: eth.DefaultConfig.TxPool.GlobalQueue,
|
||||
}
|
||||
TxPoolLifetimeFlag = cli.DurationFlag{
|
||||
Name: "txpool.lifetime",
|
||||
Usage: "Maximum amount of time non-executable transaction are queued",
|
||||
Value: eth.DefaultConfig.TxPool.Lifetime,
|
||||
}
|
||||
// Performance tuning settings
|
||||
CacheFlag = cli.IntFlag{
|
||||
Name: "cache",
|
||||
@ -237,7 +291,7 @@ var (
|
||||
GasPriceFlag = BigFlag{
|
||||
Name: "gasprice",
|
||||
Usage: "Minimal gas price to accept for mining a transactions",
|
||||
Value: big.NewInt(20 * params.Shannon),
|
||||
Value: eth.DefaultConfig.GasPrice,
|
||||
}
|
||||
ExtraDataFlag = cli.StringFlag{
|
||||
Name: "extradata",
|
||||
@ -360,7 +414,17 @@ var (
|
||||
}
|
||||
BootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated enode URLs for P2P discovery bootstrap",
|
||||
Usage: "Comma separated enode URLs for P2P discovery bootstrap (set v4+v5 instead for light servers)",
|
||||
Value: "",
|
||||
}
|
||||
BootnodesV4Flag = cli.StringFlag{
|
||||
Name: "bootnodesv4",
|
||||
Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes)",
|
||||
Value: "",
|
||||
}
|
||||
BootnodesV5Flag = cli.StringFlag{
|
||||
Name: "bootnodesv5",
|
||||
Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes)",
|
||||
Value: "",
|
||||
}
|
||||
NodeKeyFileFlag = cli.StringFlag{
|
||||
@ -389,11 +453,6 @@ var (
|
||||
Usage: "Restricts network communication to the given IP networks (CIDR masks)",
|
||||
}
|
||||
|
||||
WhisperEnabledFlag = cli.BoolFlag{
|
||||
Name: "shh",
|
||||
Usage: "Enable Whisper",
|
||||
}
|
||||
|
||||
// ATM the url is left to the user and deployment to
|
||||
JSpathFlag = cli.StringFlag{
|
||||
Name: "jspath",
|
||||
@ -412,6 +471,20 @@ var (
|
||||
Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices",
|
||||
Value: eth.DefaultConfig.GPO.Percentile,
|
||||
}
|
||||
WhisperEnabledFlag = cli.BoolFlag{
|
||||
Name: "shh",
|
||||
Usage: "Enable Whisper",
|
||||
}
|
||||
WhisperMaxMessageSizeFlag = cli.IntFlag{
|
||||
Name: "shh.maxmessagesize",
|
||||
Usage: "Max message size accepted",
|
||||
Value: int(whisper.DefaultMaxMessageSize),
|
||||
}
|
||||
WhisperMinPOWFlag = cli.Float64Flag{
|
||||
Name: "shh.pow",
|
||||
Usage: "Minimum POW accepted",
|
||||
Value: whisper.DefaultMinimumPoW,
|
||||
}
|
||||
)
|
||||
|
||||
// MakeDataDir retrieves the currently requested data directory, terminating
|
||||
@ -469,8 +542,12 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
|
||||
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.MainnetBootnodes
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name):
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
|
||||
if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
|
||||
} else {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
}
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
urls = params.TestnetBootnodes
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
@ -493,8 +570,12 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.DiscoveryV5Bootnodes
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name):
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
|
||||
if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
|
||||
} else {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
}
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyV5Bootnodes
|
||||
case cfg.BootstrapNodesV5 != nil:
|
||||
@ -717,6 +798,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
||||
// --dev mode can't use p2p networking.
|
||||
cfg.MaxPeers = 0
|
||||
cfg.ListenAddr = ":0"
|
||||
cfg.DiscoveryV5Addr = ":0"
|
||||
cfg.NoDiscovery = true
|
||||
cfg.DiscoveryV5 = false
|
||||
}
|
||||
@ -761,6 +843,39 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
|
||||
cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolJournalFlag.Name) {
|
||||
cfg.Journal = ctx.GlobalString(TxPoolJournalFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolRejournalFlag.Name) {
|
||||
cfg.Rejournal = ctx.GlobalDuration(TxPoolRejournalFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
|
||||
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) {
|
||||
cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) {
|
||||
cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) {
|
||||
cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) {
|
||||
cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) {
|
||||
cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) {
|
||||
cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name)
|
||||
}
|
||||
}
|
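For reference, the struct that setTxPool fills in looks roughly like the literal below; the field names match the assignments above, while the values are made-up examples (the real defaults live in core.DefaultTxPoolConfig):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Example values only; every field mirrors one --txpool.* flag handled above.
	cfg := core.TxPoolConfig{
		NoLocals:     false,              // --txpool.nolocals
		Journal:      "transactions.rlp", // --txpool.journal
		Rejournal:    time.Hour,          // --txpool.rejournal
		PriceLimit:   1,                  // --txpool.pricelimit
		PriceBump:    10,                 // --txpool.pricebump
		AccountSlots: 16,                 // --txpool.accountslots
		GlobalSlots:  4096,               // --txpool.globalslots
		AccountQueue: 64,                 // --txpool.accountqueue
		GlobalQueue:  1024,               // --txpool.globalqueue
		Lifetime:     3 * time.Hour,      // --txpool.lifetime
	}
	fmt.Printf("%+v\n", cfg)
}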
||||
|
||||
func setEthash(ctx *cli.Context, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
|
||||
cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
|
||||
@ -794,6 +909,16 @@ func checkExclusive(ctx *cli.Context, flags ...cli.Flag) {
|
||||
}
|
||||
}
|
||||
|
||||
// SetShhConfig applies shh-related command line flags to the config.
|
||||
func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
|
||||
if ctx.GlobalIsSet(WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.MaxMessageSize = uint32(ctx.GlobalUint(WhisperMaxMessageSizeFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(WhisperMinPOWFlag.Name) {
|
||||
cfg.MinimumAcceptedPOW = ctx.GlobalFloat64(WhisperMinPOWFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// SetEthConfig applies eth-related command line flags to the config.
|
||||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
@ -803,6 +928,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
setEtherbase(ctx, ks, cfg)
|
||||
setGPO(ctx, &cfg.GPO)
|
||||
setTxPool(ctx, &cfg.TxPool)
|
||||
setEthash(ctx, cfg)
|
||||
|
||||
switch {
|
||||
@ -823,10 +949,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
|
||||
}
|
||||
|
||||
// Ethereum needs to know maxPeers to calculate the light server peer ratio.
|
||||
// TODO(fjl): ensure Ethereum can get MaxPeers from node.
|
||||
cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name)
|
||||
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) {
|
||||
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name)
|
||||
}
|
||||
@ -898,8 +1020,10 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
|
||||
// RegisterShhService configures Whisper and adds it to the given node.
|
||||
func RegisterShhService(stack *node.Node) {
|
||||
if err := stack.Register(func(*node.ServiceContext) (node.Service, error) { return whisper.New(), nil }); err != nil {
|
||||
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
|
||||
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
|
||||
return whisper.New(cfg), nil
|
||||
}); err != nil {
|
||||
Fatalf("Failed to register the Whisper service: %v", err)
|
||||
}
|
||||
}
|
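The Whisper wiring after this change is: SetShhConfig reads --shh.maxmessagesize and --shh.pow into a whisper.Config, and RegisterShhService hands that config to whisper.New. A minimal sketch of building such a config by hand, much like the standalone wnode tool does further down in this diff; the whisperv5 import path and the literal values here are assumptions, not taken from the diff:

package main

import (
	whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
)

func main() {
	// Illustrative values; SetShhConfig derives these from the CLI flags instead.
	cfg := &whisper.Config{
		MaxMessageSize:     uint32(1024 * 1024),
		MinimumAcceptedPOW: 0.2,
	}
	shh := whisper.New(cfg)
	_ = shh
}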
||||
@ -964,14 +1088,17 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
|
||||
|
||||
engine := ethash.NewFaker()
|
||||
if !ctx.GlobalBool(FakePoWFlag.Name) {
|
||||
engine = ethash.New("", 1, 0, "", 1, 0)
|
||||
engine = ethash.New(
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
)
|
||||
}
|
||||
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
|
||||
if err != nil {
|
||||
Fatalf("%v", err)
|
||||
}
|
||||
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
|
||||
chain, err = core.NewBlockChain(chainDb, config, engine, new(event.TypeMux), vmcfg)
|
||||
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
|
||||
if err != nil {
|
||||
Fatalf("Can't create BlockChain: %v", err)
|
||||
}
|
||||
|
@ -87,7 +87,7 @@ var (
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
||||
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
||||
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
||||
argMaxSize = flag.Int("maxsize", whisper.DefaultMaxMessageLength, "max size of message")
|
||||
argMaxSize = flag.Uint("maxsize", uint(whisper.DefaultMaxMessageSize), "max size of message")
|
||||
argPoW = flag.Float64("pow", whisper.DefaultMinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
|
||||
argServerPoW = flag.Float64("mspow", whisper.DefaultMinimumPoW, "PoW requirement for Mail Server request")
|
||||
|
||||
@ -198,6 +198,11 @@ func initialize() {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
|
||||
cfg := &whisper.Config{
|
||||
MaxMessageSize: uint32(*argMaxSize),
|
||||
MinimumAcceptedPOW: *argPoW,
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
if len(msPassword) == 0 {
|
||||
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
|
||||
@ -205,11 +210,12 @@ func initialize() {
|
||||
utils.Fatalf("Failed to read Mail Server password: %s", err)
|
||||
}
|
||||
}
|
||||
shh = whisper.New()
|
||||
|
||||
shh = whisper.New(cfg)
|
||||
shh.RegisterServer(&mailServer)
|
||||
mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
|
||||
} else {
|
||||
shh = whisper.New()
|
||||
shh = whisper.New(cfg)
|
||||
}
|
||||
|
||||
if *argPoW != whisper.DefaultMinimumPoW {
|
||||
@ -219,8 +225,8 @@ func initialize() {
|
||||
}
|
||||
}
|
||||
|
||||
if *argMaxSize != whisper.DefaultMaxMessageLength {
|
||||
err := shh.SetMaxMessageLength(*argMaxSize)
|
||||
if uint32(*argMaxSize) != whisper.DefaultMaxMessageSize {
|
||||
err := shh.SetMaxMessageSize(uint32(*argMaxSize))
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to set max message size: %s", err)
|
||||
}
|
||||
|
188
common/bitutil/bitutil.go
Normal file
@ -0,0 +1,188 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from: https://golang.org/src/crypto/cipher/xor.go
|
||||
|
||||
// Package bitutil implements fast bitwise operations.
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
|
||||
|
||||
// XORBytes xors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes xor'd.
|
||||
func XORBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastXORBytes(dst, a, b)
|
||||
}
|
||||
return safeXORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastXORBytes xors in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeXORBytes xors bytes one at a time. It works on all architectures,
|
||||
// regardless of whether unaligned reads and writes are supported.
|
||||
func safeXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ANDBytes ands the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes and'd.
|
||||
func ANDBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastANDBytes(dst, a, b)
|
||||
}
|
||||
return safeANDBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastANDBytes ands in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastANDBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] & bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeANDBytes ands bytes one at a time. It works on all architectures,
|
||||
// regardless of whether unaligned reads and writes are supported.
|
||||
func safeANDBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ORBytes ors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes or'd.
|
||||
func ORBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastORBytes(dst, a, b)
|
||||
}
|
||||
return safeORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastORBytes ors in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] | bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeORBytes ors bytes one at a time. It works on all architectures,
|
||||
// regardless of whether unaligned reads and writes are supported.
|
||||
func safeORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// TestBytes tests whether any bit is set in the input byte slice.
|
||||
func TestBytes(p []byte) bool {
|
||||
if supportsUnaligned {
|
||||
return fastTestBytes(p)
|
||||
}
|
||||
return safeTestBytes(p)
|
||||
}
|
||||
|
||||
// fastTestBytes tests for set bits in bulk. It only works on architectures that
|
||||
// support unaligned read/writes.
|
||||
func fastTestBytes(p []byte) bool {
|
||||
n := len(p)
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
pw := *(*[]uintptr)(unsafe.Pointer(&p))
|
||||
for i := 0; i < w; i++ {
|
||||
if pw[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// safeTestBytes tests for set bits one byte at a time. It works on all
|
||||
// architectures, regardless of whether unaligned reads and writes are supported.
|
||||
func safeTestBytes(p []byte) bool {
|
||||
for i := 0; i < len(p); i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
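A short usage sketch for the helpers above; the import path is the one this file introduces (common/bitutil), and the surrounding main is just a throwaway harness:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	a := []byte{0x0f, 0xf0, 0x00, 0xaa}
	b := []byte{0xff, 0x0f, 0x00, 0x55}

	dst := make([]byte, len(a)) // destination must have enough space
	n := bitutil.XORBytes(dst, a, b)
	fmt.Printf("xor'd %d bytes: %x\n", n, dst) // xor'd 4 bytes: f0ff00ff

	fmt.Println(bitutil.TestBytes(dst))              // true: at least one bit set
	fmt.Println(bitutil.TestBytes(make([]byte, 32))) // false: all zero
}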
215
common/bitutil/bitutil_test.go
Normal file
@ -0,0 +1,215 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from: https://golang.org/src/crypto/cipher/xor_test.go
|
||||
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests that bitwise XOR works for various alignments.
|
||||
func TestXOR(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
XORBytes(d1, p, q)
|
||||
safeXORBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal", d1, d2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bitwise AND works for various alignments.
|
||||
func TestAND(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
ANDBytes(d1, p, q)
|
||||
safeANDBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bitwise OR works for various alignments.
|
||||
func TestOR(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
ORBytes(d1, p, q)
|
||||
safeORBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bit testing works for various alignments.
|
||||
func TestTest(t *testing.T) {
|
||||
for align := 0; align < 2; align++ {
|
||||
// Test for bits set in the bulk part
|
||||
p := make([]byte, 1023)[align:]
|
||||
p[100] = 1
|
||||
|
||||
if TestBytes(p) != safeTestBytes(p) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
// Test for bits set in the tail part
|
||||
q := make([]byte, 1023)[align:]
|
||||
q[len(q)-1] = 1
|
||||
|
||||
if TestBytes(q) != safeTestBytes(q) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized XOR performance.
|
||||
func BenchmarkFastXOR1KB(b *testing.B) { benchmarkFastXOR(b, 1024) }
|
||||
func BenchmarkFastXOR2KB(b *testing.B) { benchmarkFastXOR(b, 2048) }
|
||||
func BenchmarkFastXOR4KB(b *testing.B) { benchmarkFastXOR(b, 4096) }
|
||||
|
||||
func benchmarkFastXOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
XORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline XOR performance.
|
||||
func BenchmarkBaseXOR1KB(b *testing.B) { benchmarkBaseXOR(b, 1024) }
|
||||
func BenchmarkBaseXOR2KB(b *testing.B) { benchmarkBaseXOR(b, 2048) }
|
||||
func BenchmarkBaseXOR4KB(b *testing.B) { benchmarkBaseXOR(b, 4096) }
|
||||
|
||||
func benchmarkBaseXOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeXORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized AND performance.
|
||||
func BenchmarkFastAND1KB(b *testing.B) { benchmarkFastAND(b, 1024) }
|
||||
func BenchmarkFastAND2KB(b *testing.B) { benchmarkFastAND(b, 2048) }
|
||||
func BenchmarkFastAND4KB(b *testing.B) { benchmarkFastAND(b, 4096) }
|
||||
|
||||
func benchmarkFastAND(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ANDBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline AND performance.
|
||||
func BenchmarkBaseAND1KB(b *testing.B) { benchmarkBaseAND(b, 1024) }
|
||||
func BenchmarkBaseAND2KB(b *testing.B) { benchmarkBaseAND(b, 2048) }
|
||||
func BenchmarkBaseAND4KB(b *testing.B) { benchmarkBaseAND(b, 4096) }
|
||||
|
||||
func benchmarkBaseAND(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeANDBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized OR performance.
|
||||
func BenchmarkFastOR1KB(b *testing.B) { benchmarkFastOR(b, 1024) }
|
||||
func BenchmarkFastOR2KB(b *testing.B) { benchmarkFastOR(b, 2048) }
|
||||
func BenchmarkFastOR4KB(b *testing.B) { benchmarkFastOR(b, 4096) }
|
||||
|
||||
func benchmarkFastOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline OR performance.
|
||||
func BenchmarkBaseOR1KB(b *testing.B) { benchmarkBaseOR(b, 1024) }
|
||||
func BenchmarkBaseOR2KB(b *testing.B) { benchmarkBaseOR(b, 2048) }
|
||||
func BenchmarkBaseOR4KB(b *testing.B) { benchmarkBaseOR(b, 4096) }
|
||||
|
||||
func benchmarkBaseOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized bit testing performance.
|
||||
func BenchmarkFastTest1KB(b *testing.B) { benchmarkFastTest(b, 1024) }
|
||||
func BenchmarkFastTest2KB(b *testing.B) { benchmarkFastTest(b, 2048) }
|
||||
func BenchmarkFastTest4KB(b *testing.B) { benchmarkFastTest(b, 4096) }
|
||||
|
||||
func benchmarkFastTest(b *testing.B, size int) {
|
||||
p := make([]byte, size)
|
||||
for i := 0; i < b.N; i++ {
|
||||
TestBytes(p)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline bit testing performance.
|
||||
func BenchmarkBaseTest1KB(b *testing.B) { benchmarkBaseTest(b, 1024) }
|
||||
func BenchmarkBaseTest2KB(b *testing.B) { benchmarkBaseTest(b, 2048) }
|
||||
func BenchmarkBaseTest4KB(b *testing.B) { benchmarkBaseTest(b, 4096) }
|
||||
|
||||
func benchmarkBaseTest(b *testing.B, size int) {
|
||||
p := make([]byte, size)
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeTestBytes(p)
|
||||
}
|
||||
}
|
170
common/bitutil/compress.go
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitutil
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// errMissingData is returned from decompression if the byte referenced by
|
||||
// the bitset header overflows the input data.
|
||||
errMissingData = errors.New("missing bytes on input")
|
||||
|
||||
// errUnreferencedData is returned from decompression if not all bytes were used
|
||||
// up from the input data after decompressing it.
|
||||
errUnreferencedData = errors.New("extra bytes on input")
|
||||
|
||||
// errExceededTarget is returned from decompression if the bitset header has
|
||||
// more bits defined than the number of target buffer space available.
|
||||
errExceededTarget = errors.New("target data size exceeded")
|
||||
|
||||
// errZeroContent is returned from decompression if a data byte referenced in
|
||||
// the bitset header is actually a zero byte.
|
||||
errZeroContent = errors.New("zero byte in input content")
|
||||
)
|
||||
|
||||
// The compression algorithm implemented by CompressBytes and DecompressBytes is
|
||||
// optimized for sparse input data which contains a lot of zero bytes. Decompression
|
||||
// requires knowledge of the decompressed data length.
|
||||
//
|
||||
// Compression works as follows:
|
||||
//
|
||||
// if data only contains zeroes,
|
||||
// CompressBytes(data) == nil
|
||||
// otherwise if len(data) <= 1,
|
||||
// CompressBytes(data) == data
|
||||
// otherwise:
|
||||
// CompressBytes(data) == append(CompressBytes(nonZeroBitset(data)), nonZeroBytes(data)...)
|
||||
// where
|
||||
// nonZeroBitset(data) is a bit vector with len(data) bits (MSB first):
|
||||
// nonZeroBitset(data)[i/8] && (1 << (7-i%8)) != 0 if data[i] != 0
|
||||
// len(nonZeroBitset(data)) == (len(data)+7)/8
|
||||
// nonZeroBytes(data) contains the non-zero bytes of data in the same order
|
||||
|
||||
// CompressBytes compresses the input byte slice according to the sparse bitset
|
||||
// representation algorithm. If the result is bigger than the original input, no
|
||||
// compression is done.
|
||||
func CompressBytes(data []byte) []byte {
|
||||
if out := bitsetEncodeBytes(data); len(out) < len(data) {
|
||||
return out
|
||||
}
|
||||
cpy := make([]byte, len(data))
|
||||
copy(cpy, data)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// bitsetEncodeBytes compresses the input byte slice according to the sparse
|
||||
// bitset representation algorithm.
|
||||
func bitsetEncodeBytes(data []byte) []byte {
|
||||
// Empty slices get compressed to nil
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
// One byte slices compress to nil or retain the single byte
|
||||
if len(data) == 1 {
|
||||
if data[0] == 0 {
|
||||
return nil
|
||||
}
|
||||
return data
|
||||
}
|
||||
// Calculate the bitset of set bytes, and gather the non-zero bytes
|
||||
nonZeroBitset := make([]byte, (len(data)+7)/8)
|
||||
nonZeroBytes := make([]byte, 0, len(data))
|
||||
|
||||
for i, b := range data {
|
||||
if b != 0 {
|
||||
nonZeroBytes = append(nonZeroBytes, b)
|
||||
nonZeroBitset[i/8] |= 1 << byte(7-i%8)
|
||||
}
|
||||
}
|
||||
if len(nonZeroBytes) == 0 {
|
||||
return nil
|
||||
}
|
||||
return append(bitsetEncodeBytes(nonZeroBitset), nonZeroBytes...)
|
||||
}
|
||||
|
||||
// DecompressBytes decompresses data with a known target size. If the input data
|
||||
// matches the size of the target, it means no compression was done in the first
|
||||
// place.
|
||||
func DecompressBytes(data []byte, target int) ([]byte, error) {
|
||||
if len(data) > target {
|
||||
return nil, errExceededTarget
|
||||
}
|
||||
if len(data) == target {
|
||||
cpy := make([]byte, len(data))
|
||||
copy(cpy, data)
|
||||
return cpy, nil
|
||||
}
|
||||
return bitsetDecodeBytes(data, target)
|
||||
}
|
||||
|
||||
// bitsetDecodeBytes decompresses data with a known target size.
|
||||
func bitsetDecodeBytes(data []byte, target int) ([]byte, error) {
|
||||
out, size, err := bitsetDecodePartialBytes(data, target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if size != len(data) {
|
||||
return nil, errUnreferencedData
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// bitsetDecodePartialBytes decompresses data with a known target size, but does
|
||||
// not enforce consuming all the input bytes. In addition to the decompressed
|
||||
// output, the function returns the length of compressed input data corresponding
|
||||
// to the output as the input slice may be longer.
|
||||
func bitsetDecodePartialBytes(data []byte, target int) ([]byte, int, error) {
|
||||
// Sanity check 0 targets to avoid infinite recursion
|
||||
if target == 0 {
|
||||
return nil, 0, nil
|
||||
}
|
||||
// Handle the zero and single byte corner cases
|
||||
decomp := make([]byte, target)
|
||||
if len(data) == 0 {
|
||||
return decomp, 0, nil
|
||||
}
|
||||
if target == 1 {
|
||||
decomp[0] = data[0] // copy to avoid referencing the input slice
|
||||
if data[0] != 0 {
|
||||
return decomp, 1, nil
|
||||
}
|
||||
return decomp, 0, nil
|
||||
}
|
||||
// Decompress the bitset of set bytes and distribute the non zero bytes
|
||||
nonZeroBitset, ptr, err := bitsetDecodePartialBytes(data, (target+7)/8)
|
||||
if err != nil {
|
||||
return nil, ptr, err
|
||||
}
|
||||
for i := 0; i < 8*len(nonZeroBitset); i++ {
|
||||
if nonZeroBitset[i/8]&(1<<byte(7-i%8)) != 0 {
|
||||
// Make sure we have enough data to push into the correct slot
|
||||
if ptr >= len(data) {
|
||||
return nil, 0, errMissingData
|
||||
}
|
||||
if i >= len(decomp) {
|
||||
return nil, 0, errExceededTarget
|
||||
}
|
||||
// Make sure the data is valid and push into the slot
|
||||
if data[ptr] == 0 {
|
||||
return nil, 0, errZeroContent
|
||||
}
|
||||
decomp[i] = data[ptr]
|
||||
ptr++
|
||||
}
|
||||
}
|
||||
return decomp, ptr, nil
|
||||
}
|
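To make the bitset scheme concrete, here is the round trip for the sparse vector used by TestCompression further down: 0x4912385c0e7b64000000 has seven non-zero bytes followed by three zeros, its non-zero bitset is 0xfe00, compressing that bitset again yields 0x80fe, so the final output is 0x80fe4912385c0e7b64. A small harness that checks this, assuming the common/bitutil import path introduced above:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	in := hexutil.MustDecode("0x4912385c0e7b64000000")

	comp := bitutil.CompressBytes(in)
	fmt.Printf("%x\n", comp) // 80fe4912385c0e7b64 (9 bytes instead of 10)

	// Decompression needs the original length back.
	out, err := bitutil.DecompressBytes(comp, len(in))
	if err != nil || !bytes.Equal(out, in) {
		panic(fmt.Sprintf("round trip failed: %x %v", out, err))
	}
}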
56
common/bitutil/compress_fuzz.go
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
package bitutil
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Fuzz implements a go-fuzz fuzzer method to test various encoding method
|
||||
// invocations.
|
||||
func Fuzz(data []byte) int {
|
||||
if len(data) == 0 {
|
||||
return -1
|
||||
}
|
||||
if data[0]%2 == 0 {
|
||||
return fuzzEncode(data[1:])
|
||||
}
|
||||
return fuzzDecode(data[1:])
|
||||
}
|
||||
|
||||
// fuzzEncode implements a go-fuzz fuzzer method to test the bitset encoding and
|
||||
// decoding algorithm.
|
||||
func fuzzEncode(data []byte) int {
|
||||
proc, _ := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
if !bytes.Equal(data, proc) {
|
||||
panic("content mismatch")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// fuzzDecode implements a go-fuzz fuzzer method to test the bit decoding and
|
||||
// reencoding algorithm.
|
||||
func fuzzDecode(data []byte) int {
|
||||
blob, err := bitsetDecodeBytes(data, 1024)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
if comp := bitsetEncodeBytes(blob); !bytes.Equal(comp, data) {
|
||||
panic("content mismatch")
|
||||
}
|
||||
return 0
|
||||
}
|
181
common/bitutil/compress_test.go
Normal file
@ -0,0 +1,181 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
// Tests that data bitset encoding and decoding works and is bijective.
|
||||
func TestEncodingCycle(t *testing.T) {
|
||||
tests := []string{
|
||||
// Tests generated by go-fuzz to maximize code coverage
|
||||
"0x000000000000000000",
|
||||
"0xef0400",
|
||||
"0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb",
|
||||
"0x7b64000000",
|
||||
"0x000034000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f0000000000000000000",
|
||||
"0x4912385c0e7b64000000",
|
||||
"0x000034000000000000000000000000000000",
|
||||
"0x00",
|
||||
"0x000003e834ff7f0000",
|
||||
"0x0000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000ff00",
|
||||
"0x895f0c6a020f850c6a020f85f88df88d",
|
||||
"0xdf7070533534333636313639343638373432313536346c1bc3315aac2f65fefb",
|
||||
"0x0000000000",
|
||||
"0xdf70706336346c65fefb",
|
||||
"0x00006d643634000000",
|
||||
"0xdf7070533534333636313639343638373532313536346c1bc333393438373130707063363430353639343638373532313536346c1bc333393438336336346c65fe",
|
||||
}
|
||||
for i, tt := range tests {
|
||||
data := hexutil.MustDecode(tt)
|
||||
|
||||
proc, err := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
if err != nil {
|
||||
t.Errorf("test %d: failed to decompress compressed data: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(data, proc) {
|
||||
t.Errorf("test %d: compress/decompress mismatch: have %x, want %x", i, proc, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that data bitset decoding and reencoding works and is bijective.
|
||||
func TestDecodingCycle(t *testing.T) {
|
||||
tests := []struct {
|
||||
size int
|
||||
input string
|
||||
fail error
|
||||
}{
|
||||
{size: 0, input: "0x"},
|
||||
|
||||
// Crashers generated by go-fuzz
|
||||
{size: 0, input: "0x0020", fail: errUnreferencedData},
|
||||
{size: 0, input: "0x30", fail: errUnreferencedData},
|
||||
{size: 1, input: "0x00", fail: errUnreferencedData},
|
||||
{size: 2, input: "0x07", fail: errMissingData},
|
||||
{size: 1024, input: "0x8000", fail: errZeroContent},
|
||||
|
||||
// Tests generated by go-fuzz to maximize code coverage
|
||||
{size: 29490, input: "0x343137343733323134333839373334323073333930783e3078333930783e70706336346c65303e", fail: errMissingData},
|
||||
{size: 59395, input: "0x00", fail: errUnreferencedData},
|
||||
{size: 52574, input: "0x70706336346c65c0de", fail: errExceededTarget},
|
||||
{size: 42264, input: "0x07", fail: errMissingData},
|
||||
{size: 52, input: "0xa5045bad48f4", fail: errExceededTarget},
|
||||
{size: 52574, input: "0xc0de", fail: errMissingData},
|
||||
{size: 52574, input: "0x"},
|
||||
{size: 29490, input: "0x34313734373332313433383937333432307333393078073034333839373334323073333930783e3078333937333432307333393078073061333930783e70706336346c65303e", fail: errMissingData},
|
||||
{size: 29491, input: "0x3973333930783e30783e", fail: errMissingData},
|
||||
|
||||
{size: 1024, input: "0x808080608080"},
|
||||
{size: 1024, input: "0x808470705e3632383337363033313434303137393130306c6580ef46806380635a80"},
|
||||
{size: 1024, input: "0x8080808070"},
|
||||
{size: 1024, input: "0x808070705e36346c6580ef46806380635a80"},
|
||||
{size: 1024, input: "0x80808046802680"},
|
||||
{size: 1024, input: "0x4040404035"},
|
||||
{size: 1024, input: "0x4040bf3ba2b3f684402d353234373438373934409fe5b1e7ada94ebfd7d0505e27be4035"},
|
||||
{size: 1024, input: "0x404040bf3ba2b3f6844035"},
|
||||
{size: 1024, input: "0x40402d35323437343837393440bfd7d0505e27be4035"},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
data := hexutil.MustDecode(tt.input)
|
||||
|
||||
orig, err := bitsetDecodeBytes(data, tt.size)
|
||||
if err != tt.fail {
|
||||
t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.fail)
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if comp := bitsetEncodeBytes(orig); !bytes.Equal(comp, data) {
|
||||
t.Errorf("test %d: decompress/compress mismatch: have %x, want %x", i, comp, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompression tests that compression works by returning either the bitset
|
||||
// encoded input, or the actual input if the bitset version is longer.
|
||||
func TestCompression(t *testing.T) {
|
||||
// Check that compression returns the bitset encoding when that encoding is shorter
|
||||
in := hexutil.MustDecode("0x4912385c0e7b64000000")
|
||||
out := hexutil.MustDecode("0x80fe4912385c0e7b64")
|
||||
|
||||
if data := CompressBytes(in); !bytes.Equal(data, out) {
|
||||
t.Errorf("encoding mismatch for sparse data: have %x, want %x", data, out)
|
||||
}
|
||||
if data, err := DecompressBytes(out, len(in)); err != nil || !bytes.Equal(data, in) {
|
||||
t.Errorf("decoding mismatch for sparse data: have %x, want %x, error %v", data, in, err)
|
||||
}
|
||||
// Check that compression returns the input unchanged if the bitset encoding is longer
|
||||
in = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
|
||||
out = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
|
||||
|
||||
if data := CompressBytes(in); !bytes.Equal(data, out) {
|
||||
t.Errorf("encoding mismatch for dense data: have %x, want %x", data, out)
|
||||
}
|
||||
if data, err := DecompressBytes(out, len(in)); err != nil || !bytes.Equal(data, in) {
|
||||
t.Errorf("decoding mismatch for dense data: have %x, want %x, error %v", data, in, err)
|
||||
}
|
||||
// Check that decompressing a longer input than the target fails
|
||||
if _, err := DecompressBytes([]byte{0xc0, 0x01, 0x01}, 2); err != errExceededTarget {
|
||||
t.Errorf("decoding error mismatch for long data: have %v, want %v", err, errExceededTarget)
|
||||
}
|
||||
}
|
||||
|
||||
// Crude benchmark for compressing random slices of bytes.
|
||||
func BenchmarkEncoding1KBVerySparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.0001) }
|
||||
func BenchmarkEncoding2KBVerySparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.0001) }
|
||||
func BenchmarkEncoding4KBVerySparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.0001) }
|
||||
|
||||
func BenchmarkEncoding1KBSparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.001) }
|
||||
func BenchmarkEncoding2KBSparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.001) }
|
||||
func BenchmarkEncoding4KBSparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.001) }
|
||||
|
||||
func BenchmarkEncoding1KBDense(b *testing.B) { benchmarkEncoding(b, 1024, 0.1) }
|
||||
func BenchmarkEncoding2KBDense(b *testing.B) { benchmarkEncoding(b, 2048, 0.1) }
|
||||
func BenchmarkEncoding4KBDense(b *testing.B) { benchmarkEncoding(b, 4096, 0.1) }
|
||||
|
||||
func BenchmarkEncoding1KBSaturated(b *testing.B) { benchmarkEncoding(b, 1024, 0.5) }
|
||||
func BenchmarkEncoding2KBSaturated(b *testing.B) { benchmarkEncoding(b, 2048, 0.5) }
|
||||
func BenchmarkEncoding4KBSaturated(b *testing.B) { benchmarkEncoding(b, 4096, 0.5) }
|
||||
|
||||
func benchmarkEncoding(b *testing.B, bytes int, fill float64) {
|
||||
// Generate a random slice of bytes to compress
|
||||
random := rand.NewSource(0) // reproducible and comparable
|
||||
|
||||
data := make([]byte, bytes)
|
||||
bits := int(float64(bytes) * 8 * fill)
|
||||
|
||||
for i := 0; i < bits; i++ {
|
||||
idx := random.Int63() % int64(len(data))
|
||||
bit := uint(random.Int63() % 8)
|
||||
data[idx] |= 1 << bit
|
||||
}
|
||||
// Reset the benchmark and measure encoding/decoding
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff.