Compare commits
370 Commits
@@ -1,4 +1,7 @@
**/.git
.git
!.git/HEAD
!.git/refs/heads
**/*_test.go

build/_workspace
9 .github/CODEOWNERS vendored Normal file
@@ -0,0 +1,9 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.

accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
6 .github/ISSUE_TEMPLATE.md vendored
@@ -1,3 +1,9 @@
Hi there,

please note that this is an issue tracker reserved for bug reports and feature requests.

For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.

#### System information

Geth version: `geth version`
8 .gitignore vendored
@@ -30,3 +30,11 @@ build/_vendor/pkg
# travis
profile.tmp
profile.cov

# IdeaIDE
.idea

# dashboard
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/public/bundle.js
54 .travis.yml
@@ -6,7 +6,19 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.7.6
go: 1.7.x
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage

- os: linux
dist: trusty
sudo: required
go: 1.8.x
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -19,30 +31,44 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.3
go: 1.9.x
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage -misspell
- go run build/ci.go test -coverage

- os: osx
go: 1.8.3
go: 1.9.x
sudo: required
script:
- brew update
- brew install caskroom/cask/brew-cask
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage -misspell
- go run build/ci.go test -coverage

# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
sudo: required
go: 1.9.x
env:
- lint
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go lint

# This builder does the Ubuntu PPA and Linux Azure uploads
- os: linux
dist: trusty
sudo: required
go: 1.8.3
go: 1.9.x
env:
- ubuntu-ppa
- azure-linux
@@ -81,7 +107,7 @@ matrix:
sudo: required
services:
- docker
go: 1.8.3
go: 1.9.x
env:
- azure-linux-mips
script:
@@ -121,16 +147,16 @@ matrix:
- azure-android
- maven-android
before_install:
- curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
- mv android-ndk-r14b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r14b
- curl https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip -o android-ndk-r15c.zip
- unzip -q android-ndk-r15c.zip && rm android-ndk-r15c.zip
- mv android-ndk-r15c $HOME
- export ANDROID_NDK=$HOME/android-ndk-r15c

- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -138,7 +164,7 @@ matrix:

# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.8.3
go: 1.9.x
env:
- azure-osx
- azure-ios
@@ -164,7 +190,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.3
go: 1.9.x
env:
- azure-purge
script:
21 Dockerfile
@@ -1,15 +1,16 @@
FROM alpine:3.5
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

ADD . /go-ethereum
RUN \
apk add --update git go make gcc musl-dev linux-headers && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /usr/local/bin/ && \
apk del git go make gcc musl-dev linux-headers && \
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
RUN cd /go-ethereum && make geth

EXPOSE 8545
EXPOSE 30303
EXPOSE 30303/udp
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest

RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 30304/udp
ENTRYPOINT ["geth"]
15 Dockerfile.alltools Normal file
@@ -0,0 +1,15 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

ADD . /go-ethereum
RUN cd /go-ethereum && make all

# Pull all binaries into a second stage deploy alpine container
FROM alpine:latest

RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 30304/udp
7 Makefile
@@ -8,7 +8,7 @@
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64

GOBIN = build/bin
GOBIN = $(shell pwd)/build/bin
GO ?= latest

geth:
@@ -21,11 +21,6 @@ swarm:
@echo "Done building."
@echo "Run \"$(GOBIN)/swarm\" to launch swarm."

evm:
build/env.sh go run build/ci.go install ./cmd/evm
@echo "Done building."
@echo "Run \"$(GOBIN)/evm\" to start the evm."

all:
build/env.sh go run build/ci.go install
10 README.md
@@ -35,11 +35,11 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow insolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

## Running geth

@@ -116,7 +116,7 @@ To get an idea how the file should look like you can use the `dumpconfig` subcom
$ geth --your-favourite-flags dumpconfig
```

*Note: This works only with geth v1.6.0 and above*
*Note: This works only with geth v1.6.0 and above.*

#### Docker quick start

@@ -130,6 +130,8 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \

This will start geth in fast sync mode with a DB memory allowance of 512MB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.

Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not accessible from the outside.

### Programatically interfacing Geth nodes

As a developer, sooner rather than later you'll want to start interacting with Geth and the Ethereum
@@ -264,7 +266,7 @@ instance for mining, run it with all your usual flags, extended by:
$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
```

Which will start mining bocks and transactions on a single CPU thread, crediting all proceedings to
Which will start mining blocks and transactions on a single CPU thread, crediting all proceedings to
the account specified by `--etherbase`. You can further tune the mining by changing the default gas
limit blocks converge to (`--targetgaslimit`) and the price transactions are accepted at (`--gasprice`).
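The README hunk above points at the JSON-RPC endpoints as the way to drive Geth programmatically. As an illustration only (not part of this diff), a minimal Go sketch using go-ethereum's `ethclient` package could look like the following; it assumes a local node with the HTTP-RPC server enabled on the default port 8545.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumes a local geth node exposing HTTP-RPC on the default port.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("failed to connect to node: %v", err)
	}
	// Fetch the latest header as a simple connectivity check.
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatalf("failed to fetch latest header: %v", err)
	}
	fmt.Println("latest block number:", header.Number)
}
```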
@@ -20,10 +20,6 @@ import (
"encoding/json"
"fmt"
"io"
"reflect"
"strings"

"github.com/ethereum/go-ethereum/common"
)

// The ABI holds information about a contract's context and available
@@ -76,106 +72,27 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return append(method.Id(), arguments...), nil
}

// these variable are used to determine certain types during type assertion for
// assignment.
var (
r_interSlice = reflect.TypeOf([]interface{}{})
r_hash = reflect.TypeOf(common.Hash{})
r_bytes = reflect.TypeOf([]byte{})
r_byte = reflect.TypeOf(byte(0))
)

// Unpack output in v according to the abi specification
func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
var method = abi.Methods[name]

if len(output) == 0 {
return fmt.Errorf("abi: unmarshalling empty output")
func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
if err = bytesAreProper(output); err != nil {
return err
}

// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}

var (
value = valueOf.Elem()
typ = value.Type()
)

if len(method.Outputs) > 1 {
switch value.Kind() {
// struct will match named return values to the struct's field
// names
case reflect.Struct:
for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)

for j := 0; j < typ.NumField(); j++ {
field := typ.Field(j)
// TODO read tags: `abi:"fieldName"`
if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
if err := set(value.Field(j), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
}
}
case reflect.Slice:
if !value.Type().AssignableTo(r_interSlice) {
return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
}

// if the slice already contains values, set those instead of the interface slice itself.
if value.Len() > 0 {
if len(method.Outputs) > value.Len() {
return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
}

for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
return nil
}

// create a new slice and start appending the unmarshalled
// values to the new interface slice.
z := reflect.MakeSlice(typ, 0, len(method.Outputs))
for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
z = reflect.Append(z, reflect.ValueOf(marshalledValue))
}
value.Set(z)
default:
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}

// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
var unpack unpacker
if method, ok := abi.Methods[name]; ok {
unpack = method
} else if event, ok := abi.Events[name]; ok {
unpack = event
} else {
marshalledValue, err := toGoType(0, method.Outputs[0], output)
if err != nil {
return err
}
if err := set(value, reflect.ValueOf(marshalledValue), method.Outputs[0]); err != nil {
return err
}
return fmt.Errorf("abi: could not locate named method or event.")
}

return nil
// requires a struct to unpack into for a tuple return...
if unpack.isTupleReturn() {
return unpack.tupleUnpack(v, output)
}
return unpack.singleUnpack(v, output)
}

func (abi *ABI) UnmarshalJSON(data []byte) error {
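The hunk above replaces the monolithic `Unpack` with a dispatch through a shared `unpacker` interface that both `Method` and `Event` implement. The following standalone sketch shows only that dispatch pattern; `toyMethod` and the maps are illustrative stand-ins, not the real `abi.Method`/`abi.Event`.

```go
package main

import "fmt"

// unpacker mirrors the interface the diff introduces: an implementation
// reports whether it returns a tuple and knows how to unpack itself.
type unpacker interface {
	isTupleReturn() bool
	tupleUnpack(v interface{}, output []byte) error
	singleUnpack(v interface{}, output []byte) error
}

// toyMethod is an illustrative stand-in; a toyEvent would look the same.
type toyMethod struct{ outputs int }

func (m toyMethod) isTupleReturn() bool { return m.outputs > 1 }

func (m toyMethod) tupleUnpack(v interface{}, output []byte) error {
	fmt.Println("unpacking a tuple return")
	return nil
}

func (m toyMethod) singleUnpack(v interface{}, output []byte) error {
	fmt.Println("unpacking a single return value")
	return nil
}

// unpack dispatches the same way the rewritten ABI.Unpack does: pick the
// named method or event, then choose tuple or single unpacking.
func unpack(methods, events map[string]unpacker, name string, v interface{}, output []byte) error {
	var u unpacker
	if m, ok := methods[name]; ok {
		u = m
	} else if e, ok := events[name]; ok {
		u = e
	} else {
		return fmt.Errorf("abi: could not locate named method or event")
	}
	if u.isTupleReturn() {
		return u.tupleUnpack(v, output)
	}
	return u.singleUnpack(v, output)
}

func main() {
	methods := map[string]unpacker{"balance": toyMethod{outputs: 1}}
	if err := unpack(methods, nil, "balance", nil, nil); err != nil {
		fmt.Println("error:", err)
	}
}
```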
@@ -29,25 +29,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)

// formatSilceOutput add padding to the value and adds a size
func formatSliceOutput(v ...[]byte) []byte {
off := common.LeftPadBytes(big.NewInt(int64(len(v))).Bytes(), 32)
output := append(off, make([]byte, 0, len(v)*32)...)

for _, value := range v {
output = append(output, common.LeftPadBytes(value, 32)...)
}
return output
}

// quick helper padding
func pad(input []byte, size int, left bool) []byte {
if left {
return common.LeftPadBytes(input, size)
}
return common.RightPadBytes(input, size)
}

const jsondata = `
[
{ "type" : "function", "name" : "balance", "constant" : true },
@@ -191,7 +172,7 @@ func TestMethodSignature(t *testing.T) {
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
}

uintt, _ := NewType("uint")
uintt, _ := NewType("uint256")
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
@@ -22,6 +22,7 @@ import (
"fmt"
"math/big"
"sync"
"time"

"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -33,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
)

@@ -41,6 +41,7 @@ import (
var _ bind.ContractBackend = (*SimulatedBackend)(nil)

var errBlockNumberUnsupported = errors.New("SimulatedBackend cannot access blocks other than the latest block")
var errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")

// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
// the background. Its main purpose is to allow easily testing contract bindings.
@@ -59,9 +60,9 @@ type SimulatedBackend struct {
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database, _ := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), vm.Config{})
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
backend.rollback()
return backend
@@ -144,7 +145,8 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres

// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
return core.GetReceipt(b.database, txHash), nil
receipt, _, _, _ := core.GetReceipt(b.database, txHash)
return receipt, nil
}

// PendingCodeAt returns the code associated with an account in the pending state.
@@ -167,7 +169,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
if err != nil {
return nil, err
}
rval, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
return rval, err
}

@@ -177,7 +179,7 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu
defer b.mu.Unlock()
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())

rval, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
return rval, err
}

@@ -202,36 +204,53 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
b.mu.Lock()
defer b.mu.Unlock()

// Binary search the gas requirement, as it may be higher than the amount used
var lo, hi uint64
if call.Gas != nil {
// Determine the lowest and highest possible gas limits to binary search in between
var (
lo uint64 = params.TxGas - 1
hi uint64
cap uint64
)
if call.Gas != nil && call.Gas.Uint64() >= params.TxGas {
hi = call.Gas.Uint64()
} else {
hi = b.pendingBlock.GasLimit().Uint64()
}
for lo+1 < hi {
// Take a guess at the gas, and check transaction validity
mid := (hi + lo) / 2
call.Gas = new(big.Int).SetUint64(mid)
cap = hi

// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
call.Gas = new(big.Int).SetUint64(gas)

snapshot := b.pendingState.Snapshot()
_, gas, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
b.pendingState.RevertToSnapshot(snapshot)

// If the transaction became invalid or used all the gas (failed), raise the gas limit
if err != nil || gas.Cmp(call.Gas) == 0 {
lo = mid
continue
if err != nil || failed {
return false
}
return true
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
if !executable(mid) {
lo = mid
} else {
hi = mid
}
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
return nil, errGasEstimationFailed
}
// Otherwise assume the transaction succeeded, lower the gas limit
hi = mid
}
return new(big.Int).SetUint64(hi), nil
}

// callContract implemens common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, *big.Int, error) {
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, *big.Int, bool, error) {
// Ensure message is initialized properly.
if call.GasPrice == nil {
call.GasPrice = big.NewInt(1)
@@ -253,8 +272,8 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
ret, gasUsed, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
return ret, gasUsed, err
ret, gasUsed, _, failed, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
return ret, gasUsed, failed, err
}

// SendTransaction updates the pending block to include the given transaction.
@@ -283,6 +302,22 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
return nil
}

// JumpTimeInSeconds adds skip seconds to the clock
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
b.mu.Lock()
defer b.mu.Unlock()
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
}
block.OffsetTime(int64(adjustment.Seconds()))
})
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))

return nil
}

// callmsg implements core.Message to allow passing it as a transaction simulator.
type callmsg struct {
ethereum.CallMsg
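The `EstimateGas` change above drops the old `gas == call.Gas` heuristic in favour of a binary search over gas allowances driven by an `executable` predicate. A self-contained sketch of that search, with a toy predicate standing in for the backend's `callContract`, is shown below.

```go
package main

import (
	"errors"
	"fmt"
)

// estimateGas binary-searches the lowest gas allowance in (txGas-1, blockGasLimit]
// for which executable reports success, mirroring the strategy in the hunk above.
// txGas and blockGasLimit stand in for params.TxGas and the pending block's limit.
func estimateGas(txGas, blockGasLimit uint64, executable func(gas uint64) bool) (uint64, error) {
	lo, hi := txGas-1, blockGasLimit
	highest := hi
	for lo+1 < hi {
		mid := (hi + lo) / 2
		if !executable(mid) {
			lo = mid // still failing: raise the lower bound
		} else {
			hi = mid // succeeded: try a smaller allowance
		}
	}
	// Reject the call as invalid if it still fails at the highest allowance.
	if hi == highest && !executable(hi) {
		return 0, errors.New("gas required exceeds allowance or always failing transaction")
	}
	return hi, nil
}

func main() {
	// Toy predicate: pretend the call needs at least 60000 gas to succeed.
	gas, err := estimateGas(21000, 8000000, func(g uint64) bool { return g >= 60000 })
	fmt.Println(gas, err)
}
```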
@@ -122,7 +122,7 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
}
// For Go bindings pass the code through goimports to clean it up and double check
if lang == LangGo {
code, err := imports.Process("", buffer.Bytes(), nil)
code, err := imports.Process(".", buffer.Bytes(), nil)
if err != nil {
return "", fmt.Errorf("%v\n%s", err, buffer)
}
@@ -459,7 +459,7 @@ func TestBindings(t *testing.T) {
}
// Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845)
linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}")
linkTestDeps, err := imports.Process("", []byte(linkTestCode), nil)
linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil)
if err != nil {
t.Fatalf("failed check for goimports symlink bug: %v", err)
}
@@ -52,8 +52,8 @@ var tmplSource = map[Lang]string{
// tmplSourceGo is the Go source template use to generate the contract binding
// based on.
const tmplSourceGo = `
// This file is an automatically generated Go binding. Do not modify as any
// change will likely be lost upon the next re-generation!
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.

package {{.Package}}
@@ -39,22 +39,23 @@ func formatSliceString(kind reflect.Kind, sliceSize int) string {
// type in t.
func sliceTypeCheck(t Type, val reflect.Value) error {
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
return typeErr(formatSliceString(t.Kind, t.SliceSize), val.Type())
}
if t.IsArray && val.Len() != t.SliceSize {
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), formatSliceString(val.Type().Elem().Kind(), val.Len()))
return typeErr(formatSliceString(t.Kind, t.Size), val.Type())
}

if t.Elem.IsSlice {
if t.T == ArrayTy && val.Len() != t.Size {
return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
}

if t.Elem.T == SliceTy {
if val.Len() > 0 {
return sliceTypeCheck(*t.Elem, val.Index(0))
}
} else if t.Elem.IsArray {
} else if t.Elem.T == ArrayTy {
return sliceTypeCheck(*t.Elem, val.Index(0))
}

if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), val.Type())
return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type())
}
return nil
}
@@ -62,20 +63,19 @@ func sliceTypeCheck(t Type, val reflect.Value) error {
// typeCheck checks that the given reflection value can be assigned to the reflection
// type in t.
func typeCheck(t Type, value reflect.Value) error {
if t.IsSlice || t.IsArray {
if t.T == SliceTy || t.T == ArrayTy {
return sliceTypeCheck(t, value)
}

// Check base type validity. Element types will be checked later on.
if t.Kind != value.Kind() {
return typeErr(t.Kind, value.Kind())
} else if t.T == FixedBytesTy && t.Size != value.Len() {
return typeErr(t.Type, value.Type())
} else {
return nil
}
return nil
}

// varErr returns a formatted error.
func varErr(expected, got reflect.Kind) error {
return typeErr(expected, got)
}

// typeErr returns a formatted type casting error.
@@ -18,6 +18,7 @@ package abi

import (
"fmt"
"reflect"
"strings"

"github.com/ethereum/go-ethereum/common"
@@ -44,3 +45,93 @@ func (e Event) Id() common.Hash {
}
return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
}

// unpacks an event return tuple into a struct of corresponding go types
//
// Unpacking can be done into a struct or a slice/array.
func (e Event) tupleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}

var (
value = valueOf.Elem()
typ = value.Type()
)

if value.Kind() != reflect.Struct {
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}

j := 0
for i := 0; i < len(e.Inputs); i++ {
input := e.Inputs[i]
if input.Indexed {
// can't read, continue
continue
} else if input.Type.T == ArrayTy {
// need to move this up because they read sequentially
j += input.Type.Size
}
marshalledValue, err := toGoType((i+j)*32, input.Type, output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)

switch value.Kind() {
case reflect.Struct:
for j := 0; j < typ.NumField(); j++ {
field := typ.Field(j)
// TODO read tags: `abi:"fieldName"`
if field.Name == strings.ToUpper(e.Inputs[i].Name[:1])+e.Inputs[i].Name[1:] {
if err := set(value.Field(j), reflectValue, e.Inputs[i]); err != nil {
return err
}
}
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(e.Inputs), value.Len())
}
v := value.Index(i)
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
return fmt.Errorf("abi: cannot unmarshal %v in to %v", v.Type(), reflectValue.Type())
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(v.Elem(), reflectValue, e.Inputs[i]); err != nil {
return err
}
default:
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}
}
return nil
}

func (e Event) isTupleReturn() bool { return len(e.Inputs) > 1 }

func (e Event) singleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}

if e.Inputs[0].Indexed {
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
}

value := valueOf.Elem()

marshalledValue, err := toGoType(0, e.Inputs[0].Type, output)
if err != nil {
return err
}
if err := set(value, reflect.ValueOf(marshalledValue), e.Inputs[0]); err != nil {
return err
}
return nil
}
@@ -31,7 +31,7 @@ func TestEventId(t *testing.T) {
}{
{
definition: `[
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint" }] },
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
@@ -77,6 +77,85 @@ func (method Method) pack(args ...interface{}) ([]byte, error) {
return ret, nil
}

// unpacks a method return tuple into a struct of corresponding go types
//
// Unpacking can be done into a struct or a slice/array.
func (method Method) tupleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}

var (
value = valueOf.Elem()
typ = value.Type()
)

j := 0
for i := 0; i < len(method.Outputs); i++ {
toUnpack := method.Outputs[i]
if toUnpack.Type.T == ArrayTy {
// need to move this up because they read sequentially
j += toUnpack.Type.Size
}
marshalledValue, err := toGoType((i+j)*32, toUnpack.Type, output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)

switch value.Kind() {
case reflect.Struct:
for j := 0; j < typ.NumField(); j++ {
field := typ.Field(j)
// TODO read tags: `abi:"fieldName"`
if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
if err := set(value.Field(j), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(method.Outputs), value.Len())
}
v := value.Index(i)
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
return fmt.Errorf("abi: cannot unmarshal %v in to %v", v.Type(), reflectValue.Type())
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(v.Elem(), reflectValue, method.Outputs[i]); err != nil {
return err
}
default:
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}
}
return nil
}

func (method Method) isTupleReturn() bool { return len(method.Outputs) > 1 }

func (method Method) singleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}

value := valueOf.Elem()

marshalledValue, err := toGoType(0, method.Outputs[0].Type, output)
if err != nil {
return err
}
if err := set(value, reflect.ValueOf(marshalledValue), method.Outputs[0]); err != nil {
return err
}
return nil
}

// Sig returns the methods string signature according to the ABI spec.
//
// Example
@@ -25,36 +25,23 @@ import (
)

var (
big_t = reflect.TypeOf(big.Int{})
ubig_t = reflect.TypeOf(big.Int{})
byte_t = reflect.TypeOf(byte(0))
byte_ts = reflect.TypeOf([]byte(nil))
uint_t = reflect.TypeOf(uint(0))
uint8_t = reflect.TypeOf(uint8(0))
uint16_t = reflect.TypeOf(uint16(0))
uint32_t = reflect.TypeOf(uint32(0))
uint64_t = reflect.TypeOf(uint64(0))
int_t = reflect.TypeOf(int(0))
int8_t = reflect.TypeOf(int8(0))
int16_t = reflect.TypeOf(int16(0))
int32_t = reflect.TypeOf(int32(0))
int64_t = reflect.TypeOf(int64(0))
hash_t = reflect.TypeOf(common.Hash{})
address_t = reflect.TypeOf(common.Address{})

uint_ts = reflect.TypeOf([]uint(nil))
uint8_ts = reflect.TypeOf([]uint8(nil))
uint16_ts = reflect.TypeOf([]uint16(nil))
uint32_ts = reflect.TypeOf([]uint32(nil))
uint64_ts = reflect.TypeOf([]uint64(nil))
ubig_ts = reflect.TypeOf([]*big.Int(nil))

int_ts = reflect.TypeOf([]int(nil))
int8_ts = reflect.TypeOf([]int8(nil))
int16_ts = reflect.TypeOf([]int16(nil))
int32_ts = reflect.TypeOf([]int32(nil))
int64_ts = reflect.TypeOf([]int64(nil))
big_ts = reflect.TypeOf([]*big.Int(nil))
big_t = reflect.TypeOf(&big.Int{})
derefbig_t = reflect.TypeOf(big.Int{})
uint8_t = reflect.TypeOf(uint8(0))
uint16_t = reflect.TypeOf(uint16(0))
uint32_t = reflect.TypeOf(uint32(0))
uint64_t = reflect.TypeOf(uint64(0))
int_t = reflect.TypeOf(int(0))
int8_t = reflect.TypeOf(int8(0))
int16_t = reflect.TypeOf(int16(0))
int32_t = reflect.TypeOf(int32(0))
int64_t = reflect.TypeOf(int64(0))
address_t = reflect.TypeOf(common.Address{})
int_ts = reflect.TypeOf([]int(nil))
int8_ts = reflect.TypeOf([]int8(nil))
int16_ts = reflect.TypeOf([]int16(nil))
int32_ts = reflect.TypeOf([]int32(nil))
int64_ts = reflect.TypeOf([]int64(nil))
)

// U256 converts a big Int into a 256bit EVM number.
@@ -61,8 +61,9 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
reflectValue = mustArrayToByteSlice(reflectValue)
}
return common.RightPadBytes(reflectValue.Bytes(), 32)
default:
panic("abi: fatal error")
}
panic("abi: fatal error")
}

// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
@@ -74,6 +75,8 @@ func packNum(value reflect.Value) []byte {
return U256(big.NewInt(value.Int()))
case reflect.Ptr:
return U256(value.Interface().(*big.Int))
default:
panic("abi: fatal error")
}
return nil

}
@@ -322,12 +322,12 @@ func TestPack(t *testing.T) {
} {
typ, err := NewType(test.typ)
if err != nil {
t.Fatal("unexpected parse error:", err)
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
}

output, err := typ.pack(reflect.ValueOf(test.input))
if err != nil {
t.Fatal("unexpected pack error:", err)
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
}

if !bytes.Equal(output, test.output) {
@@ -435,7 +435,4 @@ func TestPackNumber(t *testing.T) {
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
}
}
if packed := packNum(reflect.ValueOf("string")); packed != nil {
t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
}
}
@@ -24,7 +24,7 @@ import (
// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
func indirect(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Ptr && v.Elem().Type() != big_t {
if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbig_t {
return indirect(v.Elem())
}
return v
@@ -73,15 +73,9 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
func set(dst, src reflect.Value, output Argument) error {
dstType := dst.Type()
srcType := src.Type()

switch {
case dstType.AssignableTo(src.Type()):
case dstType.AssignableTo(srcType):
dst.Set(src)
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
if dst.Len() < output.Type.SliceSize {
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
}
reflect.Copy(dst, src)
case dstType.Kind() == reflect.Interface:
dst.Set(src)
case dstType.Kind() == reflect.Ptr:
@@ -21,6 +21,7 @@ import (
"reflect"
"regexp"
"strconv"
"strings"
)

const (
@@ -29,6 +30,7 @@ const (
BoolTy
StringTy
SliceTy
ArrayTy
AddressTy
FixedBytesTy
BytesTy
@@ -39,9 +41,6 @@ const (

// Type is the reflection of the supported argument type
type Type struct {
IsSlice, IsArray bool
SliceSize int

Elem *Type

Kind reflect.Kind
@@ -53,118 +52,116 @@ type Type struct {
}

var (
// fullTypeRegex parses the abi types
//
// Types can be in the format of:
//
// Input = Type [ "[" [ Number ] "]" ] Name .
// Type = [ "u" ] "int" [ Number ] [ x ] [ Number ].
//
// Examples:
//
// string int uint fixed
// string32 int8 uint8 uint[]
// address int256 uint256 fixed128x128[2]
fullTypeRegex = regexp.MustCompile(`([a-zA-Z0-9]+)(\[([0-9]*)\])?`)
// typeRegex parses the abi sub types
typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
)

// NewType creates a new reflection type of abi type given in t.
func NewType(t string) (typ Type, err error) {
res := fullTypeRegex.FindAllStringSubmatch(t, -1)[0]
// check if type is slice and parse type.
switch {
case res[3] != "":
// err is ignored. Already checked for number through the regexp
typ.SliceSize, _ = strconv.Atoi(res[3])
typ.IsArray = true
case res[2] != "":
typ.IsSlice, typ.SliceSize = true, -1
case res[0] == "":
return Type{}, fmt.Errorf("abi: type parse error: %s", t)
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
}
if typ.IsArray || typ.IsSlice {
sliceType, err := NewType(res[1])

typ.stringKind = t

// if there are brackets, get ready to go into slice/array mode and
// recursively create the type
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
embeddedType, err := NewType(t[:i])
if err != nil {
return Type{}, err
}
typ.Elem = &sliceType
typ.stringKind = sliceType.stringKind + t[len(res[1]):]
// Although we know that this is an array, we cannot return
// as we don't know the type of the element, however, if it
// is still an array, then don't determine the type.
if typ.Elem.IsArray || typ.Elem.IsSlice {
return typ, nil
}
}
// grab the last cell and create a type from there
sliced := t[i:]
// grab the slice size with regexp
re := regexp.MustCompile("[0-9]+")
intz := re.FindAllString(sliced, -1)

// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(res[1], -1)[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
var err error
varSize, err = strconv.Atoi(parsedType[2])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
}
// varType is the parsed abi type
varType := parsedType[1]
// substitute canonical integer
if varSize == 0 && (varType == "int" || varType == "uint") {
varSize = 256
t += "256"
}

// only set stringKind if not array or slice, as for those,
// the correct string type has been set
if !(typ.IsArray || typ.IsSlice) {
typ.stringKind = t
}

switch varType {
case "int":
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
typ.Size = varSize
typ.T = IntTy
case "uint":
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
typ.Size = varSize
typ.T = UintTy
case "bool":
typ.Kind = reflect.Bool
typ.T = BoolTy
case "address":
typ.Kind = reflect.Array
typ.Type = address_t
typ.Size = 20
typ.T = AddressTy
case "string":
typ.Kind = reflect.String
typ.Size = -1
typ.T = StringTy
case "bytes":
sliceType, _ := NewType("uint8")
typ.Elem = &sliceType
if varSize == 0 {
typ.IsSlice = true
typ.T = BytesTy
typ.SliceSize = -1
if len(intz) == 0 {
// is a slice
typ.T = SliceTy
typ.Kind = reflect.Slice
typ.Elem = &embeddedType
typ.Type = reflect.SliceOf(embeddedType.Type)
} else if len(intz) == 1 {
// is a array
typ.T = ArrayTy
typ.Kind = reflect.Array
typ.Elem = &embeddedType
typ.Size, err = strconv.Atoi(intz[0])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
} else {
typ.IsArray = true
typ.T = FixedBytesTy
typ.SliceSize = varSize
return Type{}, fmt.Errorf("invalid formatting of array type")
}
return typ, err
} else {
// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
var err error
varSize, err = strconv.Atoi(parsedType[2])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
} else {
if parsedType[0] == "uint" || parsedType[0] == "int" {
// this should fail because it means that there's something wrong with
// the abi type (the compiler should always format it to the size...always)
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
}
// varType is the parsed abi type
varType := parsedType[1]

switch varType {
case "int":
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
typ.Size = varSize
typ.T = IntTy
case "uint":
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
typ.Size = varSize
typ.T = UintTy
case "bool":
typ.Kind = reflect.Bool
typ.T = BoolTy
typ.Type = reflect.TypeOf(bool(false))
case "address":
typ.Kind = reflect.Array
typ.Type = address_t
typ.Size = 20
typ.T = AddressTy
case "string":
typ.Kind = reflect.String
typ.Type = reflect.TypeOf("")
typ.T = StringTy
case "bytes":
if varSize == 0 {
typ.T = BytesTy
typ.Kind = reflect.Slice
typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
} else {
typ.T = FixedBytesTy
typ.Kind = reflect.Array
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
case "function":
typ.Kind = reflect.Array
typ.T = FunctionTy
typ.Size = 24
typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
case "function":
sliceType, _ := NewType("uint8")
typ.Elem = &sliceType
typ.IsArray = true
typ.T = FunctionTy
typ.SliceSize = 24
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}

return
@@ -183,7 +180,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err
}

if (t.IsSlice || t.IsArray) && t.T != BytesTy && t.T != FixedBytesTy && t.T != FunctionTy {
if t.T == SliceTy || t.T == ArrayTy {
var packed []byte

for i := 0; i < v.Len(); i++ {
@@ -193,18 +190,17 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
}
packed = append(packed, val...)
}
if t.IsSlice {
if t.T == SliceTy {
return packBytesSlice(packed, v.Len()), nil
} else if t.IsArray {
} else if t.T == ArrayTy {
return packed, nil
}
}

return packElement(t, v), nil
}

// requireLengthPrefix returns whether the type requires any sort of length
// prefixing.
func (t Type) requiresLengthPrefix() bool {
return t.T != FixedBytesTy && (t.T == StringTy || t.T == BytesTy || t.IsSlice)
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
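The rewritten `NewType` above drops the `IsSlice`/`IsArray` flags and instead peels array and slice suffixes off the type string from the right, recursing on whatever sits before the last `[`. The sketch below reproduces only that string decomposition; it is illustrative and skips the reflect plumbing the real code builds.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// describe decomposes an ABI type string the same way the new NewType does:
// find the last '[', recurse on everything before it, and treat the bracket
// contents as either a dynamic slice or a fixed-size array.
func describe(t string) string {
	i := strings.LastIndex(t, "[")
	if i == -1 {
		return t // elementary type such as "bool" or "uint256"
	}
	elem := describe(t[:i])
	inner := strings.TrimSuffix(t[i+1:], "]")
	if inner == "" {
		return "slice of " + elem
	}
	n, err := strconv.Atoi(inner)
	if err != nil {
		return "invalid array size in " + t
	}
	return fmt.Sprintf("%d-element array of %s", n, elem)
}

func main() {
	for _, t := range []string{"bool", "bool[]", "bool[2]", "bool[2][]", "bool[][2]"} {
		fmt.Println(t, "->", describe(t))
	}
}
```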
@@ -21,6 +21,7 @@ import (
"reflect"
"testing"

"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
)

@@ -34,51 +35,58 @@ func TestTypeRegexp(t *testing.T) {
blob string
kind Type
}{
{"bool", Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}},
{"bool[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}},
{"bool[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Bool, T: BoolTy, Elem: &Type{Kind: reflect.Bool, T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}},
{"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
{"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
{"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
{"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
{"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
{"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
{"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
{"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
{"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
{"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
{"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}},
|
||||
{"string[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[2]"}},
|
||||
{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
{"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
|
||||
{"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
|
||||
{"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
|
||||
{"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
|
||||
{"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
|
||||
{"address", Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}},
|
||||
{"address[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Array, Type: address_t, T: AddressTy, Size: 20, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
|
||||
{"address[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Array, Type: address_t, T: AddressTy, Size: 20, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
|
||||
|
||||
{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
|
||||
{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
|
||||
// TODO when fixed types are implemented properly
|
||||
// {"fixed", Type{}},
|
||||
// {"fixed128x128", Type{}},
|
||||
@@ -87,13 +95,14 @@ func TestTypeRegexp(t *testing.T) {
		// {"fixed128x128[]", Type{}},
		// {"fixed128x128[2]", Type{}},
	}
	for i, tt := range tests {

	for _, tt := range tests {
		typ, err := NewType(tt.blob)
		if err != nil {
			t.Errorf("type %d: failed to parse type string: %v", i, err)
			t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
		}
		if !reflect.DeepEqual(typ, tt.kind) {
			t.Errorf("type %d: parsed type mismatch:\n have %+v\n want %+v", i, typeWithoutStringer(typ), typeWithoutStringer(tt.kind))
			t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", tt.blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(tt.kind)))
		}
	}
}
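The switch to spew.Sdump in the failure message matters because Type nests a *Type via Elem, and %+v stops at the pointer value. A minimal illustration with a stand-in struct (not abi.Type):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// node is a stand-in for a struct with a pointer field, like abi.Type.Elem.
type node struct {
	Name string
	Next *node
}

func main() {
	v := node{Name: "outer", Next: &node{Name: "inner"}}
	fmt.Printf("%+v\n", v)   // prints only the pointer address for Next
	fmt.Print(spew.Sdump(v)) // dereferences Next and prints the inner struct's fields
}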
@@ -104,15 +113,90 @@ func TestTypeCheck(t *testing.T) {
|
||||
input interface{}
|
||||
err string
|
||||
}{
|
||||
{"uint", big.NewInt(1), ""},
|
||||
{"int", big.NewInt(1), ""},
|
||||
{"uint30", big.NewInt(1), ""},
|
||||
{"uint", big.NewInt(1), "unsupported arg type: uint"},
|
||||
{"int", big.NewInt(1), "unsupported arg type: int"},
|
||||
{"uint256", big.NewInt(1), ""},
|
||||
{"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
|
||||
{"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
|
||||
{"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
|
||||
{"uint8[][]", [][]uint8{}, ""},
|
||||
{"int256", big.NewInt(1), ""},
|
||||
{"uint8", uint8(1), ""},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint32", uint32(1), ""},
|
||||
{"uint64", uint64(1), ""},
|
||||
{"int8", int8(1), ""},
|
||||
{"int16", int16(1), ""},
|
||||
{"int32", int32(1), ""},
|
||||
{"int64", int64(1), ""},
|
||||
{"uint24", big.NewInt(1), ""},
|
||||
{"uint40", big.NewInt(1), ""},
|
||||
{"uint48", big.NewInt(1), ""},
|
||||
{"uint56", big.NewInt(1), ""},
|
||||
{"uint72", big.NewInt(1), ""},
|
||||
{"uint80", big.NewInt(1), ""},
|
||||
{"uint88", big.NewInt(1), ""},
|
||||
{"uint96", big.NewInt(1), ""},
|
||||
{"uint104", big.NewInt(1), ""},
|
||||
{"uint112", big.NewInt(1), ""},
|
||||
{"uint120", big.NewInt(1), ""},
|
||||
{"uint128", big.NewInt(1), ""},
|
||||
{"uint136", big.NewInt(1), ""},
|
||||
{"uint144", big.NewInt(1), ""},
|
||||
{"uint152", big.NewInt(1), ""},
|
||||
{"uint160", big.NewInt(1), ""},
|
||||
{"uint168", big.NewInt(1), ""},
|
||||
{"uint176", big.NewInt(1), ""},
|
||||
{"uint184", big.NewInt(1), ""},
|
||||
{"uint192", big.NewInt(1), ""},
|
||||
{"uint200", big.NewInt(1), ""},
|
||||
{"uint208", big.NewInt(1), ""},
|
||||
{"uint216", big.NewInt(1), ""},
|
||||
{"uint224", big.NewInt(1), ""},
|
||||
{"uint232", big.NewInt(1), ""},
|
||||
{"uint240", big.NewInt(1), ""},
|
||||
{"uint248", big.NewInt(1), ""},
|
||||
{"int24", big.NewInt(1), ""},
|
||||
{"int40", big.NewInt(1), ""},
|
||||
{"int48", big.NewInt(1), ""},
|
||||
{"int56", big.NewInt(1), ""},
|
||||
{"int72", big.NewInt(1), ""},
|
||||
{"int80", big.NewInt(1), ""},
|
||||
{"int88", big.NewInt(1), ""},
|
||||
{"int96", big.NewInt(1), ""},
|
||||
{"int104", big.NewInt(1), ""},
|
||||
{"int112", big.NewInt(1), ""},
|
||||
{"int120", big.NewInt(1), ""},
|
||||
{"int128", big.NewInt(1), ""},
|
||||
{"int136", big.NewInt(1), ""},
|
||||
{"int144", big.NewInt(1), ""},
|
||||
{"int152", big.NewInt(1), ""},
|
||||
{"int160", big.NewInt(1), ""},
|
||||
{"int168", big.NewInt(1), ""},
|
||||
{"int176", big.NewInt(1), ""},
|
||||
{"int184", big.NewInt(1), ""},
|
||||
{"int192", big.NewInt(1), ""},
|
||||
{"int200", big.NewInt(1), ""},
|
||||
{"int208", big.NewInt(1), ""},
|
||||
{"int216", big.NewInt(1), ""},
|
||||
{"int224", big.NewInt(1), ""},
|
||||
{"int232", big.NewInt(1), ""},
|
||||
{"int240", big.NewInt(1), ""},
|
||||
{"int248", big.NewInt(1), ""},
|
||||
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
|
||||
{"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
|
||||
{"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
|
||||
{"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
|
||||
{"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
|
||||
{"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
|
||||
{"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
|
||||
{"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
|
||||
{"uint16", uint16(1), ""},
|
||||
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
|
||||
{"uint16[]", []uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", [3]uint16{1, 2, 3}, ""},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type []uint16 as argument"},
|
||||
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
|
||||
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
|
||||
{"uint16[3]", []uint16{1, 2, 3}, ""},
|
||||
@@ -122,20 +206,61 @@ func TestTypeCheck(t *testing.T) {
|
||||
{"address[1]", [1]common.Address{{1}}, ""},
|
||||
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
|
||||
{"bytes32", [32]byte{}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes30", [30]byte{}, ""},
|
||||
{"bytes29", [29]byte{}, ""},
|
||||
{"bytes28", [28]byte{}, ""},
|
||||
{"bytes27", [27]byte{}, ""},
|
||||
{"bytes26", [26]byte{}, ""},
|
||||
{"bytes25", [25]byte{}, ""},
|
||||
{"bytes24", [24]byte{}, ""},
|
||||
{"bytes23", [23]byte{}, ""},
|
||||
{"bytes22", [22]byte{}, ""},
|
||||
{"bytes21", [21]byte{}, ""},
|
||||
{"bytes20", [20]byte{}, ""},
|
||||
{"bytes19", [19]byte{}, ""},
|
||||
{"bytes18", [18]byte{}, ""},
|
||||
{"bytes17", [17]byte{}, ""},
|
||||
{"bytes16", [16]byte{}, ""},
|
||||
{"bytes15", [15]byte{}, ""},
|
||||
{"bytes14", [14]byte{}, ""},
|
||||
{"bytes13", [13]byte{}, ""},
|
||||
{"bytes12", [12]byte{}, ""},
|
||||
{"bytes11", [11]byte{}, ""},
|
||||
{"bytes10", [10]byte{}, ""},
|
||||
{"bytes9", [9]byte{}, ""},
|
||||
{"bytes8", [8]byte{}, ""},
|
||||
{"bytes7", [7]byte{}, ""},
|
||||
{"bytes6", [6]byte{}, ""},
|
||||
{"bytes5", [5]byte{}, ""},
|
||||
{"bytes4", [4]byte{}, ""},
|
||||
{"bytes3", [3]byte{}, ""},
|
||||
{"bytes2", [2]byte{}, ""},
|
||||
{"bytes1", [1]byte{}, ""},
|
||||
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
|
||||
{"bytes32", common.Hash{1}, ""},
|
||||
{"bytes31", [31]byte{}, ""},
|
||||
{"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
|
||||
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
|
||||
{"bytes", []byte{0, 1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, ""},
|
||||
{"bytes", common.Hash{1}, ""},
|
||||
{"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
|
||||
{"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
|
||||
{"string", "hello world", ""},
|
||||
{"string", string(""), ""},
|
||||
{"string", []byte{}, "abi: cannot use slice as type string as argument"},
|
||||
{"bytes32[]", [][32]byte{{}}, ""},
|
||||
{"function", [24]byte{}, ""},
|
||||
{"bytes20", common.Address{}, ""},
|
||||
{"address", [20]byte{}, ""},
|
||||
{"address", common.Address{}, ""},
|
||||
} {
|
||||
typ, err := NewType(test.typ)
|
||||
if err != nil {
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Fatal("unexpected parse error:", err)
|
||||
} else if err != nil && len(test.err) != 0 {
|
||||
if err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = typeCheck(typ, reflect.ValueOf(test.input))
|
||||
|
@@ -25,122 +25,20 @@ import (
	"github.com/ethereum/go-ethereum/common"
)

// toGoSliceType parses the input and casts it to the proper slice defined by the ABI
// argument in T.
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
	index := i * 32
	// The slice must, at the very least, be large enough for index+32, which is exactly the size required
	// for the [offset in output, size of offset].
	if index+32 > len(output) {
		return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
	}
	elem := t.Type.Elem

	// first we need to create a slice of the type
	var refSlice reflect.Value
	switch elem.T {
	case IntTy, UintTy, BoolTy:
		// create a new reference slice matching the element type
		switch t.Type.Kind {
		case reflect.Bool:
			refSlice = reflect.ValueOf([]bool(nil))
		case reflect.Uint8:
			refSlice = reflect.ValueOf([]uint8(nil))
		case reflect.Uint16:
			refSlice = reflect.ValueOf([]uint16(nil))
		case reflect.Uint32:
			refSlice = reflect.ValueOf([]uint32(nil))
		case reflect.Uint64:
			refSlice = reflect.ValueOf([]uint64(nil))
		case reflect.Int8:
			refSlice = reflect.ValueOf([]int8(nil))
		case reflect.Int16:
			refSlice = reflect.ValueOf([]int16(nil))
		case reflect.Int32:
			refSlice = reflect.ValueOf([]int32(nil))
		case reflect.Int64:
			refSlice = reflect.ValueOf([]int64(nil))
		default:
			refSlice = reflect.ValueOf([]*big.Int(nil))
		}
	case AddressTy: // address must be of slice Address
		refSlice = reflect.ValueOf([]common.Address(nil))
	case HashTy: // hash must be of slice hash
		refSlice = reflect.ValueOf([]common.Hash(nil))
	case FixedBytesTy:
		refSlice = reflect.ValueOf([][]byte(nil))
	default: // no other types are supported
		return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
	}

	var slice []byte
	var size int
	var offset int
	if t.Type.IsSlice {
		// get the offset which determines the start of this array ...
		offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
		if offset+32 > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
		}

		slice = output[offset:]
		// ... starting with the size of the array in elements ...
		size = int(binary.BigEndian.Uint64(slice[24:32]))
		slice = slice[32:]
		// ... and make sure that we have at the very least the amount of bytes
		// available in the buffer.
		if size*32 > len(slice) {
			return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
		}

		// reslice to match the required size
		slice = slice[:size*32]
	} else if t.Type.IsArray {
		// get the number of elements in the array
		size = t.Type.SliceSize

		// check to make sure array size matches up
		if index+32*size > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
		}
		// slice is there for a fixed amount of times
		slice = output[index : index+size*32]
	}

	for i := 0; i < size; i++ {
		var (
			inter        interface{}             // interface type
			returnOutput = slice[i*32 : i*32+32] // the return output
			err          error
		)
		// set inter to the correct type (cast)
		switch elem.T {
		case IntTy, UintTy:
			inter = readInteger(t.Type.Kind, returnOutput)
		case BoolTy:
			inter, err = readBool(returnOutput)
			if err != nil {
				return nil, err
			}
		case AddressTy:
			inter = common.BytesToAddress(returnOutput)
		case HashTy:
			inter = common.BytesToHash(returnOutput)
		case FixedBytesTy:
			inter = returnOutput
		}
		// append the item to our reflect slice
		refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
	}

	// return the interface
	return refSlice.Interface(), nil
// unpacker is a utility interface that enables us to have
// abstraction between events and methods and also to properly
// "unpack" them; e.g. events use Inputs, methods use Outputs.
type unpacker interface {
	tupleUnpack(v interface{}, output []byte) error
	singleUnpack(v interface{}, output []byte) error
	isTupleReturn() bool
}

// reads the integer based on its kind
func readInteger(kind reflect.Kind, b []byte) interface{} {
	switch kind {
	case reflect.Uint8:
		return uint8(b[len(b)-1])
		return b[len(b)-1]
	case reflect.Uint16:
		return binary.BigEndian.Uint16(b[len(b)-2:])
	case reflect.Uint32:
@@ -160,13 +58,10 @@ func readInteger(kind reflect.Kind, b []byte) interface{} {
	}
}

// reads a bool
func readBool(word []byte) (bool, error) {
	if len(word) != 32 {
		return false, fmt.Errorf("abi: fatal error: incorrect word length")
	}

	for i, b := range word {
		if b != 0 && i != 31 {
	for _, b := range word[:31] {
		if b != 0 {
			return false, errBadBool
		}
	}
@@ -178,58 +73,144 @@ func readBool(word []byte) (bool, error) {
	default:
		return false, errBadBool
	}
}

// A function type is simply the address with the function selector at the end.
// This enforces that standard by always presenting it as a 24-byte array (20-byte address + 4-byte selector).
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
	if t.T != FunctionTy {
		return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array.")
	}
	if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
		err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
	} else {
		copy(funcTy[:], word[0:24])
	}
	return
}

// through reflection, creates a fixed array to be read from
func readFixedBytes(t Type, word []byte) (interface{}, error) {
	if t.T != FixedBytesTy {
		return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array.")
	}
	// convert
	array := reflect.New(t.Type).Elem()

	reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
	return array.Interface(), nil

}

// toGoType parses the input and casts it to the proper type defined by the ABI
// argument in T.
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
	// we need to treat slices differently
	if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
		return toGoSlice(i, t, output)
// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
	if start+32*size > len(output) {
		return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
	}

	index := i * 32
	// this value will become our slice or our array, depending on the type
	var refSlice reflect.Value
	slice := output[start : start+size*32]

	if t.T == SliceTy {
		// declare our slice
		refSlice = reflect.MakeSlice(t.Type, size, size)
	} else if t.T == ArrayTy {
		// declare our array
		refSlice = reflect.New(t.Type).Elem()
	} else {
		return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
	}

	for i, j := start, 0; j*32 < len(slice); i, j = i+32, j+1 {
		// this corrects the arrangement so that we get all the underlying array values
		if t.Elem.T == ArrayTy && j != 0 {
			i = start + t.Elem.Size*32*j
		}
		inter, err := toGoType(i, *t.Elem, output)
		if err != nil {
			return nil, err
		}
		// append the item to our reflect slice
		refSlice.Index(j).Set(reflect.ValueOf(inter))
	}

	// return the interface
	return refSlice.Interface(), nil
}

// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type in accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
	if index+32 > len(output) {
		return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
	}

	// Parse the given index output and check whether we need to read
	// a different offset and length based on the type (i.e. string, bytes)
	var returnOutput []byte
	switch t.Type.T {
	case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
		// parse offset from which we should start reading
		offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
		if offset+32 > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
		}
		// parse the size up until we should be reading
		size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
		if offset+32+size > len(output) {
			return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
		}
	var (
		returnOutput []byte
		begin, end   int
		err          error
	)

		// get the bytes for this return value
		returnOutput = output[offset+32 : offset+32+size]
	default:
	// if we require a length prefix, find the beginning word and size returned.
	if t.requiresLengthPrefix() {
		begin, end, err = lengthPrefixPointsTo(index, output)
		if err != nil {
			return nil, err
		}
	} else {
		returnOutput = output[index : index+32]
	}

	// convert the bytes to whatever is specified by the ABI.
	switch t.Type.T {
	switch t.T {
	case SliceTy:
		return forEachUnpack(t, output, begin, end)
	case ArrayTy:
		return forEachUnpack(t, output, index, t.Size)
	case StringTy: // variable arrays are written at the end of the return bytes
		return string(output[begin : begin+end]), nil
	case IntTy, UintTy:
		return readInteger(t.Type.Kind, returnOutput), nil
		return readInteger(t.Kind, returnOutput), nil
	case BoolTy:
		return readBool(returnOutput)
	case AddressTy:
		return common.BytesToAddress(returnOutput), nil
	case HashTy:
		return common.BytesToHash(returnOutput), nil
	case BytesTy, FixedBytesTy, FunctionTy:
		return returnOutput, nil
	case StringTy:
		return string(returnOutput), nil
	case BytesTy:
		return output[begin : begin+end], nil
	case FixedBytesTy:
		return readFixedBytes(t, returnOutput)
	case FunctionTy:
		return readFunctionType(t, returnOutput)
	default:
		return nil, fmt.Errorf("abi: unknown type %v", t.T)
	}
}

// interprets a 32 byte slice as an offset and then determines which index to look at to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
	offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
	if offset+32 > len(output) {
		return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
	}
	length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
	if offset+32+length > len(output) {
		return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
	}
	start = offset + 32

	//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
	return
}

// checks for proper formatting of byte output
func bytesAreProper(output []byte) error {
	if len(output) == 0 {
		return fmt.Errorf("abi: unmarshalling empty output")
	} else if len(output)%32 != 0 {
		return fmt.Errorf("abi: improperly formatted output")
	} else {
		return nil
	}
	return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
}
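The new lengthPrefixPointsTo plus the dynamic branches of toGoType implement the usual head/tail layout: the word at the argument's index holds an offset, the word at that offset holds the length, and the payload follows. A self-contained sketch of that walk over a raw return blob (illustrative only, using encoding/binary rather than the abi package):

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// decodeDynamic decodes one dynamic return value: word 0 holds the offset of
// the tail, the tail starts with a length word, and the payload follows. It
// mirrors the shape of lengthPrefixPointsTo, but is not the package's code.
func decodeDynamic(output []byte, index int) ([]byte, error) {
	if index+32 > len(output) {
		return nil, fmt.Errorf("output too short")
	}
	offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
	if offset+32 > len(output) {
		return nil, fmt.Errorf("offset points past output")
	}
	length := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
	if offset+32+length > len(output) {
		return nil, fmt.Errorf("length points past output")
	}
	return output[offset+32 : offset+32+length], nil
}

func main() {
	// offset = 0x20, length = 0x02, payload = "hi" padded to 32 bytes
	blob, _ := hex.DecodeString(
		"0000000000000000000000000000000000000000000000000000000000000020" +
			"0000000000000000000000000000000000000000000000000000000000000002" +
			"6869000000000000000000000000000000000000000000000000000000000000")
	payload, err := decodeDynamic(blob, 0)
	fmt.Printf("%s %v\n", payload, err) // hi <nil>
}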
@@ -18,6 +18,7 @@ package abi

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"math/big"
	"reflect"
@@ -27,260 +28,258 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestSimpleMethodUnpack(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
def string // definition of the **output** ABI params
|
||||
marshalledOutput []byte // evm return data
|
||||
expectedOut interface{} // the expected output
|
||||
outVar string // the output variable (e.g. uint32, *big.Int, etc)
|
||||
err string // empty or error if expected
|
||||
}{
|
||||
{
|
||||
`[ { "type": "bool" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
bool(true),
|
||||
"bool",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
uint32(1),
|
||||
"uint32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"uint16",
|
||||
"abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "uint17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
type unpackTest struct {
|
||||
def string // ABI definition JSON
|
||||
enc string // evm return data
|
||||
want interface{} // the expected output
|
||||
err string // empty or error if expected
|
||||
}
|
||||
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
int32(1),
|
||||
"int32",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int32" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
nil,
|
||||
"int16",
|
||||
"abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
`[ { "type": "int17" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
big.NewInt(1),
|
||||
"*big.Int",
|
||||
"",
|
||||
},
|
||||
func (test unpackTest) checkError(err error) error {
|
||||
if err != nil {
|
||||
if len(test.err) == 0 {
|
||||
return fmt.Errorf("expected no err but got: %v", err)
|
||||
} else if err.Error() != test.err {
|
||||
return fmt.Errorf("expected err: '%v' got err: %q", test.err, err)
|
||||
}
|
||||
} else if len(test.err) > 0 {
|
||||
return fmt.Errorf("expected err: %v but got none", test.err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
{
|
||||
`[ { "type": "address" } ]`,
|
||||
common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"),
|
||||
common.Address{1},
|
||||
"address",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"bytes",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"hash",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "bytes32" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
"interface",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`[ { "type": "function" } ]`,
|
||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
[24]byte{1},
|
||||
"function",
|
||||
"",
|
||||
},
|
||||
} {
|
||||
abiDefinition := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(abiDefinition))
|
||||
var unpackTests = []unpackTest{
|
||||
{
|
||||
def: `[{ "type": "bool" }]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: uint32(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: uint16(0),
|
||||
err: "abi: cannot unmarshal uint32 in to uint16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: uint16(0),
|
||||
err: "abi: cannot unmarshal *big.Int in to uint16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: big.NewInt(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: int32(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: int16(0),
|
||||
err: "abi: cannot unmarshal int32 in to int16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: int16(0),
|
||||
err: "abi: cannot unmarshal *big.Int in to int16",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int17"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: big.NewInt(1),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "address"}]`,
|
||||
enc: "0000000000000000000000000100000000000000000000000000000000000000",
|
||||
want: common.Address{1},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [32]byte{},
|
||||
err: "abi: cannot unmarshal []uint8 in to [32]uint8",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: []byte(nil),
|
||||
err: "abi: cannot unmarshal [32]uint8 in to []uint8",
|
||||
},
|
||||
{
|
||||
def: `[{"type": "bytes32"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
{
|
||||
def: `[{"type": "function"}]`,
|
||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||
want: [24]byte{1},
|
||||
},
|
||||
// slices
|
||||
{
|
||||
def: `[{"type": "uint8[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint8{1, 2},
|
||||
},
|
||||
// multi dimensional, if these pass, all types that don't require length prefix should pass
|
||||
{
|
||||
def: `[{"type": "uint8[][]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][2]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2][2]uint8{{1, 2}, {1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[][2]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
|
||||
want: [2][]uint8{{1}, {1}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint8[2][]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [][2]uint8{{1, 2}},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint16[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint16[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint32[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint64[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint64[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]uint64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint256[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "uint256[3]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int8[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int8[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int8{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int16[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int16[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int16{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int32[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int32{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int64[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []int64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int64[2]"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: [2]int64{1, 2},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int256[]"}]`,
|
||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: []*big.Int{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"type": "int256[3]"}]`,
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||
},
|
||||
}
|
||||
|
||||
func TestUnpack(t *testing.T) {
|
||||
for i, test := range unpackTests {
|
||||
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(def))
|
||||
if err != nil {
|
||||
t.Errorf("%d failed. %v", i, err)
|
||||
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||
}
|
||||
encb, err := hex.DecodeString(test.enc)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex: %s" + test.enc)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.want))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
if err := test.checkError(err); err != nil {
|
||||
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var outvar interface{}
|
||||
switch test.outVar {
|
||||
case "bool":
|
||||
var v bool
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint8":
|
||||
var v uint8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint16":
|
||||
var v uint16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint32":
|
||||
var v uint32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "uint64":
|
||||
var v uint64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int8":
|
||||
var v int8
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int16":
|
||||
var v int16
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int32":
|
||||
var v int32
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "int64":
|
||||
var v int64
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "*big.Int":
|
||||
var v *big.Int
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "address":
|
||||
var v common.Address
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "bytes":
|
||||
var v []byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "hash":
|
||||
var v common.Hash
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v.Bytes()[:]
|
||||
case "function":
|
||||
var v [24]byte
|
||||
err = abi.Unpack(&v, "method", test.marshalledOutput)
|
||||
outvar = v
|
||||
case "interface":
|
||||
err = abi.Unpack(&outvar, "method", test.marshalledOutput)
|
||||
default:
|
||||
t.Errorf("unsupported type '%v' please add it to the switch statement in this test", test.outVar)
|
||||
continue
|
||||
out := outptr.Elem().Interface()
|
||||
if !reflect.DeepEqual(test.want, out) {
|
||||
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.want, out)
|
||||
}
|
||||
|
||||
if err != nil && len(test.err) == 0 {
|
||||
t.Errorf("%d failed. Expected no err but got: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && len(test.err) != 0 {
|
||||
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
|
||||
continue
|
||||
}
|
||||
if err != nil && len(test.err) != 0 && err.Error() != test.err {
|
||||
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if !reflect.DeepEqual(test.expectedOut, outvar) {
|
||||
t.Errorf("%d failed. Output error: expected %v, got %v", i, test.expectedOut, outvar)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
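For readers following these tests, the same unpack flow can be used standalone. A minimal sketch, assuming only the accounts/abi calls exercised above (abi.JSON and abi.Unpack); the method name and return data are illustrative only:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Output definition for a hypothetical method returning int256[].
	def := `[{ "name": "method", "outputs": [{"type": "int256[]"}] }]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Illustrative return data: offset 0x20, length 2, then the values 1 and 2.
	data, _ := hex.DecodeString(
		"0000000000000000000000000000000000000000000000000000000000000020" +
			"0000000000000000000000000000000000000000000000000000000000000002" +
			"0000000000000000000000000000000000000000000000000000000000000001" +
			"0000000000000000000000000000000000000000000000000000000000000002")
	var out []*big.Int
	if err := parsed.Unpack(&out, "method", data); err != nil {
		panic(err)
	}
	fmt.Println(out) // [1 2]
}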
|
||||
func TestUnpackSetInterfaceSlice(t *testing.T) {
|
||||
var (
|
||||
var1 = new(uint8)
|
||||
var2 = new(uint8)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *var1 != 1 {
|
||||
t.Error("expected var1 to be 1, got", *var1)
|
||||
}
|
||||
if *var2 != 2 {
|
||||
t.Error("expected var2 to be 2, got", *var2)
|
||||
}
|
||||
|
||||
out = []interface{}{var1}
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
|
||||
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
|
||||
if err == nil || err.Error() != expErr {
|
||||
t.Error("expected err:", expErr, "Got:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
|
||||
var (
|
||||
var1 = new([1]uint32)
|
||||
var2 = new([1]uint32)
|
||||
)
|
||||
out := []interface{}{var1, var2}
|
||||
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
marshalledReturn := append(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")...)
|
||||
err = abi.Unpack(&out, "ints", marshalledReturn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *var1 != [1]uint32{1} {
|
||||
t.Error("expected var1 to be [1], got", *var1)
|
||||
}
|
||||
if *var2 != [1]uint32{2} {
|
||||
t.Error("expected var2 to be [2], got", *var2)
|
||||
}
|
||||
}
|
||||
|
||||
@ -337,101 +336,6 @@ func TestMultiReturnWithStruct(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithSlice(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(inter) != 2 {
|
||||
t.Fatal("expected 2 results got", len(inter))
|
||||
}
|
||||
|
||||
if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected index 0 to be 1 got", num)
|
||||
}
|
||||
|
||||
if str, ok := inter[1].(string); !ok || str != stringOut {
|
||||
t.Error("expected index 1 to be", stringOut, "got", str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalArrays(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "bytes32", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
|
||||
{ "name" : "bytes10", "constant" : false, "outputs": [ { "type": "bytes10" } ] }
|
||||
]`
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(bytes32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
type (
|
||||
B10 [10]byte
|
||||
B32 [32]byte
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(b32[:], output) {
|
||||
t.Error("expected bytes32[31] to be 1 got", bytes32[31])
|
||||
}
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
if !bytes.Equal(output, shortAssignLong[:]) {
|
||||
t.Errorf("expected %x to be %x", shortAssignLong, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
const definition = `[
|
||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||
@ -450,6 +354,29 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
|
||||
// marshal mixed bytes (mixedBytes)
|
||||
p0, p0Exp := []byte{}, common.Hex2Bytes("01020000000000000000")
|
||||
p1, p1Exp := [32]byte{}, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff")
|
||||
mixedBytes := []interface{}{&p0, &p1}
|
||||
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a"))
|
||||
buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
|
||||
|
||||
err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
if bytes.Compare(p0, p0Exp) != 0 {
|
||||
t.Errorf("unexpected value unpacked: want %x, got %x", p0Exp, p0)
|
||||
}
|
||||
|
||||
if bytes.Compare(p1[:], p1Exp) != 0 {
|
||||
t.Errorf("unexpected value unpacked: want %x, got %x", p1Exp, p1)
|
||||
}
|
||||
}
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
@ -473,6 +400,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 32
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut := common.RightPadBytes([]byte("hello"), 32)
|
||||
@ -504,11 +432,11 @@ func TestUnmarshal(t *testing.T) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes max length 63
|
||||
// marshal dynamic bytes max length 64
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
@ -516,8 +444,8 @@ func TestUnmarshal(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(Bytes, bytesOut) {
|
||||
t.Errorf("expected %x got %x", bytesOut, Bytes)
|
||||
if !bytes.Equal(Bytes, bytesOut[:len(bytesOut)-1]) {
|
||||
t.Errorf("expected %x got %x", bytesOut[:len(bytesOut)-1], Bytes)
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
@ -569,29 +497,6 @@ func TestUnmarshal(t *testing.T) {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
// marshal mixed bytes
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
buff.Write(fixed)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 32)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bytesOut, out[0].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", bytesOut, out[0])
|
||||
}
|
||||
|
||||
if !bytes.Equal(fixed, out[1].([]byte)) {
|
||||
t.Errorf("expected %x, got %x", fixed, out[1])
|
||||
}
|
||||
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
|
@ -42,8 +42,9 @@ type Wallet interface {
|
||||
URL() URL
|
||||
|
||||
// Status returns a textual status to aid the user in the current state of the
|
||||
// wallet.
|
||||
Status() string
|
||||
// wallet. It also returns an error indicating any failure the wallet might have
|
||||
// encountered.
|
||||
Status() (string, error)
|
||||
|
||||
// Open initializes access to a wallet instance. It is not meant to unlock or
|
||||
// decrypt account keys, rather simply to establish a connection to hardware
|
||||
@ -147,9 +148,26 @@ type Backend interface {
|
||||
Subscribe(sink chan<- WalletEvent) event.Subscription
|
||||
}
|
||||
|
||||
// WalletEventType represents the different event types that can be fired by
|
||||
// the wallet subscription subsystem.
|
||||
type WalletEventType int
|
||||
|
||||
const (
|
||||
// WalletArrived is fired when a new wallet is detected either via USB or via
|
||||
// a filesystem event in the keystore.
|
||||
WalletArrived WalletEventType = iota
|
||||
|
||||
// WalletOpened is fired when a wallet is successfully opened with the purpose
|
||||
// of starting any background processes such as automatic key derivation.
|
||||
WalletOpened
|
||||
|
||||
// WalletDropped is fired when a wallet is removed or disconnected, either via USB
// or due to a filesystem event in the keystore.
|
||||
WalletDropped
|
||||
)
|
||||
|
||||
// WalletEvent is an event fired by an account backend when a wallet arrival or
|
||||
// departure is detected.
|
||||
type WalletEvent struct {
|
||||
Wallet Wallet // Wallet instance arrived or departed
|
||||
Arrive bool // Whether the wallet was added or removed
|
||||
Wallet Wallet // Wallet instance arrived or departed
|
||||
Kind WalletEventType // Event type that happened in the system
|
||||
}
|
||||
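As a consumer-side sketch of the event types above (only the Backend, WalletEvent and WalletEventType declarations from this file are assumed; the backend value is a placeholder):

// watchWallets subscribes to a backend and reacts to wallet arrivals and
// departures. It is a usage illustration, not part of this change.
func watchWallets(backend Backend) {
	updates := make(chan WalletEvent, 16)
	sub := backend.Subscribe(updates)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-updates:
			switch ev.Kind {
			case WalletArrived:
				// A new wallet showed up; it may now be opened and used.
			case WalletOpened:
				// Background processes (e.g. key derivation) may start here.
			case WalletDropped:
				// The wallet is gone; stop using it.
			}
			_ = ev.Wallet
		case <-sub.Err():
			return
		}
	}
}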
|
@ -27,12 +27,17 @@ import (
|
||||
// DefaultRootDerivationPath is the root path to which custom derivation endpoints
|
||||
// are appended. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0}
|
||||
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
|
||||
|
||||
// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
|
||||
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
|
||||
// at m/44'/60'/0'/1, etc.
|
||||
var DefaultLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
|
||||
|
||||
// DerivationPath represents the computer friendly version of a hierarchical
|
||||
// deterministic wallet account derivation path.
|
||||
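A small sketch of how a base path like the ones above is usually advanced to reach m/44'/60'/0'/0, m/44'/60'/0'/1, and so on (assumes only that DerivationPath is a slice of uint32 components, as the literals above imply; the helper name is hypothetical):

// nextDerivationPath returns a copy of base with its last component advanced by
// offset, e.g. m/44'/60'/0'/0 with offset 1 becomes m/44'/60'/0'/1.
func nextDerivationPath(base DerivationPath, offset uint32) DerivationPath {
	next := make(DerivationPath, len(base))
	copy(next, base)
	next[len(next)-1] += offset
	return next
}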
|
@ -37,11 +37,11 @@ func TestHDPathParsing(t *testing.T) {
|
||||
{"m/2147483692/2147483708/2147483648/2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
|
||||
// Plain relative derivation paths
|
||||
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
|
||||
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
|
||||
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
|
||||
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
|
||||
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
|
||||
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
|
||||
// Hexadecimal absolute derivation paths
|
||||
{"m/0x2C'/0x3c'/0x00'/0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
@ -52,11 +52,11 @@ func TestHDPathParsing(t *testing.T) {
|
||||
{"m/0x8000002C/0x8000003c/0x80000000/0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
|
||||
// Hexadecimal relative derivation paths
|
||||
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
|
||||
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
|
||||
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
|
||||
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
|
||||
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
|
||||
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
|
||||
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
|
||||
|
||||
// Weird inputs just to ensure they work
|
||||
{" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@ -31,6 +30,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gopkg.in/fatih/set.v0"
|
||||
)
|
||||
|
||||
// Minimum amount of time between cache reloads. This limit applies if the platform does
|
||||
@ -71,6 +71,7 @@ type accountCache struct {
|
||||
byAddr map[common.Address][]accounts.Account
|
||||
throttle *time.Timer
|
||||
notify chan struct{}
|
||||
fileC fileCache
|
||||
}
|
||||
|
||||
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
||||
@ -78,6 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
||||
keydir: keydir,
|
||||
byAddr: make(map[common.Address][]accounts.Account),
|
||||
notify: make(chan struct{}, 1),
|
||||
fileC: fileCache{all: set.NewNonTS()},
|
||||
}
|
||||
ac.watcher = newWatcher(ac)
|
||||
return ac, ac.notify
|
||||
@ -127,6 +129,23 @@ func (ac *accountCache) delete(removed accounts.Account) {
|
||||
}
|
||||
}
|
||||
|
||||
// deleteByFile removes an account referenced by the given path.
|
||||
func (ac *accountCache) deleteByFile(path string) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path })
|
||||
|
||||
if i < len(ac.all) && ac.all[i].URL.Path == path {
|
||||
removed := ac.all[i]
|
||||
ac.all = append(ac.all[:i], ac.all[i+1:]...)
|
||||
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
|
||||
delete(ac.byAddr, removed.Address)
|
||||
} else {
|
||||
ac.byAddr[removed.Address] = ba
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
|
||||
for i := range slice {
|
||||
if slice[i] == elem {
|
||||
@ -167,15 +186,16 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
|
||||
default:
|
||||
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
|
||||
copy(err.Matches, matches)
|
||||
sort.Sort(accountsByURL(err.Matches))
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
}
|
||||
|
||||
func (ac *accountCache) maybeReload() {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
|
||||
if ac.watcher.running {
|
||||
ac.mu.Unlock()
|
||||
return // A watcher is running and will keep the cache up-to-date.
|
||||
}
|
||||
if ac.throttle == nil {
|
||||
@ -184,12 +204,15 @@ func (ac *accountCache) maybeReload() {
|
||||
select {
|
||||
case <-ac.throttle.C:
|
||||
default:
|
||||
ac.mu.Unlock()
|
||||
return // The cache was reloaded recently.
|
||||
}
|
||||
}
|
||||
// No watcher running, start it.
|
||||
ac.watcher.start()
|
||||
ac.reload()
|
||||
ac.throttle.Reset(minReloadInterval)
|
||||
ac.mu.Unlock()
|
||||
ac.scanAccounts()
|
||||
}
|
||||
|
||||
func (ac *accountCache) close() {
|
||||
@ -205,80 +228,71 @@ func (ac *accountCache) close() {
|
||||
ac.mu.Unlock()
|
||||
}
|
||||
|
||||
// reload caches addresses of existing accounts.
|
||||
// Callers must hold ac.mu.
|
||||
func (ac *accountCache) reload() {
|
||||
accounts, err := ac.scan()
|
||||
// scanAccounts checks if any changes have occurred on the filesystem, and
|
||||
// updates the account cache accordingly
|
||||
func (ac *accountCache) scanAccounts() error {
|
||||
// Scan the entire folder metadata for file changes
|
||||
creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
|
||||
if err != nil {
|
||||
log.Debug("Failed to reload keystore contents", "err", err)
|
||||
return err
|
||||
}
|
||||
ac.all = accounts
|
||||
sort.Sort(ac.all)
|
||||
for k := range ac.byAddr {
|
||||
delete(ac.byAddr, k)
|
||||
if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, a := range accounts {
|
||||
ac.byAddr[a.Address] = append(ac.byAddr[a.Address], a)
|
||||
// Create a helper method to scan the contents of the key files
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
key struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
readAccount := func(path string) *accounts.Account {
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Trace("Failed to open keystore file", "path", path, "err", err)
|
||||
return nil
|
||||
}
|
||||
defer fd.Close()
|
||||
buf.Reset(fd)
|
||||
// Parse the address.
|
||||
key.Address = ""
|
||||
err = json.NewDecoder(buf).Decode(&key)
|
||||
addr := common.HexToAddress(key.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", err)
|
||||
case (addr == common.Address{}):
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
|
||||
default:
|
||||
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Process all the file diffs
|
||||
start := time.Now()
|
||||
|
||||
for _, p := range creates.List() {
|
||||
if a := readAccount(p.(string)); a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
for _, p := range deletes.List() {
|
||||
ac.deleteByFile(p.(string))
|
||||
}
|
||||
for _, p := range updates.List() {
|
||||
path := p.(string)
|
||||
ac.deleteByFile(path)
|
||||
if a := readAccount(path); a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
end := time.Now()
|
||||
|
||||
select {
|
||||
case ac.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
log.Debug("Reloaded keystore contents", "accounts", len(ac.all))
|
||||
}
|
||||
|
||||
func (ac *accountCache) scan() ([]accounts.Account, error) {
|
||||
files, err := ioutil.ReadDir(ac.keydir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
addrs []accounts.Account
|
||||
keyJSON struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
for _, fi := range files {
|
||||
path := filepath.Join(ac.keydir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
logger := log.New("path", path)
|
||||
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
logger.Trace("Failed to open keystore file", "err", err)
|
||||
continue
|
||||
}
|
||||
buf.Reset(fd)
|
||||
// Parse the address.
|
||||
keyJSON.Address = ""
|
||||
err = json.NewDecoder(buf).Decode(&keyJSON)
|
||||
addr := common.HexToAddress(keyJSON.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
logger.Debug("Failed to decode keystore key", "err", err)
|
||||
case (addr == common.Address{}):
|
||||
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
|
||||
default:
|
||||
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
return addrs, err
|
||||
}
|
||||
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
}
|
||||
// Skip misc special files, directories (yes, symlinks too).
|
||||
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
log.Trace("Handled keystore changes", "time", end.Sub(start))
|
||||
return nil
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ package keystore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -295,3 +296,101 @@ func TestCacheFind(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
||||
var list []accounts.Account
|
||||
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
|
||||
list = ks.Accounts()
|
||||
if reflect.DeepEqual(list, wantAccounts) {
|
||||
// ks should have also received change notifications
|
||||
select {
|
||||
case <-ks.changes:
|
||||
default:
|
||||
return fmt.Errorf("wasn't notified of new accounts")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
time.Sleep(d)
|
||||
}
|
||||
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
|
||||
}
|
||||
|
||||
// TestUpdatedKeyfileContents tests that updating the contents of a keystore file
|
||||
// is noticed by the watcher, and the account cache is updated accordingly
|
||||
func TestUpdatedKeyfileContents(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a temporary keystore to test with
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
|
||||
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
|
||||
|
||||
list := ks.Accounts()
|
||||
if len(list) > 0 {
|
||||
t.Error("initial account list not empty:", list)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Create the directory and copy a key file into it.
|
||||
os.MkdirAll(dir, 0700)
|
||||
defer os.RemoveAll(dir)
|
||||
file := filepath.Join(dir, "aaa")
|
||||
|
||||
// Place one of our testfiles in there
|
||||
if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// ks should see the account.
|
||||
wantAccounts := []accounts.Account{cachetestAccounts[0]}
|
||||
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
|
||||
if err := waitForAccounts(wantAccounts, ks); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Now replace file contents
|
||||
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
wantAccounts = []accounts.Account{cachetestAccounts[1]}
|
||||
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
|
||||
if err := waitForAccounts(wantAccounts, ks); err != nil {
|
||||
t.Errorf("First replacement failed")
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Now replace file contents again
|
||||
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
wantAccounts = []accounts.Account{cachetestAccounts[2]}
|
||||
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
|
||||
if err := waitForAccounts(wantAccounts, ks); err != nil {
|
||||
t.Errorf("Second replacement failed")
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
// Now replace file contents with crap
|
||||
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
if err := waitForAccounts([]accounts.Account{}, ks); err != nil {
|
||||
t.Errorf("Emptying account file failed")
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
|
||||
func forceCopyFile(dst, src string) error {
|
||||
data, err := ioutil.ReadFile(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(dst, data, 0644)
|
||||
}
|
||||
|
accounts/keystore/file_cache.go (new file, 102 lines)
@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package keystore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
set "gopkg.in/fatih/set.v0"
|
||||
)
|
||||
|
||||
// fileCache is a cache of files seen during scan of keystore.
|
||||
type fileCache struct {
|
||||
all *set.SetNonTS // Set of all files from the keystore folder
|
||||
lastMod time.Time // Last time instance when a file was modified
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// scan performs a new scan on the given directory, compares against the already
|
||||
// cached filenames, and returns file sets: creates, deletes, updates.
|
||||
func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
|
||||
t0 := time.Now()
|
||||
|
||||
// List all the files from the keystore folder
|
||||
files, err := ioutil.ReadDir(keyDir)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
t1 := time.Now()
|
||||
|
||||
fc.mu.Lock()
|
||||
defer fc.mu.Unlock()
|
||||
|
||||
// Iterate all the files and gather their metadata
|
||||
all := set.NewNonTS()
|
||||
mods := set.NewNonTS()
|
||||
|
||||
var newLastMod time.Time
|
||||
for _, fi := range files {
|
||||
// Skip any non-key files from the folder
|
||||
path := filepath.Join(keyDir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
// Gather the set of all and freshly modified files
|
||||
all.Add(path)
|
||||
|
||||
modified := fi.ModTime()
|
||||
if modified.After(fc.lastMod) {
|
||||
mods.Add(path)
|
||||
}
|
||||
if modified.After(newLastMod) {
|
||||
newLastMod = modified
|
||||
}
|
||||
}
|
||||
t2 := time.Now()
|
||||
|
||||
// Update the tracked files and return the three sets
|
||||
deletes := set.Difference(fc.all, all) // Deletes = previous - current
|
||||
creates := set.Difference(all, fc.all) // Creates = current - previous
|
||||
updates := set.Difference(mods, creates) // Updates = modified - creates
|
||||
|
||||
fc.all, fc.lastMod = all, newLastMod
|
||||
t3 := time.Now()
|
||||
|
||||
// Report on the scanning stats and return
|
||||
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
|
||||
return creates, deletes, updates, nil
|
||||
}
|
||||
|
||||
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
}
|
||||
// Skip misc special files, directories (yes, symlinks too).
|
||||
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
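The creates/deletes/updates computation above is three plain set differences. A self-contained illustration with maps instead of the set package (function and parameter names are hypothetical):

// diffFileSets mirrors fileCache.scan: paths present now but not before are
// creates, paths present before but not now are deletes, and paths modified
// since the previous scan that are not creates are updates.
func diffFileSets(previous, current, modified map[string]bool) (creates, deletes, updates []string) {
	for path := range current {
		if !previous[path] {
			creates = append(creates, path)
		}
	}
	for path := range previous {
		if !current[path] {
			deletes = append(deletes, path)
		}
	}
	for path := range modified {
		if previous[path] { // modified, but not a fresh create
			updates = append(updates, path)
		}
	}
	return creates, deletes, updates
}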
@ -91,14 +91,6 @@ type cipherparamsJSON struct {
|
||||
IV string `json:"iv"`
|
||||
}
|
||||
|
||||
type scryptParamsJSON struct {
|
||||
N int `json:"n"`
|
||||
R int `json:"r"`
|
||||
P int `json:"p"`
|
||||
DkLen int `json:"dklen"`
|
||||
Salt string `json:"salt"`
|
||||
}
|
||||
|
||||
func (k *Key) MarshalJSON() (j []byte, err error) {
|
||||
jStruct := plainKeyJSON{
|
||||
hex.EncodeToString(k.Address[:]),
|
||||
|
@ -143,14 +143,14 @@ func (ks *KeyStore) refreshWallets() {
|
||||
for _, account := range accs {
|
||||
// Drop wallets while they were in front of the next account
|
||||
for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 {
|
||||
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Kind: accounts.WalletDropped})
|
||||
ks.wallets = ks.wallets[1:]
|
||||
}
|
||||
// If there are no more wallets or the account is before the next, wrap new wallet
|
||||
if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 {
|
||||
wallet := &keystoreWallet{account: account, keystore: ks}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
|
||||
wallets = append(wallets, wallet)
|
||||
continue
|
||||
}
|
||||
@ -163,7 +163,7 @@ func (ks *KeyStore) refreshWallets() {
|
||||
}
|
||||
// Drop any leftover wallets and set the new batch
|
||||
for _, wallet := range ks.wallets {
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
|
||||
}
|
||||
ks.wallets = wallets
|
||||
ks.mu.Unlock()
|
||||
|
@ -28,6 +28,7 @@ package keystore
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
crand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@ -90,6 +91,12 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
|
||||
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
|
||||
return a.Address, err
|
||||
}
|
||||
|
||||
func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
|
||||
keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
|
||||
if err != nil {
|
||||
@ -140,7 +147,7 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
Cipher: "aes-128-ctr",
|
||||
CipherText: hex.EncodeToString(cipherText),
|
||||
CipherParams: cipherParamsJSON,
|
||||
KDF: "scrypt",
|
||||
KDF: keyHeaderKDF,
|
||||
KDFParams: scryptParamsJSON,
|
||||
MAC: hex.EncodeToString(mac),
|
||||
}
|
||||
@ -275,7 +282,7 @@ func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
|
||||
}
|
||||
dkLen := ensureInt(cryptoJSON.KDFParams["dklen"])
|
||||
|
||||
if cryptoJSON.KDF == "scrypt" {
|
||||
if cryptoJSON.KDF == keyHeaderKDF {
|
||||
n := ensureInt(cryptoJSON.KDFParams["n"])
|
||||
r := ensureInt(cryptoJSON.KDFParams["r"])
|
||||
p := ensureInt(cryptoJSON.KDFParams["p"])
|
||||
|
@ -272,82 +272,104 @@ func TestWalletNotifierLifecycle(t *testing.T) {
|
||||
t.Errorf("wallet notifier didn't terminate after unsubscribe")
|
||||
}
|
||||
|
||||
type walletEvent struct {
|
||||
accounts.WalletEvent
|
||||
a accounts.Account
|
||||
}
|
||||
|
||||
// Tests that wallet notifications are correctly fired when accounts are added
|
||||
// or deleted from the keystore.
|
||||
func TestWalletNotifications(t *testing.T) {
|
||||
// Create a temporary keystore to test with
|
||||
dir, ks := tmpKeyStore(t, false)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// Subscribe to the wallet feed
|
||||
updates := make(chan accounts.WalletEvent, 1)
|
||||
sub := ks.Subscribe(updates)
|
||||
// Subscribe to the wallet feed and collect events.
|
||||
var (
|
||||
events []walletEvent
|
||||
updates = make(chan accounts.WalletEvent)
|
||||
sub = ks.Subscribe(updates)
|
||||
)
|
||||
defer sub.Unsubscribe()
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case ev := <-updates:
|
||||
events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
|
||||
case <-sub.Err():
|
||||
close(updates)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Randomly add and remove account and make sure events and wallets are in sync
|
||||
live := make(map[common.Address]accounts.Account)
|
||||
// Randomly add and remove accounts.
|
||||
var (
|
||||
live = make(map[common.Address]accounts.Account)
|
||||
wantEvents []walletEvent
|
||||
)
|
||||
for i := 0; i < 1024; i++ {
|
||||
// Execute a creation or deletion and ensure event arrival
|
||||
if create := len(live) == 0 || rand.Int()%4 > 0; create {
|
||||
// Add a new account and ensure wallet notifications arrives
|
||||
account, err := ks.NewAccount("")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test account: %v", err)
|
||||
}
|
||||
select {
|
||||
case event := <-updates:
|
||||
if !event.Arrive {
|
||||
t.Errorf("departure event on account creation")
|
||||
}
|
||||
if event.Wallet.Accounts()[0] != account {
|
||||
t.Errorf("account mismatch on created wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
|
||||
}
|
||||
default:
|
||||
t.Errorf("wallet arrival event not fired on account creation")
|
||||
}
|
||||
live[account.Address] = account
|
||||
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletArrived}, account})
|
||||
} else {
|
||||
// Select a random account to delete (crude, but works)
|
||||
// Delete a random account.
|
||||
var account accounts.Account
|
||||
for _, a := range live {
|
||||
account = a
|
||||
break
|
||||
}
|
||||
// Remove an account and ensure wallet notification arrives
|
||||
if err := ks.Delete(account, ""); err != nil {
|
||||
t.Fatalf("failed to delete test account: %v", err)
|
||||
}
|
||||
select {
|
||||
case event := <-updates:
|
||||
if event.Arrive {
|
||||
t.Errorf("arrival event on account deletion")
|
||||
}
|
||||
if event.Wallet.Accounts()[0] != account {
|
||||
t.Errorf("account mismatch on deleted wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
|
||||
}
|
||||
default:
|
||||
t.Errorf("wallet departure event not fired on account creation")
|
||||
}
|
||||
delete(live, account.Address)
|
||||
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletDropped}, account})
|
||||
}
|
||||
// Retrieve the list of wallets and ensure it matches with our required live set
|
||||
liveList := make([]accounts.Account, 0, len(live))
|
||||
for _, account := range live {
|
||||
liveList = append(liveList, account)
|
||||
}
|
||||
sort.Sort(accountsByURL(liveList))
|
||||
}
|
||||
|
||||
wallets := ks.Wallets()
|
||||
if len(liveList) != len(wallets) {
|
||||
t.Errorf("wallet list doesn't match required accounts: have %v, want %v", wallets, liveList)
|
||||
} else {
|
||||
for j, wallet := range wallets {
|
||||
if accs := wallet.Accounts(); len(accs) != 1 {
|
||||
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
|
||||
} else if accs[0] != liveList[j] {
|
||||
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
|
||||
}
|
||||
// Shut down the event collector and check events.
|
||||
sub.Unsubscribe()
|
||||
<-updates
|
||||
checkAccounts(t, live, ks.Wallets())
|
||||
checkEvents(t, wantEvents, events)
|
||||
}
|
||||
|
||||
// checkAccounts checks that all known live accounts are present in the wallet list.
|
||||
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
|
||||
if len(live) != len(wallets) {
|
||||
t.Errorf("wallet list doesn't match required accounts: have %d, want %d", len(wallets), len(live))
|
||||
return
|
||||
}
|
||||
liveList := make([]accounts.Account, 0, len(live))
|
||||
for _, account := range live {
|
||||
liveList = append(liveList, account)
|
||||
}
|
||||
sort.Sort(accountsByURL(liveList))
|
||||
for j, wallet := range wallets {
|
||||
if accs := wallet.Accounts(); len(accs) != 1 {
|
||||
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
|
||||
} else if accs[0] != liveList[j] {
|
||||
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkEvents checks that all events in 'want' are present in 'have'. Events may be present multiple times.
|
||||
func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
|
||||
for _, wantEv := range want {
|
||||
nmatch := 0
|
||||
for ; len(have) > 0; nmatch++ {
|
||||
if have[0].Kind != wantEv.Kind || have[0].a != wantEv.a {
|
||||
break
|
||||
}
|
||||
have = have[1:]
|
||||
}
|
||||
if nmatch == 0 {
|
||||
t.Fatalf("can't find event with Kind=%v for %x", wantEv.Kind, wantEv.a.Address)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -36,16 +36,16 @@ func (w *keystoreWallet) URL() accounts.URL {
|
||||
return w.account.URL
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, always returning "open", since there is no
|
||||
// concept of open/close for plain keystore accounts.
|
||||
func (w *keystoreWallet) Status() string {
|
||||
// Status implements accounts.Wallet, returning whether the account held by the
|
||||
// keystore wallet is unlocked or not.
|
||||
func (w *keystoreWallet) Status() (string, error) {
|
||||
w.keystore.mu.RLock()
|
||||
defer w.keystore.mu.RUnlock()
|
||||
|
||||
if _, ok := w.keystore.unlocked[w.account.Address]; ok {
|
||||
return "Unlocked"
|
||||
return "Unlocked", nil
|
||||
}
|
||||
return "Locked"
|
||||
return "Locked", nil
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, but is a noop for plain wallets since there
|
||||
|
@ -70,7 +70,6 @@ func (w *watcher) loop() {
|
||||
return
|
||||
}
|
||||
defer notify.Stop(w.ev)
|
||||
|
||||
logger.Trace("Started watching keystore folder")
|
||||
defer logger.Trace("Stopped watching keystore folder")
|
||||
|
||||
@ -82,32 +81,28 @@ func (w *watcher) loop() {
|
||||
// When an event occurs, the reload call is delayed a bit so that
|
||||
// multiple events arriving quickly only cause a single reload.
|
||||
var (
|
||||
debounce = time.NewTimer(0)
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
inCycle, hadEvent bool
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
rescanTriggered = false
|
||||
debounce = time.NewTimer(0)
|
||||
)
|
||||
// Ignore initial trigger
|
||||
if !debounce.Stop() {
|
||||
<-debounce.C
|
||||
}
|
||||
defer debounce.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-w.quit:
|
||||
return
|
||||
case <-w.ev:
|
||||
if !inCycle {
|
||||
// Trigger the scan (with delay), if not already triggered
|
||||
if !rescanTriggered {
|
||||
debounce.Reset(debounceDuration)
|
||||
inCycle = true
|
||||
} else {
|
||||
hadEvent = true
|
||||
rescanTriggered = true
|
||||
}
|
||||
case <-debounce.C:
|
||||
w.ac.mu.Lock()
|
||||
w.ac.reload()
|
||||
w.ac.mu.Unlock()
|
||||
if hadEvent {
|
||||
debounce.Reset(debounceDuration)
|
||||
inCycle, hadEvent = true, false
|
||||
} else {
|
||||
inCycle, hadEvent = false, false
|
||||
}
|
||||
w.ac.scanAccounts()
|
||||
rescanTriggered = false
|
||||
}
|
||||
}
|
||||
}
|
||||
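The timer handling above is a standard debounce: the first filesystem event arms the timer, events arriving inside the window are absorbed, and the scan runs once when the timer fires. A stripped-down sketch of the same pattern (channels and the rescan callback are placeholders; requires the standard time package):

// debounce coalesces bursts of events into a single rescan per window.
func debounce(events <-chan struct{}, quit <-chan struct{}, window time.Duration, rescan func()) {
	timer := time.NewTimer(0)
	if !timer.Stop() {
		<-timer.C // drain the initial firing, mirroring the code above
	}
	defer timer.Stop()

	armed := false
	for {
		select {
		case <-quit:
			return
		case <-events:
			if !armed {
				timer.Reset(window)
				armed = true
			}
		case <-timer.C:
			rescan()
			armed = false
		}
	}
}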
|
@ -41,6 +41,11 @@ type Manager struct {
|
||||
// NewManager creates a generic account manager to sign transaction via various
|
||||
// supported backends.
|
||||
func NewManager(backends ...Backend) *Manager {
|
||||
// Retrieve the initial list of wallets from the backends and sort by URL
|
||||
var wallets []Wallet
|
||||
for _, backend := range backends {
|
||||
wallets = merge(wallets, backend.Wallets()...)
|
||||
}
|
||||
// Subscribe to wallet notifications from all backends
|
||||
updates := make(chan WalletEvent, 4*len(backends))
|
||||
|
||||
@ -48,11 +53,6 @@ func NewManager(backends ...Backend) *Manager {
|
||||
for i, backend := range backends {
|
||||
subs[i] = backend.Subscribe(updates)
|
||||
}
|
||||
// Retrieve the initial list of wallets from the backends and sort by URL
|
||||
var wallets []Wallet
|
||||
for _, backend := range backends {
|
||||
wallets = merge(wallets, backend.Wallets()...)
|
||||
}
|
||||
// Assemble the account manager and return
|
||||
am := &Manager{
|
||||
backends: make(map[reflect.Type][]Backend),
|
||||
@ -96,9 +96,10 @@ func (am *Manager) update() {
|
||||
case event := <-am.updates:
|
||||
// Wallet event arrived, update local cache
|
||||
am.lock.Lock()
|
||||
if event.Arrive {
|
||||
switch event.Kind {
|
||||
case WalletArrived:
|
||||
am.wallets = merge(am.wallets, event.Wallet)
|
||||
} else {
|
||||
case WalletDropped:
|
||||
am.wallets = drop(am.wallets, event.Wallet)
|
||||
}
|
||||
am.lock.Unlock()
|
||||
|
@ -14,10 +14,6 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
@ -33,24 +29,30 @@ import (
|
||||
)
|
||||
|
||||
// LedgerScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
var LedgerScheme = "ledger"
|
||||
const LedgerScheme = "ledger"
|
||||
|
||||
// ledgerDeviceIDs are the known device IDs that Ledger wallets use.
|
||||
var ledgerDeviceIDs = []deviceID{
|
||||
{Vendor: 0x2c97, Product: 0x0000}, // Ledger Blue
|
||||
{Vendor: 0x2c97, Product: 0x0001}, // Ledger Nano S
|
||||
}
|
||||
// TrezorScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
const TrezorScheme = "trezor"
|
||||
|
||||
// Maximum time between wallet refreshes (if USB hotplug notifications don't work).
|
||||
const ledgerRefreshCycle = time.Second
|
||||
// refreshCycle is the maximum time between wallet refreshes (if USB hotplug
|
||||
// notifications don't work).
|
||||
const refreshCycle = time.Second
|
||||
|
||||
// Minimum time between wallet refreshes to avoid USB thrashing.
|
||||
const ledgerRefreshThrottling = 500 * time.Millisecond
|
||||
// refreshThrottling is the minimum time between wallet refreshes to avoid USB
|
||||
// thrashing.
|
||||
const refreshThrottling = 500 * time.Millisecond
|
||||
|
||||
// Hub is an accounts.Backend that can find and handle generic USB hardware wallets.
|
||||
type Hub struct {
|
||||
scheme string // Protocol scheme prefixing account and wallet URLs.
|
||||
vendorID uint16 // USB vendor identifier used for device discovery
|
||||
productIDs []uint16 // USB product identifiers used for device discovery
|
||||
usageID uint16 // USB usage page identifier used for macOS device discovery
|
||||
endpointID int // USB endpoint identifier used for non-macOS device discovery
|
||||
makeDriver func(log.Logger) driver // Factory method to construct a vendor specific driver
|
||||
|
||||
// LedgerHub is an accounts.Backend that can find and handle Ledger hardware wallets.
|
||||
type LedgerHub struct {
|
||||
refreshed time.Time // Time instance when the list of wallets was last refreshed
|
||||
wallets []accounts.Wallet // List of Ledger devices currently tracking
|
||||
wallets []accounts.Wallet // List of USB wallet devices currently tracking
|
||||
updateFeed event.Feed // Event feed to notify wallet additions/removals
|
||||
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
|
||||
updating bool // Whether the event notification loop is running
|
||||
@ -65,20 +67,36 @@ type LedgerHub struct {
|
||||
}
|
||||
|
||||
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
|
||||
func NewLedgerHub() (*LedgerHub, error) {
|
||||
func NewLedgerHub() (*Hub, error) {
|
||||
return newHub(LedgerScheme, 0x2c97, []uint16{0x0000 /* Ledger Blue */, 0x0001 /* Ledger Nano S */}, 0xffa0, 0, newLedgerDriver)
|
||||
}
|
||||
|
||||
// NewTrezorHub creates a new hardware wallet manager for Trezor devices.
|
||||
func NewTrezorHub() (*Hub, error) {
|
||||
return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor 1 */}, 0xff00, 0, newTrezorDriver)
|
||||
}
|
||||
|
||||
// newHub creates a new hardware wallet manager for generic USB devices.
|
||||
func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) {
|
||||
if !hid.Supported() {
|
||||
return nil, errors.New("unsupported platform")
|
||||
}
|
||||
hub := &LedgerHub{
|
||||
quit: make(chan chan error),
|
||||
hub := &Hub{
|
||||
scheme: scheme,
|
||||
vendorID: vendorID,
|
||||
productIDs: productIDs,
|
||||
usageID: usageID,
|
||||
endpointID: endpointID,
|
||||
makeDriver: makeDriver,
|
||||
quit: make(chan chan error),
|
||||
}
|
||||
hub.refreshWallets()
|
||||
return hub, nil
|
||||
}
|
||||
|
||||
// Wallets implements accounts.Backend, returning all the currently tracked USB
|
||||
// devices that appear to be Ledger hardware wallets.
|
||||
func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
||||
// devices that appear to be hardware wallets.
|
||||
func (hub *Hub) Wallets() []accounts.Wallet {
|
||||
// Make sure the list of wallets is up to date
|
||||
hub.refreshWallets()
|
||||
|
||||
@ -92,17 +110,17 @@ func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
||||
|
||||
// refreshWallets scans the USB devices attached to the machine and updates the
|
||||
// list of wallets based on the found devices.
|
||||
func (hub *LedgerHub) refreshWallets() {
|
||||
func (hub *Hub) refreshWallets() {
|
||||
// Don't scan the USB like crazy if the user fetches wallets in a loop
|
||||
hub.stateLock.RLock()
|
||||
elapsed := time.Since(hub.refreshed)
|
||||
hub.stateLock.RUnlock()
|
||||
|
||||
if elapsed < ledgerRefreshThrottling {
|
||||
if elapsed < refreshThrottling {
|
||||
return
|
||||
}
|
||||
// Retrieve the current list of Ledger devices
|
||||
var ledgers []hid.DeviceInfo
|
||||
// Retrieve the current list of USB wallet devices
|
||||
var devices []hid.DeviceInfo
|
||||
|
||||
if runtime.GOOS == "linux" {
|
||||
// hidapi on Linux opens the device during enumeration to retrieve some infos,
|
||||
@ -117,10 +135,10 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, info := range hid.Enumerate(0, 0) { // Can't enumerate directly, one valid ID is the 0 wildcard
|
||||
for _, id := range ledgerDeviceIDs {
|
||||
if info.VendorID == id.Vendor && info.ProductID == id.Product {
|
||||
ledgers = append(ledgers, info)
|
||||
for _, info := range hid.Enumerate(hub.vendorID, 0) {
|
||||
for _, id := range hub.productIDs {
|
||||
if info.ProductID == id && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) {
|
||||
devices = append(devices, info)
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -132,22 +150,29 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
// Transform the current list of wallets into the new one
|
||||
hub.stateLock.Lock()
|
||||
|
||||
wallets := make([]accounts.Wallet, 0, len(ledgers))
|
||||
wallets := make([]accounts.Wallet, 0, len(devices))
|
||||
events := []accounts.WalletEvent{}
|
||||
|
||||
for _, ledger := range ledgers {
|
||||
url := accounts.URL{Scheme: LedgerScheme, Path: ledger.Path}
|
||||
for _, device := range devices {
|
||||
url := accounts.URL{Scheme: hub.scheme, Path: device.Path}
|
||||
|
||||
// Drop wallets in front of the next device or those that failed for some reason
|
||||
for len(hub.wallets) > 0 && (hub.wallets[0].URL().Cmp(url) < 0 || hub.wallets[0].(*ledgerWallet).failed()) {
|
||||
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Arrive: false})
|
||||
for len(hub.wallets) > 0 {
|
||||
// Abort if we're past the current device and found an operational one
|
||||
_, failure := hub.wallets[0].Status()
|
||||
if hub.wallets[0].URL().Cmp(url) >= 0 || failure == nil {
|
||||
break
|
||||
}
|
||||
// Drop the stale and failed devices
|
||||
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Kind: accounts.WalletDropped})
|
||||
hub.wallets = hub.wallets[1:]
|
||||
}
|
||||
// If there are no more wallets or the device is before the next, wrap new wallet
|
||||
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
||||
wallet := &ledgerWallet{hub: hub, url: &url, info: ledger, log: log.New("url", url)}
|
||||
logger := log.New("url", url)
|
||||
wallet := &wallet{hub: hub, driver: hub.makeDriver(logger), url: &url, info: device, log: logger}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
|
||||
wallets = append(wallets, wallet)
|
||||
continue
|
||||
}
|
||||
@ -160,7 +185,7 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
}
|
||||
// Drop any leftover wallets and set the new batch
|
||||
for _, wallet := range hub.wallets {
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
|
||||
}
|
||||
hub.refreshed = time.Now()
|
||||
hub.wallets = wallets
|
||||
@ -173,8 +198,8 @@ func (hub *LedgerHub) refreshWallets() {
|
||||
}
|
||||
|
||||
// Subscribe implements accounts.Backend, creating an async subscription to
|
||||
// receive notifications on the addition or removal of Ledger wallets.
|
||||
func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// receive notifications on the addition or removal of USB wallets.
|
||||
func (hub *Hub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// We need the mutex to reliably start/stop the update loop
|
||||
hub.stateLock.Lock()
|
||||
defer hub.stateLock.Unlock()
|
||||
@ -190,18 +215,14 @@ func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscrip
|
||||
return sub
|
||||
}
|
||||
|
||||
// updater is responsible for maintaining an up-to-date list of wallets stored in
|
||||
// the keystore, and for firing wallet addition/removal events. It listens for
|
||||
// account change events from the underlying account cache, and also periodically
|
||||
// forces a manual refresh (only triggers for systems where the filesystem notifier
|
||||
// is not running).
|
||||
func (hub *LedgerHub) updater() {
|
||||
// updater is responsible for maintaining an up-to-date list of wallets managed
|
||||
// by the USB hub, and for firing wallet addition/removal events.
|
||||
func (hub *Hub) updater() {
|
||||
for {
|
||||
// Wait for a USB hotplug event (not supported yet) or a refresh timeout
|
||||
select {
|
||||
//case <-hub.changes: // reenable on hotplug implementation
|
||||
case <-time.After(ledgerRefreshCycle):
|
||||
}
|
||||
// TODO: Wait for a USB hotplug event (not supported yet) or a refresh timeout
|
||||
// <-hub.changes
|
||||
time.Sleep(refreshCycle)
|
||||
|
||||
// Run the wallet refresher
|
||||
hub.refreshWallets()
|
||||
|
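Tying the new constructors together, a caller would typically build both hubs and hand them to the generic manager as plain backends. A sketch, assuming only the NewLedgerHub/NewTrezorHub constructors and accounts.NewManager shown in this diff (error handling abbreviated):

// Assemble USB wallet backends and wrap them in the generic account manager.
var backends []accounts.Backend
if ledger, err := usbwallet.NewLedgerHub(); err == nil {
	backends = append(backends, ledger)
}
if trezor, err := usbwallet.NewTrezorHub(); err == nil {
	backends = append(backends, trezor)
}
manager := accounts.NewManager(backends...)
_ = manager // subscribe to events / list wallets from here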
accounts/usbwallet/internal/trezor/messages.pb.go (new file, 3081 lines; diff suppressed because it is too large)
accounts/usbwallet/internal/trezor/messages.proto (new file, 903 lines)
@ -0,0 +1,903 @@
|
||||
// This file originates from the SatoshiLabs Trezor `common` repository at:
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/messages.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
/**
|
||||
* Messages for TREZOR communication
|
||||
*/
|
||||
|
||||
// Sugar for easier handling in Java
|
||||
option java_package = "com.satoshilabs.trezor.lib.protobuf";
|
||||
option java_outer_classname = "TrezorMessage";
|
||||
|
||||
import "types.proto";
|
||||
|
||||
/**
|
||||
* Mapping between Trezor wire identifier (uint) and a protobuf message
|
||||
*/
|
||||
enum MessageType {
|
||||
MessageType_Initialize = 0 [(wire_in) = true];
|
||||
MessageType_Ping = 1 [(wire_in) = true];
|
||||
MessageType_Success = 2 [(wire_out) = true];
|
||||
MessageType_Failure = 3 [(wire_out) = true];
|
||||
MessageType_ChangePin = 4 [(wire_in) = true];
|
||||
MessageType_WipeDevice = 5 [(wire_in) = true];
|
||||
MessageType_FirmwareErase = 6 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_FirmwareUpload = 7 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_FirmwareRequest = 8 [(wire_out) = true, (wire_bootloader) = true];
|
||||
MessageType_GetEntropy = 9 [(wire_in) = true];
|
||||
MessageType_Entropy = 10 [(wire_out) = true];
|
||||
MessageType_GetPublicKey = 11 [(wire_in) = true];
|
||||
MessageType_PublicKey = 12 [(wire_out) = true];
|
||||
MessageType_LoadDevice = 13 [(wire_in) = true];
|
||||
MessageType_ResetDevice = 14 [(wire_in) = true];
|
||||
MessageType_SignTx = 15 [(wire_in) = true];
|
||||
MessageType_SimpleSignTx = 16 [(wire_in) = true, deprecated = true];
|
||||
MessageType_Features = 17 [(wire_out) = true];
|
||||
MessageType_PinMatrixRequest = 18 [(wire_out) = true];
|
||||
MessageType_PinMatrixAck = 19 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_Cancel = 20 [(wire_in) = true];
|
||||
MessageType_TxRequest = 21 [(wire_out) = true];
|
||||
MessageType_TxAck = 22 [(wire_in) = true];
|
||||
MessageType_CipherKeyValue = 23 [(wire_in) = true];
|
||||
MessageType_ClearSession = 24 [(wire_in) = true];
|
||||
MessageType_ApplySettings = 25 [(wire_in) = true];
|
||||
MessageType_ButtonRequest = 26 [(wire_out) = true];
|
||||
MessageType_ButtonAck = 27 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_ApplyFlags = 28 [(wire_in) = true];
|
||||
MessageType_GetAddress = 29 [(wire_in) = true];
|
||||
MessageType_Address = 30 [(wire_out) = true];
|
||||
MessageType_SelfTest = 32 [(wire_in) = true, (wire_bootloader) = true];
|
||||
MessageType_BackupDevice = 34 [(wire_in) = true];
|
||||
MessageType_EntropyRequest = 35 [(wire_out) = true];
|
||||
MessageType_EntropyAck = 36 [(wire_in) = true];
|
||||
MessageType_SignMessage = 38 [(wire_in) = true];
|
||||
MessageType_VerifyMessage = 39 [(wire_in) = true];
|
||||
MessageType_MessageSignature = 40 [(wire_out) = true];
|
||||
MessageType_PassphraseRequest = 41 [(wire_out) = true];
|
||||
MessageType_PassphraseAck = 42 [(wire_in) = true, (wire_tiny) = true];
|
||||
MessageType_EstimateTxSize = 43 [(wire_in) = true, deprecated = true];
|
||||
MessageType_TxSize = 44 [(wire_out) = true, deprecated = true];
|
||||
MessageType_RecoveryDevice = 45 [(wire_in) = true];
|
||||
MessageType_WordRequest = 46 [(wire_out) = true];
|
||||
MessageType_WordAck = 47 [(wire_in) = true];
|
||||
MessageType_CipheredKeyValue = 48 [(wire_out) = true];
|
||||
MessageType_EncryptMessage = 49 [(wire_in) = true, deprecated = true];
|
||||
MessageType_EncryptedMessage = 50 [(wire_out) = true, deprecated = true];
|
||||
MessageType_DecryptMessage = 51 [(wire_in) = true, deprecated = true];
|
||||
MessageType_DecryptedMessage = 52 [(wire_out) = true, deprecated = true];
|
||||
MessageType_SignIdentity = 53 [(wire_in) = true];
|
||||
MessageType_SignedIdentity = 54 [(wire_out) = true];
|
||||
MessageType_GetFeatures = 55 [(wire_in) = true];
|
||||
MessageType_EthereumGetAddress = 56 [(wire_in) = true];
|
||||
MessageType_EthereumAddress = 57 [(wire_out) = true];
|
||||
MessageType_EthereumSignTx = 58 [(wire_in) = true];
|
||||
MessageType_EthereumTxRequest = 59 [(wire_out) = true];
|
||||
MessageType_EthereumTxAck = 60 [(wire_in) = true];
|
||||
MessageType_GetECDHSessionKey = 61 [(wire_in) = true];
|
||||
MessageType_ECDHSessionKey = 62 [(wire_out) = true];
|
||||
MessageType_SetU2FCounter = 63 [(wire_in) = true];
|
||||
MessageType_EthereumSignMessage = 64 [(wire_in) = true];
|
||||
MessageType_EthereumVerifyMessage = 65 [(wire_in) = true];
|
||||
MessageType_EthereumMessageSignature = 66 [(wire_out) = true];
|
||||
MessageType_DebugLinkDecision = 100 [(wire_debug_in) = true, (wire_tiny) = true];
|
||||
MessageType_DebugLinkGetState = 101 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkState = 102 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkStop = 103 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkLog = 104 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkMemoryRead = 110 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkMemory = 111 [(wire_debug_out) = true];
|
||||
MessageType_DebugLinkMemoryWrite = 112 [(wire_debug_in) = true];
|
||||
MessageType_DebugLinkFlashErase = 113 [(wire_debug_in) = true];
|
||||
}
|
||||
|
||||
////////////////////
|
||||
// Basic messages //
|
||||
////////////////////
|
||||
|
||||
/**
|
||||
* Request: Reset device to default state and ask for device details
|
||||
* @next Features
|
||||
*/
|
||||
message Initialize {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask for device details (no device reset)
|
||||
* @next Features
|
||||
*/
|
||||
message GetFeatures {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Reports various information about the device
|
||||
* @prev Initialize
|
||||
* @prev GetFeatures
|
||||
*/
|
||||
message Features {
|
||||
optional string vendor = 1; // name of the manufacturer, e.g. "bitcointrezor.com"
|
||||
optional uint32 major_version = 2; // major version of the device, e.g. 1
|
||||
optional uint32 minor_version = 3; // minor version of the device, e.g. 0
|
||||
optional uint32 patch_version = 4; // patch version of the device, e.g. 0
|
||||
optional bool bootloader_mode = 5; // is device in bootloader mode?
|
||||
optional string device_id = 6; // device's unique identifier
|
||||
optional bool pin_protection = 7; // is device protected by PIN?
|
||||
optional bool passphrase_protection = 8; // is node/mnemonic encrypted using passphrase?
|
||||
optional string language = 9; // device language
|
||||
optional string label = 10; // device description label
|
||||
repeated CoinType coins = 11; // supported coins
|
||||
optional bool initialized = 12; // does device contain seed?
|
||||
optional bytes revision = 13; // SCM revision of firmware
|
||||
optional bytes bootloader_hash = 14; // hash of the bootloader
|
||||
optional bool imported = 15; // was storage imported from an external source?
|
||||
optional bool pin_cached = 16; // is PIN already cached in session?
|
||||
optional bool passphrase_cached = 17; // is passphrase already cached in session?
|
||||
optional bool firmware_present = 18; // is valid firmware loaded?
|
||||
optional bool needs_backup = 19; // does storage need backup? (equals to Storage.needs_backup)
|
||||
optional uint32 flags = 20; // device flags (equals to Storage.flags)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: clear session (removes cached PIN, passphrase, etc).
|
||||
* @next Success
|
||||
*/
|
||||
message ClearSession {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: change language and/or label of the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
* @next ButtonRequest
|
||||
* @next PinMatrixRequest
|
||||
*/
|
||||
message ApplySettings {
|
||||
optional string language = 1;
|
||||
optional string label = 2;
|
||||
optional bool use_passphrase = 3;
|
||||
optional bytes homescreen = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: set flags of the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message ApplyFlags {
|
||||
optional uint32 flags = 1; // bitmask, can only set bits, not unset
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Starts workflow for setting/changing/removing the PIN
|
||||
* @next ButtonRequest
|
||||
* @next PinMatrixRequest
|
||||
*/
|
||||
message ChangePin {
|
||||
optional bool remove = 1; // is PIN removal requested?
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Test if the device is alive, device sends back the message in Success response
|
||||
* @next Success
|
||||
*/
|
||||
message Ping {
|
||||
optional string message = 1; // message to send back in Success message
|
||||
optional bool button_protection = 2; // ask for button press
|
||||
optional bool pin_protection = 3; // ask for PIN if set in device
|
||||
optional bool passphrase_protection = 4; // ask for passphrase if set in device
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Success of the previous request
|
||||
*/
|
||||
message Success {
|
||||
optional string message = 1; // human readable description of action or request-specific payload
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Failure of the previous request
|
||||
*/
|
||||
message Failure {
|
||||
optional FailureType code = 1; // computer-readable definition of the error state
|
||||
optional string message = 2; // human-readable message of the error state
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is waiting for HW button press.
|
||||
* @next ButtonAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message ButtonRequest {
|
||||
optional ButtonRequestType code = 1;
|
||||
optional string data = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer agrees to wait for HW button press
|
||||
* @prev ButtonRequest
|
||||
*/
|
||||
message ButtonAck {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
|
||||
* @next PinMatrixAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message PinMatrixRequest {
|
||||
optional PinMatrixRequestType type = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer responds with encoded PIN
|
||||
* @prev PinMatrixRequest
|
||||
*/
|
||||
message PinMatrixAck {
|
||||
required string pin = 1; // matrix encoded PIN entered by user
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Abort last operation that required user interaction
|
||||
* @prev ButtonRequest
|
||||
* @prev PinMatrixRequest
|
||||
* @prev PassphraseRequest
|
||||
*/
|
||||
message Cancel {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device awaits encryption passphrase
|
||||
* @next PassphraseAck
|
||||
* @next Cancel
|
||||
*/
|
||||
message PassphraseRequest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Send passphrase back
|
||||
* @prev PassphraseRequest
|
||||
*/
|
||||
message PassphraseAck {
|
||||
required string passphrase = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Request a sample of random data generated by hardware RNG. May be used for testing.
|
||||
* @next ButtonRequest
|
||||
* @next Entropy
|
||||
* @next Failure
|
||||
*/
|
||||
message GetEntropy {
|
||||
required uint32 size = 1; // size of requested entropy
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Reply with random data generated by internal RNG
|
||||
* @prev GetEntropy
|
||||
*/
|
||||
message Entropy {
|
||||
required bytes entropy = 1; // stream of random generated bytes
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for public key corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next PublicKey
|
||||
* @next Failure
|
||||
*/
|
||||
message GetPublicKey {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string ecdsa_curve_name = 2; // ECDSA curve name to use
|
||||
optional bool show_display = 3; // optionally show on display before sending the result
|
||||
optional string coin_name = 4 [default='Bitcoin'];
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains public key derived from device private seed
|
||||
* @prev GetPublicKey
|
||||
*/
|
||||
message PublicKey {
|
||||
required HDNodeType node = 1; // BIP32 public node
|
||||
optional string xpub = 2; // serialized form of public node
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for address corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next Address
|
||||
* @next Failure
|
||||
*/
|
||||
message GetAddress {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string coin_name = 2 [default='Bitcoin'];
|
||||
optional bool show_display = 3; // optionally show on display before sending the result
|
||||
optional MultisigRedeemScriptType multisig = 4; // filled if we are showing a multisig address
|
||||
optional InputScriptType script_type = 5 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device for Ethereum address corresponding to address_n path
|
||||
* @next PassphraseRequest
|
||||
* @next EthereumAddress
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumGetAddress {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional bool show_display = 2; // optionally show on display before sending the result
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains address derived from device private seed
|
||||
* @prev GetAddress
|
||||
*/
|
||||
message Address {
|
||||
required string address = 1; // Coin address in Base58 encoding
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Contains an Ethereum address derived from device private seed
|
||||
* @prev EthereumGetAddress
|
||||
*/
|
||||
message EthereumAddress {
|
||||
required bytes address = 1; // Coin address as an Ethereum 160 bit hash
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Request device to wipe all sensitive data and settings
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message WipeDevice {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Load seed and related internal settings from the computer
|
||||
* @next ButtonRequest
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message LoadDevice {
|
||||
optional string mnemonic = 1; // seed encoded as BIP-39 mnemonic (12, 18 or 24 words)
|
||||
optional HDNodeType node = 2; // BIP-32 node
|
||||
optional string pin = 3; // set PIN protection
|
||||
optional bool passphrase_protection = 4; // enable master node encryption using passphrase
|
||||
optional string language = 5 [default='english']; // device language
|
||||
optional string label = 6; // device label
|
||||
optional bool skip_checksum = 7; // do not test mnemonic for valid BIP-39 checksum
|
||||
optional uint32 u2f_counter = 8; // U2F counter
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to do initialization involving user interaction
|
||||
* @next EntropyRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message ResetDevice {
|
||||
optional bool display_random = 1; // display entropy generated by the device before asking for additional entropy
|
||||
optional uint32 strength = 2 [default=256]; // strength of seed in bits
|
||||
optional bool passphrase_protection = 3; // enable master node encryption using passphrase
|
||||
optional bool pin_protection = 4; // enable PIN protection
|
||||
optional string language = 5 [default='english']; // device language
|
||||
optional string label = 6; // device label
|
||||
optional uint32 u2f_counter = 7; // U2F counter
|
||||
optional bool skip_backup = 8; // postpone seed backup to BackupDevice workflow
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Perform backup of the device seed if not backed up using ResetDevice
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message BackupDevice {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Ask for additional entropy from host computer
|
||||
* @prev ResetDevice
|
||||
* @next EntropyAck
|
||||
*/
|
||||
message EntropyRequest {
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Provide additional entropy for seed generation function
|
||||
* @prev EntropyRequest
|
||||
* @next ButtonRequest
|
||||
*/
|
||||
message EntropyAck {
|
||||
optional bytes entropy = 1; // 256 bits (32 bytes) of random data
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Start recovery workflow asking user for specific words of mnemonic
|
||||
* Used to recover the device safely even on an untrusted computer.
|
||||
* @next WordRequest
|
||||
*/
|
||||
message RecoveryDevice {
|
||||
optional uint32 word_count = 1; // number of words in BIP-39 mnemonic
|
||||
optional bool passphrase_protection = 2; // enable master node encryption using passphrase
|
||||
optional bool pin_protection = 3; // enable PIN protection
|
||||
optional string language = 4 [default='english']; // device language
|
||||
optional string label = 5; // device label
|
||||
optional bool enforce_wordlist = 6; // enforce BIP-39 wordlist during the process
|
||||
// 7 reserved for unused recovery method
|
||||
optional uint32 type = 8; // supported recovery type (see RecoveryType)
|
||||
optional uint32 u2f_counter = 9; // U2F counter
|
||||
optional bool dry_run = 10; // perform dry-run recovery workflow (for safe mnemonic validation)
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device is waiting for user to enter word of the mnemonic
|
||||
* Its position is shown only on device's internal display.
|
||||
* @prev RecoveryDevice
|
||||
* @prev WordAck
|
||||
*/
|
||||
message WordRequest {
|
||||
optional WordRequestType type = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer replies with word from the mnemonic
|
||||
* @prev WordRequest
|
||||
* @next WordRequest
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message WordAck {
|
||||
required string word = 1; // one word of mnemonic on asked position
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
// Message signing messages //
|
||||
//////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign message
|
||||
* @next MessageSignature
|
||||
* @next Failure
|
||||
*/
|
||||
message SignMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes message = 2; // message to be signed
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use for signing
|
||||
optional InputScriptType script_type = 4 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to verify message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message VerifyMessage {
|
||||
optional string address = 1; // address to verify
|
||||
optional bytes signature = 2; // signature to verify
|
||||
optional bytes message = 3; // message to verify
|
||||
optional string coin_name = 4 [default='Bitcoin']; // coin to use for verifying
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Signed message
|
||||
* @prev SignMessage
|
||||
*/
|
||||
message MessageSignature {
|
||||
optional string address = 1; // address used to sign the message
|
||||
optional bytes signature = 2; // signature of the message
|
||||
}
|
||||
|
||||
///////////////////////////
|
||||
// Encryption/decryption //
|
||||
///////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to encrypt message
|
||||
* @next EncryptedMessage
|
||||
* @next Failure
|
||||
*/
|
||||
message EncryptMessage {
|
||||
optional bytes pubkey = 1; // public key
|
||||
optional bytes message = 2; // message to encrypt
|
||||
optional bool display_only = 3; // show just on display? (don't send back via wire)
|
||||
repeated uint32 address_n = 4; // BIP-32 path to derive the signing key from master node
|
||||
optional string coin_name = 5 [default='Bitcoin']; // coin to use for signing
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Encrypted message
|
||||
* @prev EncryptMessage
|
||||
*/
|
||||
message EncryptedMessage {
|
||||
optional bytes nonce = 1; // nonce used during encryption
|
||||
optional bytes message = 2; // encrypted message
|
||||
optional bytes hmac = 3; // message hmac
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to decrypt message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message DecryptMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the decryption key from master node
|
||||
optional bytes nonce = 2; // nonce used during encryption
|
||||
optional bytes message = 3; // message to decrypt
|
||||
optional bytes hmac = 4; // message hmac
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Decrypted message
|
||||
* @prev DecryptMessage
|
||||
*/
|
||||
message DecryptedMessage {
|
||||
optional bytes message = 1; // decrypted message
|
||||
optional string address = 2; // address used to sign the message (if used)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to encrypt or decrypt value of given key
|
||||
* @next CipheredKeyValue
|
||||
* @next Failure
|
||||
*/
|
||||
message CipherKeyValue {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional string key = 2; // key component of key:value
|
||||
optional bytes value = 3; // value component of key:value
|
||||
optional bool encrypt = 4; // are we encrypting (True) or decrypting (False)?
|
||||
optional bool ask_on_encrypt = 5; // should we ask on encrypt operation?
|
||||
optional bool ask_on_decrypt = 6; // should we ask on decrypt operation?
|
||||
optional bytes iv = 7; // initialization vector (will be computed if not set)
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Return ciphered/deciphered value
|
||||
* @prev CipherKeyValue
|
||||
*/
|
||||
message CipheredKeyValue {
|
||||
optional bytes value = 1; // ciphered/deciphered value
|
||||
}
|
||||
|
||||
//////////////////////////////////
|
||||
// Transaction signing messages //
|
||||
//////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Estimated size of the transaction
|
||||
* This behaves exactly like SignTx, which means that it can ask using TxRequest
|
||||
* This call is non-blocking (except possible PassphraseRequest to unlock the seed)
|
||||
* @next TxSize
|
||||
* @next Failure
|
||||
*/
|
||||
message EstimateTxSize {
|
||||
required uint32 outputs_count = 1; // number of transaction outputs
|
||||
required uint32 inputs_count = 2; // number of transaction inputs
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Estimated size of the transaction
|
||||
* @prev EstimateTxSize
|
||||
*/
|
||||
message TxSize {
|
||||
optional uint32 tx_size = 1; // estimated size of transaction in bytes
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign transaction
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next TxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message SignTx {
|
||||
required uint32 outputs_count = 1; // number of transaction outputs
|
||||
required uint32 inputs_count = 2; // number of transaction inputs
|
||||
optional string coin_name = 3 [default='Bitcoin']; // coin to use
|
||||
optional uint32 version = 4 [default=1]; // transaction version
|
||||
optional uint32 lock_time = 5 [default=0]; // transaction lock_time
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Simplified transaction signing
|
||||
* This method doesn't support streaming, so there are hardware limits in number of inputs and outputs.
|
||||
* In case of success, the result is returned using TxRequest message.
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next TxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message SimpleSignTx {
|
||||
repeated TxInputType inputs = 1; // transaction inputs
|
||||
repeated TxOutputType outputs = 2; // transaction outputs
|
||||
repeated TransactionType transactions = 3; // transactions whose outputs are used to build current inputs
|
||||
optional string coin_name = 4 [default='Bitcoin']; // coin to use
|
||||
optional uint32 version = 5 [default=1]; // transaction version
|
||||
optional uint32 lock_time = 6 [default=0]; // transaction lock_time
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device asks for information for signing transaction or returns the last result
|
||||
* If request_index is set, device awaits TxAck message (with fields filled in according to request_type)
|
||||
* If signature_index is set, 'signature' contains signed input of signature_index's input
|
||||
* @prev SignTx
|
||||
* @prev SimpleSignTx
|
||||
* @prev TxAck
|
||||
*/
|
||||
message TxRequest {
|
||||
optional RequestType request_type = 1; // what should be filled in TxAck message?
|
||||
optional TxRequestDetailsType details = 2; // request for tx details
|
||||
optional TxRequestSerializedType serialized = 3; // serialized data and request for next
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Reported transaction data
|
||||
* @prev TxRequest
|
||||
* @next TxRequest
|
||||
*/
|
||||
message TxAck {
|
||||
optional TransactionType tx = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign transaction
|
||||
* All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
|
||||
* Note: the first at most 1024 bytes of data MUST be transmitted as part of this message.
|
||||
* @next PassphraseRequest
|
||||
* @next PinMatrixRequest
|
||||
* @next EthereumTxRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumSignTx {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
optional bytes nonce = 2; // <=256 bit unsigned big endian
|
||||
optional bytes gas_price = 3; // <=256 bit unsigned big endian (in wei)
|
||||
optional bytes gas_limit = 4; // <=256 bit unsigned big endian
|
||||
optional bytes to = 5; // 160 bit address hash
|
||||
optional bytes value = 6; // <=256 bit unsigned big endian (in wei)
|
||||
optional bytes data_initial_chunk = 7; // The initial data chunk (<= 1024 bytes)
|
||||
optional uint32 data_length = 8; // Length of transaction payload
|
||||
optional uint32 chain_id = 9; // Chain Id for EIP 155
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device asks for more data from transaction payload, or returns the signature.
|
||||
* If data_length is set, device awaits that many more bytes of payload.
|
||||
* Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
|
||||
* @prev EthereumSignTx
|
||||
* @next EthereumTxAck
|
||||
*/
|
||||
message EthereumTxRequest {
|
||||
optional uint32 data_length = 1; // Number of bytes being requested (<= 1024)
|
||||
optional uint32 signature_v = 2; // Computed signature (recovery parameter, limited to 27 or 28)
|
||||
optional bytes signature_r = 3; // Computed signature R component (256 bit)
|
||||
optional bytes signature_s = 4; // Computed signature S component (256 bit)
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Transaction payload data.
|
||||
* @prev EthereumTxRequest
|
||||
* @next EthereumTxRequest
|
||||
*/
|
||||
message EthereumTxAck {
|
||||
optional bytes data_chunk = 1; // Bytes from transaction payload (<= 1024 bytes)
|
||||
}
|
||||
|
||||
////////////////////////////////////////
|
||||
// Ethereum: Message signing messages //
|
||||
////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign message
|
||||
* @next EthereumMessageSignature
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumSignMessage {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes message = 2; // message to be signed
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to verify message
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message EthereumVerifyMessage {
|
||||
optional bytes address = 1; // address to verify
|
||||
optional bytes signature = 2; // signature to verify
|
||||
optional bytes message = 3; // message to verify
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Signed message
|
||||
* @prev EthereumSignMessage
|
||||
*/
|
||||
message EthereumMessageSignature {
|
||||
optional bytes address = 1; // address used to sign the message
|
||||
optional bytes signature = 2; // signature of the message
|
||||
}
|
||||
|
||||
///////////////////////
|
||||
// Identity messages //
|
||||
///////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to sign identity
|
||||
* @next SignedIdentity
|
||||
* @next Failure
|
||||
*/
|
||||
message SignIdentity {
|
||||
optional IdentityType identity = 1; // identity
|
||||
optional bytes challenge_hidden = 2; // non-visible challenge
|
||||
optional string challenge_visual = 3; // challenge shown on display (e.g. date+time)
|
||||
optional string ecdsa_curve_name = 4; // ECDSA curve name to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device provides signed identity
|
||||
* @prev SignIdentity
|
||||
*/
|
||||
message SignedIdentity {
|
||||
optional string address = 1; // identity address
|
||||
optional bytes public_key = 2; // identity public key
|
||||
optional bytes signature = 3; // signature of the identity data
|
||||
}
|
||||
|
||||
///////////////////
|
||||
// ECDH messages //
|
||||
///////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to generate ECDH session key
|
||||
* @next ECDHSessionKey
|
||||
* @next Failure
|
||||
*/
|
||||
message GetECDHSessionKey {
|
||||
optional IdentityType identity = 1; // identity
|
||||
optional bytes peer_public_key = 2; // peer's public key
|
||||
optional string ecdsa_curve_name = 3; // ECDSA curve name to use
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device provides ECDH session key
|
||||
* @prev GetECDHSessionKey
|
||||
*/
|
||||
message ECDHSessionKey {
|
||||
optional bytes session_key = 1; // ECDH session key
|
||||
}
|
||||
|
||||
///////////////////
|
||||
// U2F messages //
|
||||
///////////////////
|
||||
|
||||
/**
|
||||
* Request: Set U2F counter
|
||||
* @next Success
|
||||
*/
|
||||
message SetU2FCounter {
|
||||
optional uint32 u2f_counter = 1; // counter
|
||||
}
|
||||
|
||||
/////////////////////////
|
||||
// Bootloader messages //
|
||||
/////////////////////////
|
||||
|
||||
/**
|
||||
* Request: Ask device to erase its firmware (so it can be replaced via FirmwareUpload)
|
||||
* @next Success
|
||||
* @next FirmwareRequest
|
||||
* @next Failure
|
||||
*/
|
||||
message FirmwareErase {
|
||||
optional uint32 length = 1; // length of new firmware
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Ask for firmware chunk
|
||||
* @next FirmwareUpload
|
||||
*/
|
||||
message FirmwareRequest {
|
||||
optional uint32 offset = 1; // offset of requested firmware chunk
|
||||
optional uint32 length = 2; // length of requested firmware chunk
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Send firmware in binary form to the device
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message FirmwareUpload {
|
||||
required bytes payload = 1; // firmware to be loaded into device
|
||||
optional bytes hash = 2; // hash of the payload
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Request: Perform a device self-test
|
||||
* @next Success
|
||||
* @next Failure
|
||||
*/
|
||||
message SelfTest {
|
||||
optional bytes payload = 1; // payload to be used in self-test
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Debug messages (only available if DebugLink is enabled) //
|
||||
/////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Request: "Press" the button on the device
|
||||
* @next Success
|
||||
*/
|
||||
message DebugLinkDecision {
|
||||
required bool yes_no = 1; // true for "Confirm", false for "Cancel"
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Computer asks for device state
|
||||
* @next DebugLinkState
|
||||
*/
|
||||
message DebugLinkGetState {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device current state
|
||||
* @prev DebugLinkGetState
|
||||
*/
|
||||
message DebugLinkState {
|
||||
optional bytes layout = 1; // raw buffer of display
|
||||
optional string pin = 2; // current PIN, blank if PIN is not set/enabled
|
||||
optional string matrix = 3; // current PIN matrix
|
||||
optional string mnemonic = 4; // current BIP-39 mnemonic
|
||||
optional HDNodeType node = 5; // current BIP-32 node
|
||||
optional bool passphrase_protection = 6; // is node/mnemonic encrypted using passphrase?
|
||||
optional string reset_word = 7; // word on device display during ResetDevice workflow
|
||||
optional bytes reset_entropy = 8; // current entropy during ResetDevice workflow
|
||||
optional string recovery_fake_word = 9; // (fake) word on display during RecoveryDevice workflow
|
||||
optional uint32 recovery_word_pos = 10; // index of mnemonic word the device is expecting during RecoveryDevice workflow
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Ask device to restart
|
||||
*/
|
||||
message DebugLinkStop {
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device wants host to log event
|
||||
*/
|
||||
message DebugLinkLog {
|
||||
optional uint32 level = 1;
|
||||
optional string bucket = 2;
|
||||
optional string text = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Read memory from device
|
||||
* @next DebugLinkMemory
|
||||
*/
|
||||
message DebugLinkMemoryRead {
|
||||
optional uint32 address = 1;
|
||||
optional uint32 length = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Response: Device sends memory back
|
||||
* @prev DebugLinkMemoryRead
|
||||
*/
|
||||
message DebugLinkMemory {
|
||||
optional bytes memory = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Write memory to device.
|
||||
* WARNING: Writing to the wrong location can irreparably break the device.
|
||||
*/
|
||||
message DebugLinkMemoryWrite {
|
||||
optional uint32 address = 1;
|
||||
optional bytes memory = 2;
|
||||
optional bool flash = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request: Erase block of flash on device
|
||||
* WARNING: Writing to the wrong location can irreparably break the device.
|
||||
*/
|
||||
message DebugLinkFlashErase {
|
||||
optional uint32 sector = 1;
|
||||
}
|
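The EthereumSignTx / EthereumTxRequest / EthereumTxAck messages above define a chunked signing flow: at most 1024 bytes of payload ride along with the initial request, and the device pulls the remainder through data_length. The sketch below shows how the initial request could be populated from the generated Go bindings; the field names assume standard protoc-gen-go output for this proto, and the surrounding exchange/framing code is not part of this diff excerpt.

```go
package usbwallet

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor"
)

// newEthereumSignTx populates the initial EthereumSignTx request. Only the
// first 1024 bytes of the payload are attached; the remainder is returned so
// the caller can stream it in EthereumTxAck chunks whenever the device asks
// for more data via EthereumTxRequest.data_length.
func newEthereumSignTx(path []uint32, nonce uint64, gasPrice, gasLimit, value *big.Int, to, data []byte, chainID uint32) (*trezor.EthereumSignTx, []byte) {
	request := &trezor.EthereumSignTx{
		AddressN: path,                                   // BIP-32 path, hardened components carry the top bit
		Nonce:    new(big.Int).SetUint64(nonce).Bytes(),  // <=256 bit unsigned big endian
		GasPrice: gasPrice.Bytes(),                       // in wei
		GasLimit: gasLimit.Bytes(),
		To:       to, // 160 bit address hash
		Value:    value.Bytes(),
	}
	length := uint32(len(data))
	request.DataLength = &length
	if len(data) > 1024 {
		request.DataInitialChunk, data = data[:1024], data[1024:]
	} else {
		request.DataInitialChunk, data = data, nil
	}
	if chainID != 0 {
		request.ChainId = &chainID // EIP-155 replay protection
	}
	return request, data
}
```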
accounts/usbwallet/internal/trezor/trezor.go | 46 | Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Trezor hardware
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
//go:generate protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor,import_path=trezor:. types.proto messages.proto
|
||||
|
||||
// Package trezor contains the wire protocol wrapper in Go.
|
||||
package trezor
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// Type returns the protocol buffer type number of a specific message. If the
|
||||
// message is nil, this method panics!
|
||||
func Type(msg proto.Message) uint16 {
|
||||
return uint16(MessageType_value["MessageType_"+reflect.TypeOf(msg).Elem().Name()])
|
||||
}
|
||||
|
||||
// Name returns the friendly message type name of a specific protocol buffer
|
||||
// type number.
|
||||
func Name(kind uint16) string {
|
||||
name := MessageType_name[int32(kind)]
|
||||
if len(name) < 12 {
|
||||
return name
|
||||
}
|
||||
return name[12:]
|
||||
}
|
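A short illustration of the two helpers above, pairing a generated message with its wire identifier. Since internal/trezor is an internal package, it is only importable from inside accounts/usbwallet, so treat this as a usage sketch rather than a standalone program.

```go
package usbwallet

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor"
	"github.com/golang/protobuf/proto"
)

// describe shows how Type and Name round-trip a generated message: Type maps
// the Go struct name onto its MessageType wire identifier, Name strips the
// "MessageType_" prefix back off for logging.
func describe() {
	msg := &trezor.Ping{Message: proto.String("hello trezor")}

	kind := trezor.Type(msg)             // 1, i.e. MessageType_Ping
	fmt.Println(kind, trezor.Name(kind)) // prints: 1 Ping

	payload, err := proto.Marshal(msg) // body bytes that would follow the wire header
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: % x\n", payload)
}
```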
accounts/usbwallet/internal/trezor/types.pb.go | 1333 | Normal file (diff suppressed because it is too large)
accounts/usbwallet/internal/trezor/types.proto | 276 | Normal file
@@ -0,0 +1,276 @@
|
||||
// This file originates from the SatoshiLabs Trezor `common` repository at:
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/types.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
/**
|
||||
* Types for TREZOR communication
|
||||
*
|
||||
* @author Marek Palatinus <slush@satoshilabs.com>
|
||||
* @version 1.2
|
||||
*/
|
||||
|
||||
// Sugar for easier handling in Java
|
||||
option java_package = "com.satoshilabs.trezor.lib.protobuf";
|
||||
option java_outer_classname = "TrezorType";
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
/**
|
||||
* Options for specifying message direction and type of wire (normal/debug)
|
||||
*/
|
||||
extend google.protobuf.EnumValueOptions {
|
||||
optional bool wire_in = 50002; // message can be transmitted via wire from PC to TREZOR
|
||||
optional bool wire_out = 50003; // message can be transmitted via wire from TREZOR to PC
|
||||
optional bool wire_debug_in = 50004; // message can be transmitted via debug wire from PC to TREZOR
|
||||
optional bool wire_debug_out = 50005; // message can be transmitted via debug wire from TREZOR to PC
|
||||
optional bool wire_tiny = 50006; // message is handled by TREZOR when the USB stack is in tiny mode
|
||||
optional bool wire_bootloader = 50007; // message is only handled by TREZOR Bootloader
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of failures returned by Failure message
|
||||
* @used_in Failure
|
||||
*/
|
||||
enum FailureType {
|
||||
Failure_UnexpectedMessage = 1;
|
||||
Failure_ButtonExpected = 2;
|
||||
Failure_DataError = 3;
|
||||
Failure_ActionCancelled = 4;
|
||||
Failure_PinExpected = 5;
|
||||
Failure_PinCancelled = 6;
|
||||
Failure_PinInvalid = 7;
|
||||
Failure_InvalidSignature = 8;
|
||||
Failure_ProcessError = 9;
|
||||
Failure_NotEnoughFunds = 10;
|
||||
Failure_NotInitialized = 11;
|
||||
Failure_FirmwareError = 99;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of script which will be used for transaction output
|
||||
* @used_in TxOutputType
|
||||
*/
|
||||
enum OutputScriptType {
|
||||
PAYTOADDRESS = 0; // used for all addresses (bitcoin, p2sh, witness)
|
||||
PAYTOSCRIPTHASH = 1; // p2sh address (deprecated; use PAYTOADDRESS)
|
||||
PAYTOMULTISIG = 2; // only for change output
|
||||
PAYTOOPRETURN = 3; // op_return
|
||||
PAYTOWITNESS = 4; // only for change output
|
||||
PAYTOP2SHWITNESS = 5; // only for change output
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of script which will be used for transaction output
|
||||
* @used_in TxInputType
|
||||
*/
|
||||
enum InputScriptType {
|
||||
SPENDADDRESS = 0; // standard p2pkh address
|
||||
SPENDMULTISIG = 1; // p2sh multisig address
|
||||
EXTERNAL = 2; // reserved for external inputs (coinjoin)
|
||||
SPENDWITNESS = 3; // native segwit
|
||||
SPENDP2SHWITNESS = 4; // segwit over p2sh (backward compatible)
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of information required by transaction signing process
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
enum RequestType {
|
||||
TXINPUT = 0;
|
||||
TXOUTPUT = 1;
|
||||
TXMETA = 2;
|
||||
TXFINISHED = 3;
|
||||
TXEXTRADATA = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of button request
|
||||
* @used_in ButtonRequest
|
||||
*/
|
||||
enum ButtonRequestType {
|
||||
ButtonRequest_Other = 1;
|
||||
ButtonRequest_FeeOverThreshold = 2;
|
||||
ButtonRequest_ConfirmOutput = 3;
|
||||
ButtonRequest_ResetDevice = 4;
|
||||
ButtonRequest_ConfirmWord = 5;
|
||||
ButtonRequest_WipeDevice = 6;
|
||||
ButtonRequest_ProtectCall = 7;
|
||||
ButtonRequest_SignTx = 8;
|
||||
ButtonRequest_FirmwareCheck = 9;
|
||||
ButtonRequest_Address = 10;
|
||||
ButtonRequest_PublicKey = 11;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of PIN request
|
||||
* @used_in PinMatrixRequest
|
||||
*/
|
||||
enum PinMatrixRequestType {
|
||||
PinMatrixRequestType_Current = 1;
|
||||
PinMatrixRequestType_NewFirst = 2;
|
||||
PinMatrixRequestType_NewSecond = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of recovery procedure. These should be used as bitmask, e.g.,
|
||||
* `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
|
||||
* listing every method supported by the host computer.
|
||||
*
|
||||
* Note that ScrambledWords must be supported by every implementation
|
||||
* for backward compatibility; there is no way to not support it.
|
||||
*
|
||||
* @used_in RecoveryDevice
|
||||
*/
|
||||
enum RecoveryDeviceType {
|
||||
// use powers of two when extending this field
|
||||
RecoveryDeviceType_ScrambledWords = 0; // words in scrambled order
|
||||
RecoveryDeviceType_Matrix = 1; // matrix recovery type
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of Recovery Word request
|
||||
* @used_in WordRequest
|
||||
*/
|
||||
enum WordRequestType {
|
||||
WordRequestType_Plain = 0;
|
||||
WordRequestType_Matrix9 = 1;
|
||||
WordRequestType_Matrix6 = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing BIP32 (hierarchical deterministic) node
|
||||
* Used for imports of private key into the device and exporting public key out of device
|
||||
* @used_in PublicKey
|
||||
* @used_in LoadDevice
|
||||
* @used_in DebugLinkState
|
||||
* @used_in Storage
|
||||
*/
|
||||
message HDNodeType {
|
||||
required uint32 depth = 1;
|
||||
required uint32 fingerprint = 2;
|
||||
required uint32 child_num = 3;
|
||||
required bytes chain_code = 4;
|
||||
optional bytes private_key = 5;
|
||||
optional bytes public_key = 6;
|
||||
}
|
||||
|
||||
message HDNodePathType {
|
||||
required HDNodeType node = 1; // BIP-32 node in deserialized form
|
||||
repeated uint32 address_n = 2; // BIP-32 path to derive the key from node
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing Coin
|
||||
* @used_in Features
|
||||
*/
|
||||
message CoinType {
|
||||
optional string coin_name = 1;
|
||||
optional string coin_shortcut = 2;
|
||||
optional uint32 address_type = 3 [default=0];
|
||||
optional uint64 maxfee_kb = 4;
|
||||
optional uint32 address_type_p2sh = 5 [default=5];
|
||||
optional string signed_message_header = 8;
|
||||
optional uint32 xpub_magic = 9 [default=76067358]; // default=0x0488b21e
|
||||
optional uint32 xprv_magic = 10 [default=76066276]; // default=0x0488ade4
|
||||
optional bool segwit = 11;
|
||||
optional uint32 forkid = 12;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type of redeem script used in input
|
||||
* @used_in TxInputType
|
||||
*/
|
||||
message MultisigRedeemScriptType {
|
||||
repeated HDNodePathType pubkeys = 1; // pubkeys from multisig address (sorted lexicographically)
|
||||
repeated bytes signatures = 2; // existing signatures for partially signed input
|
||||
optional uint32 m = 3; // "m" from n, how many valid signatures are necessary for spending
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing transaction input
|
||||
* @used_in SimpleSignTx
|
||||
* @used_in TransactionType
|
||||
*/
|
||||
message TxInputType {
|
||||
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
|
||||
required bytes prev_hash = 2; // hash of previous transaction output to spend by this input
|
||||
required uint32 prev_index = 3; // index of previous output to spend
|
||||
optional bytes script_sig = 4; // script signature, unset for tx to sign
|
||||
optional uint32 sequence = 5 [default=4294967295]; // sequence (default=0xffffffff)
|
||||
optional InputScriptType script_type = 6 [default=SPENDADDRESS]; // defines template of input script
|
||||
optional MultisigRedeemScriptType multisig = 7; // Filled if input is going to spend multisig tx
|
||||
optional uint64 amount = 8; // amount of previous transaction output (for segwit only)
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing transaction output
|
||||
* @used_in SimpleSignTx
|
||||
* @used_in TransactionType
|
||||
*/
|
||||
message TxOutputType {
|
||||
optional string address = 1; // target coin address in Base58 encoding
|
||||
repeated uint32 address_n = 2; // BIP-32 path to derive the key from master node; has higher priority than "address"
|
||||
required uint64 amount = 3; // amount to spend in satoshis
|
||||
required OutputScriptType script_type = 4; // output script type
|
||||
optional MultisigRedeemScriptType multisig = 5; // defines multisig address; script_type must be PAYTOMULTISIG
|
||||
optional bytes op_return_data = 6; // defines op_return data; script_type must be PAYTOOPRETURN, amount must be 0
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing compiled transaction output
|
||||
* @used_in TransactionType
|
||||
*/
|
||||
message TxOutputBinType {
|
||||
required uint64 amount = 1;
|
||||
required bytes script_pubkey = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing transaction
|
||||
* @used_in SimpleSignTx
|
||||
*/
|
||||
message TransactionType {
|
||||
optional uint32 version = 1;
|
||||
repeated TxInputType inputs = 2;
|
||||
repeated TxOutputBinType bin_outputs = 3;
|
||||
repeated TxOutputType outputs = 5;
|
||||
optional uint32 lock_time = 4;
|
||||
optional uint32 inputs_cnt = 6;
|
||||
optional uint32 outputs_cnt = 7;
|
||||
optional bytes extra_data = 8;
|
||||
optional uint32 extra_data_len = 9;
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing request details
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
message TxRequestDetailsType {
|
||||
optional uint32 request_index = 1; // device expects TxAck message from the computer
|
||||
optional bytes tx_hash = 2; // tx_hash of requested transaction
|
||||
optional uint32 extra_data_len = 3; // length of requested extra data
|
||||
optional uint32 extra_data_offset = 4; // offset of requested extra data
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing serialized data
|
||||
* @used_in TxRequest
|
||||
*/
|
||||
message TxRequestSerializedType {
|
||||
optional uint32 signature_index = 1; // 'signature' field contains signed input of this index
|
||||
optional bytes signature = 2; // signature of the signature_index input
|
||||
optional bytes serialized_tx = 3; // part of serialized and signed transaction
|
||||
}
|
||||
|
||||
/**
|
||||
* Structure representing identity data
|
||||
* @used_in IdentityType
|
||||
*/
|
||||
message IdentityType {
|
||||
optional string proto = 1; // proto part of URI
|
||||
optional string user = 2; // user part of URI
|
||||
optional string host = 3; // host part of URI
|
||||
optional string port = 4; // port part of URI
|
||||
optional string path = 5; // path part of URI
|
||||
optional uint32 index = 6 [default=0]; // identity index
|
||||
}
|
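The repeated address_n fields used throughout these types carry a raw BIP-32 path, where hardened components are flagged by setting the top bit (0x80000000). The helper below is purely illustrative; in the real code go-ethereum's accounts.DerivationPath (used by ledger.go below) plays this role.

```go
package main

import "fmt"

const hardened = 0x80000000 // BIP-32 hardened-derivation flag

// ethereumBasePath builds m/44'/60'/0'/0 as the address_n values a device expects.
func ethereumBasePath() []uint32 {
	return []uint32{hardened + 44, hardened + 60, hardened + 0, 0}
}

func main() {
	fmt.Println(ethereumBasePath()) // [2147483692 2147483708 2147483648 0]
}
```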
accounts/usbwallet/ledger.go | 464 | Normal file
@@ -0,0 +1,464 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
|
||||
type ledgerOpcode byte
|
||||
|
||||
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam1 byte
|
||||
|
||||
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam2 byte
|
||||
|
||||
const (
|
||||
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
|
||||
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
|
||||
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
|
||||
|
||||
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
|
||||
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
|
||||
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
|
||||
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
|
||||
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
|
||||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
|
||||
)
|
||||
|
||||
// errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errLedgerReplyInvalidHeader = errors.New("ledger: invalid reply header")
|
||||
|
||||
// errLedgerInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errLedgerInvalidVersionReply = errors.New("ledger: invalid version reply")
|
||||
|
||||
// ledgerDriver implements the communication with a Ledger hardware wallet.
|
||||
type ledgerDriver struct {
|
||||
device io.ReadWriter // USB device connection to communicate through
|
||||
version [3]byte // Current version of the Ledger firmware (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
failure error // Any failure that would make the device unusable
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
|
||||
|
||||
// newLedgerDriver creates a new instance of a Ledger USB protocol driver.
|
||||
func newLedgerDriver(logger log.Logger) driver {
|
||||
return &ledgerDriver{
|
||||
log: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Status implements usbwallet.driver, returning various states the Ledger can
|
||||
// currently be in.
|
||||
func (w *ledgerDriver) Status() (string, error) {
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure), w.failure
|
||||
}
|
||||
if w.browser {
|
||||
return "Ethereum app in browser mode", w.failure
|
||||
}
|
||||
if w.offline() {
|
||||
return "Ethereum app offline", w.failure
|
||||
}
|
||||
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2]), w.failure
|
||||
}
|
||||
|
||||
// offline returns whether the wallet and the Ethereum app are offline or not.
|
||||
//
|
||||
// The method assumes that the state lock is held!
|
||||
func (w *ledgerDriver) offline() bool {
|
||||
return w.version == [3]byte{0, 0, 0}
|
||||
}
|
||||
|
||||
// Open implements usbwallet.driver, attempting to initialize the connection to the
|
||||
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
|
||||
// parameter is silently discarded.
|
||||
func (w *ledgerDriver) Open(device io.ReadWriter, passphrase string) error {
|
||||
w.device, w.failure = device, nil
|
||||
|
||||
_, err := w.ledgerDerive(accounts.DefaultBaseDerivationPath)
|
||||
if err != nil {
|
||||
// Ethereum app is not running or in browser mode, nothing more to do, return
|
||||
if err == errLedgerReplyInvalidHeader {
|
||||
w.browser = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
|
||||
if w.version, err = w.ledgerVersion(); err != nil {
|
||||
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements usbwallet.driver, cleaning up any metadata maintained within
|
||||
// the Ledger driver.
|
||||
func (w *ledgerDriver) Close() error {
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat implements usbwallet.driver, performing a sanity check against the
|
||||
// Ledger to see if it's still online.
|
||||
func (w *ledgerDriver) Heartbeat() error {
|
||||
if _, err := w.ledgerVersion(); err != nil && err != errLedgerInvalidVersionReply {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Derive implements usbwallet.driver, sending a derivation request to the Ledger
|
||||
// and returning the Ethereum address located on that derivation path.
|
||||
func (w *ledgerDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
|
||||
return w.ledgerDerive(path)
|
||||
}
|
||||
|
||||
// SignTx implements usbwallet.driver, sending the transaction to the Ledger and
|
||||
// waiting for the user to confirm or deny the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned as opposed to silently signing in Homestead mode.
|
||||
func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// If the Ethereum app is not running, abort
|
||||
if w.offline() {
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All information gathered and metadata checks out, request signing
|
||||
return w.ledgerSign(path, tx, chainID)
|
||||
}
|
||||
|
||||
// ledgerVersion retrieves the current version of the Ethereum wallet app running
|
||||
// on the Ledger wallet.
|
||||
//
|
||||
// The version retrieval protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+----+---
|
||||
// E0 | 06 | 00 | 00 | 00 | 04
|
||||
//
|
||||
// With no input data, and the output data being:
|
||||
//
|
||||
// Description | Length
|
||||
// ---------------------------------------------------+--------
|
||||
// Flags 01: arbitrary data signature enabled by user | 1 byte
|
||||
// Application major version | 1 byte
|
||||
// Application minor version | 1 byte
|
||||
// Application patch version | 1 byte
|
||||
func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
|
||||
if err != nil {
|
||||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errLedgerInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
copy(version[:], reply[1:])
|
||||
return version, nil
|
||||
}
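
As a companion to the reply layout documented above, a minimal standalone sketch of how the 4-byte GET_CONFIGURATION reply splits into the flags byte and the three version bytes. The reply value is made up for illustration:

package main

import "fmt"

func main() {
	reply := []byte{0x01, 1, 0, 3} // hypothetical reply: flags, major, minor, patch

	flags := reply[0]
	var version [3]byte
	copy(version[:], reply[1:]) // same copy ledgerVersion performs above

	fmt.Printf("arbitrary data signing enabled: %v\n", flags&0x01 != 0)
	fmt.Printf("Ethereum app v%d.%d.%d\n", version[0], version[1], version[2]) // v1.0.3
}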
|
||||
|
||||
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
|
||||
// wallet at the specified derivation path.
|
||||
//
|
||||
// The address derivation protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 02 | 00 return address
|
||||
// 01 display address and confirm before returning
|
||||
// | 00: do not return the chain code
|
||||
// | 01: return the chain code
|
||||
// | var | 00
|
||||
//
|
||||
// Where the input data is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+--------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------------------+-------------------
|
||||
// Public Key length | 1 byte
|
||||
// Uncompressed Public Key | arbitrary
|
||||
// Ethereum address length | 1 byte
|
||||
// Ethereum address | 40 bytes hex ascii
|
||||
// Chain code if requested | 32 bytes
|
||||
func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
// Discard the public key, we don't need that for now
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks public key entry")
|
||||
}
|
||||
reply = reply[1+int(reply[0]):]
|
||||
|
||||
// Extract the Ethereum hex address string
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks address entry")
|
||||
}
|
||||
hexstr := reply[1 : 1+int(reply[0])]
|
||||
|
||||
// Decode the hex string into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
return address, nil
|
||||
}
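
The path flattening above follows the input layout from the doc comment: one count byte, then each derivation index as a big-endian uint32. A standalone sketch using the common m/44'/60'/0'/0 path as a sample input (the choice of path is an assumption for illustration, not something this file mandates):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// m/44'/60'/0'/0 -- hardened components carry the 0x80000000 bit.
	derivationPath := []uint32{0x80000000 + 44, 0x80000000 + 60, 0x80000000, 0}

	path := make([]byte, 1+4*len(derivationPath))
	path[0] = byte(len(derivationPath))
	for i, component := range derivationPath {
		binary.BigEndian.PutUint32(path[1+4*i:], component)
	}
	fmt.Printf("%x\n", path) // 048000002c8000003c8000000000000000
}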
|
||||
|
||||
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
//
|
||||
// The transaction signing protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 04 | 00: first transaction data block
|
||||
// 80: subsequent transaction data block
|
||||
// | 00 | variable | variable
|
||||
//
|
||||
// Where the input for the first transaction block (first 255 bytes) is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+----------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the input for subsequent transaction blocks (first 255 bytes) are:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------+----------
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------+---------
|
||||
// signature V | 1 byte
|
||||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
|
||||
var (
|
||||
txrlp []byte
|
||||
err error
|
||||
)
|
||||
if chainID == nil {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
||||
// Send the request and wait for the response
|
||||
var (
|
||||
op = ledgerP1InitTransactionData
|
||||
reply []byte
|
||||
)
|
||||
for len(payload) > 0 {
|
||||
// Calculate the size of the next data chunk
|
||||
chunk := 255
|
||||
if chunk > len(payload) {
|
||||
chunk = len(payload)
|
||||
}
|
||||
// Send the chunk over, ensuring it's processed correctly
|
||||
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
// Shift the payload and ensure subsequent chunks are marked as such
|
||||
payload = payload[chunk:]
|
||||
op = ledgerP1ContTransactionData
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(reply) != 65 {
|
||||
return common.Address{}, nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(reply[1:], reply[0])
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
return sender, signed, nil
|
||||
}
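
The signature[64] adjustment above undoes the EIP-155 V encoding (chainID*2 + 35 + recovery id, truncated to a byte) so that WithSignature receives a plain 0/1 recovery id. A worked standalone sketch with a hypothetical chain ID of 1, where the wire V values 37 and 38 map back to 0 and 1:

package main

import "fmt"

func main() {
	chainID := uint64(1) // hypothetical chain ID

	for _, v := range []byte{37, 38} {
		recovery := v - byte(chainID*2+35) // same subtraction as ledgerSign above
		fmt.Printf("wire V %d -> recovery id %d\n", v, recovery)
	}
	// Output:
	// wire V 37 -> recovery id 0
	// wire V 38 -> recovery id 1
}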
|
||||
|
||||
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
|
||||
// message and retrieving the response.
|
||||
//
|
||||
// The common transport header is defined as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// Communication channel ID (big endian) | 2 bytes
|
||||
// Command tag | 1 byte
|
||||
// Packet sequence index (big endian) | 2 bytes
|
||||
// Payload | arbitrary
|
||||
//
|
||||
// The Communication channel ID allows commands multiplexing over the same
|
||||
// physical link. It is not used for the time being, and should be set to 0101
|
||||
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
|
||||
//
|
||||
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
|
||||
// APDU payloads, or TAG_PING (0x02) for a simple link test.
|
||||
//
|
||||
// The Packet sequence index describes the current sequence for fragmented payloads.
|
||||
// The first fragment index is 0x00.
|
||||
//
|
||||
// APDU Command payloads are encoded as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// APDU length (big endian) | 2 bytes
|
||||
// APDU CLA | 1 byte
|
||||
// APDU INS | 1 byte
|
||||
// APDU P1 | 1 byte
|
||||
// APDU P2 | 1 byte
|
||||
// APDU length | 1 byte
|
||||
// Optional APDU data | arbitrary
|
||||
func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
|
||||
// Construct the message payload, possibly split into multiple chunks
|
||||
apdu := make([]byte, 2, 7+len(data))
|
||||
|
||||
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
|
||||
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
|
||||
apdu = append(apdu, data...)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
|
||||
chunk := make([]byte, 64)
|
||||
space := len(chunk) - len(header)
|
||||
|
||||
for i := 0; len(apdu) > 0; i++ {
|
||||
// Construct the new message to stream
|
||||
chunk = append(chunk[:0], header...)
|
||||
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
|
||||
|
||||
if len(apdu) > space {
|
||||
chunk = append(chunk, apdu[:space]...)
|
||||
apdu = apdu[space:]
|
||||
} else {
|
||||
chunk = append(chunk, apdu...)
|
||||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var reply []byte
|
||||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errLedgerReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the total message length
|
||||
var payload []byte
|
||||
|
||||
if chunk[3] == 0x00 && chunk[4] == 0x00 {
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
|
||||
payload = chunk[7:]
|
||||
} else {
|
||||
payload = chunk[5:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
return reply[:len(reply)-2], nil
|
||||
}
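
To make the framing above concrete, a standalone sketch that wraps one APDU in the documented transport header (channel 0101, command tag 05, big-endian sequence index) and splits it into HID-sized frames. The APDU bytes are the GET_CONFIGURATION command as ledgerExchange would build it; padding and the actual USB write are left out:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 2-byte APDU length (5), then CLA=0xE0, INS=0x06, P1=0, P2=0, Lc=0.
	apdu := []byte{0x00, 0x05, 0xe0, 0x06, 0x00, 0x00, 0x00}

	header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // channel ID, command tag, sequence index
	space := 64 - len(header)

	for i := 0; len(apdu) > 0; i++ {
		chunk := append([]byte{}, header...)
		binary.BigEndian.PutUint16(chunk[3:], uint16(i)) // per-frame sequence number
		if len(apdu) > space {
			chunk = append(chunk, apdu[:space]...)
			apdu = apdu[space:]
		} else {
			chunk = append(chunk, apdu...)
			apdu = nil
		}
		fmt.Printf("frame %d: %x\n", i, chunk)
	}
}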
|
@ -1,903 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Ledger hardware
|
||||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
const ledgerHeartbeatCycle = time.Second
|
||||
|
||||
// Minimum time to wait between self derivation attempts, even if the user is
|
||||
// requesting accounts like crazy.
|
||||
const ledgerSelfDeriveThrottling = time.Second
|
||||
|
||||
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
|
||||
type ledgerOpcode byte
|
||||
|
||||
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam1 byte
|
||||
|
||||
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
|
||||
// specific opcodes. The same parameter values may be reused between opcodes.
|
||||
type ledgerParam2 byte
|
||||
|
||||
const (
|
||||
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
|
||||
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
|
||||
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
|
||||
|
||||
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
|
||||
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
|
||||
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
|
||||
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
|
||||
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
|
||||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
|
||||
)
|
||||
|
||||
// errReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errReplyInvalidHeader = errors.New("invalid reply header")
|
||||
|
||||
// errInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errInvalidVersionReply = errors.New("invalid version reply")
|
||||
|
||||
// ledgerWallet represents a live USB Ledger hardware wallet.
|
||||
type ledgerWallet struct {
|
||||
hub *LedgerHub // USB hub the device originates from (TODO(karalabe): remove if hotplug lands on Windows)
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a Ledger wallet
|
||||
failure error // Any failure that would make the device unusable
|
||||
|
||||
version [3]byte // Current version of the Ledger Ethereum app (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
accounts []accounts.Account // List of derived accounts pinned on the Ledger
|
||||
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
|
||||
|
||||
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
|
||||
deriveNextAddr common.Address // Next derived account address for auto-discovery
|
||||
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used accounts with
|
||||
deriveReq chan chan struct{} // Channel to request a self-derivation on
|
||||
deriveQuit chan chan error // Channel to terminate the self-deriver with
|
||||
|
||||
healthQuit chan chan error
|
||||
|
||||
// Locking a hardware wallet is a bit special. Since hardware devices are lower
|
||||
// performing, any communication with them might take a non-negligible amount of
|
||||
// time. Worse still, waiting for user confirmation can take arbitrarily long,
|
||||
// but exclusive communication must be upheld for its duration. Locking the entire wallet
|
||||
// in the meantime, however, would stall any parts of the system that don't want
|
||||
// to communicate, only to read some state (e.g. list the accounts).
|
||||
//
|
||||
// As such, a hardware wallet needs two locks to function correctly. A state
|
||||
// lock can be used to protect the wallet's software-side internal state, which
|
||||
// must not be held exclusively during hardware communication. A communication
|
||||
// lock can be used to achieve exclusive access to the device itself, this one
|
||||
// however should allow "skipping" waiting for operations that might want to
|
||||
// use the device, but can live without it too (e.g. account self-derivation).
|
||||
//
|
||||
// Since we have two locks, it's important to know how to properly use them:
|
||||
// - Communication requires the `device` to not change, so obtaining the
|
||||
// commsLock should be done after having a stateLock.
|
||||
// - Communication must not disable read access to the wallet state, so it
|
||||
// must only ever hold a *read* lock to stateLock. (A standalone sketch of this
// two-lock pattern follows the struct below.)
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
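
A minimal standalone sketch of the two-lock idea described in the struct comment above: an RWMutex guards the in-memory fields, while a buffered channel of size one acts as a try-lockable mutex for the device, so optional work can skip the hardware instead of blocking on it. The names here are hypothetical, not the wallet's actual methods:

package main

import (
	"fmt"
	"sync"
)

type wallet struct {
	stateLock sync.RWMutex  // protects the software-side fields
	commsLock chan struct{} // size-1 buffered channel used as a device mutex
	status    string
}

func newWallet() *wallet {
	w := &wallet{commsLock: make(chan struct{}, 1), status: "online"}
	w.commsLock <- struct{}{} // a token in the channel means "device is free"
	return w
}

// tryUseDevice talks to the device only if nobody else currently holds it.
func (w *wallet) tryUseDevice() bool {
	w.stateLock.RLock() // read lock only, never exclusive during comms
	defer w.stateLock.RUnlock()

	select {
	case <-w.commsLock: // grabbed exclusive device access
	default:
		return false // busy: optional callers (e.g. self-derivation) just skip
	}
	defer func() { w.commsLock <- struct{}{} }() // hand the token back

	fmt.Println("exchanging data while status is", w.status)
	return true
}

func main() {
	w := newWallet()
	fmt.Println("device used:", w.tryUseDevice())
}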
|
||||
|
||||
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
||||
func (w *ledgerWallet) URL() accounts.URL {
|
||||
return *w.url // Immutable, no need for a lock
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning whether the Ledger is opened, closed
|
||||
// or whether the Ethereum app was not started on it.
|
||||
func (w *ledgerWallet) Status() string {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure)
|
||||
}
|
||||
if w.device == nil {
|
||||
return "Closed"
|
||||
}
|
||||
if w.browser {
|
||||
return "Ethereum app in browser mode"
|
||||
}
|
||||
if w.offline() {
|
||||
return "Ethereum app offline"
|
||||
}
|
||||
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
|
||||
// offline returns whether the wallet and the Ethereum app are offline or not.
|
||||
//
|
||||
// The method assumes that the state lock is held!
|
||||
func (w *ledgerWallet) offline() bool {
|
||||
return w.version == [3]byte{0, 0, 0}
|
||||
}
|
||||
|
||||
// failed returns whether the USB device wrapped by the wallet failed for some reason.
|
||||
// This is used by the device scanner to report failed wallets as departed.
|
||||
//
|
||||
// The method assumes that the state lock is *not* held!
|
||||
func (w *ledgerWallet) failed() bool {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
return w.failure != nil
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, attempting to open a USB connection to the
|
||||
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
|
||||
// parameter is silently discarded.
|
||||
func (w *ledgerWallet) Open(passphrase string) error {
|
||||
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
// If the wallet was already opened, don't try to open again
|
||||
if w.device != nil {
|
||||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Otherwise iterate over all USB devices and find this again (no way to directly do this)
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Wallet seems to be successfully opened, guess if the Ethereum app is running
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
|
||||
w.paths = make(map[common.Address]accounts.DerivationPath)
|
||||
|
||||
w.deriveReq = make(chan chan struct{})
|
||||
w.deriveQuit = make(chan chan error)
|
||||
w.healthQuit = make(chan chan error)
|
||||
|
||||
defer func() {
|
||||
go w.heartbeat()
|
||||
go w.selfDerive()
|
||||
}()
|
||||
|
||||
if _, err = w.ledgerDerive(accounts.DefaultBaseDerivationPath); err != nil {
|
||||
// Ethereum app is not running or in browser mode, nothing more to do, return
|
||||
if err == errReplyInvalidHeader {
|
||||
w.browser = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
|
||||
if w.version, err = w.ledgerVersion(); err != nil {
|
||||
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// heartbeat is a health check loop for the Ledger wallets to periodically verify
|
||||
// whether they are still present or if they malfunctioned. It is needed because:
|
||||
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
||||
// - communication timeout on the Ledger requires a device power cycle to fix
|
||||
func (w *ledgerWallet) heartbeat() {
|
||||
w.log.Debug("Ledger health-check started")
|
||||
defer w.log.Debug("Ledger health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until termination is requested or the heartbeat cycle arrives
|
||||
select {
|
||||
case errc = <-w.healthQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case <-time.After(ledgerHeartbeatCycle):
|
||||
// Heartbeat time
|
||||
}
|
||||
// Execute a tiny data exchange to see responsiveness
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil {
|
||||
// Terminated while waiting for the lock
|
||||
w.stateLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
<-w.commsLock // Don't lock state while resolving version
|
||||
_, err = w.ledgerVersion()
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err != nil && err != errInvalidVersionReply {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.failure = err
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore non-hardware-related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("Ledger health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
}
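
The heartbeat above shuts down through a small quit-channel handshake: the closer sends an error channel in, and the loop replies on it once it has finished winding down. A standalone sketch of just that handshake, with the periodic work stubbed out:

package main

import (
	"fmt"
	"time"
)

func worker(quit chan chan error) {
	var (
		errc chan error
		err  error
	)
	for errc == nil && err == nil {
		select {
		case errc = <-quit:
			// termination requested; reply after the loop
		case <-time.After(10 * time.Millisecond):
			// periodic health check would run here
		}
	}
	if errc == nil {
		errc = <-quit // failed on our own, still wait for the closer
	}
	errc <- err // report the final error back to the closer
}

func main() {
	quit := make(chan chan error)
	go worker(quit)

	errc := make(chan error)
	quit <- errc
	fmt.Println("worker stopped, error:", <-errc)
}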
|
||||
|
||||
// Close implements accounts.Wallet, closing the USB connection to the Ledger.
|
||||
func (w *ledgerWallet) Close() error {
|
||||
// Ensure the wallet was opened
|
||||
w.stateLock.RLock()
|
||||
hQuit, dQuit := w.healthQuit, w.deriveQuit
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Terminate the health checks
|
||||
var herr error
|
||||
if hQuit != nil {
|
||||
errc := make(chan error)
|
||||
hQuit <- errc
|
||||
herr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the self-derivations
|
||||
var derr error
|
||||
if dQuit != nil {
|
||||
errc := make(chan error)
|
||||
dQuit <- errc
|
||||
derr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the device connection
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.healthQuit = nil
|
||||
w.deriveQuit = nil
|
||||
w.deriveReq = nil
|
||||
|
||||
if err := w.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
return derr
|
||||
}
|
||||
|
||||
// close is the internal wallet closer that terminates the USB connection and
|
||||
// resets all the fields to their defaults.
|
||||
//
|
||||
// Note, close assumes the state lock is held!
|
||||
func (w *ledgerWallet) close() error {
|
||||
// Allow duplicate closes, especially for health-check failures
|
||||
if w.device == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
w.accounts, w.paths = nil, nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
// the Ledger hardware wallet. If self-derivation was enabled, the account list
|
||||
// is periodically expanded based on current chain state.
|
||||
func (w *ledgerWallet) Accounts() []accounts.Account {
|
||||
// Attempt self-derivation if it's running
|
||||
reqc := make(chan struct{}, 1)
|
||||
select {
|
||||
case w.deriveReq <- reqc:
|
||||
// Self-derivation request accepted, wait for it
|
||||
<-reqc
|
||||
default:
|
||||
// Self-derivation offline, throttled or busy, skip
|
||||
}
|
||||
// Return whatever account list we ended up with
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Account, len(w.accounts))
|
||||
copy(cpy, w.accounts)
|
||||
return cpy
|
||||
}
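
Accounts above nudges the self-derivation goroutine with a non-blocking send: if the deriver is busy or not running, the select falls through to default and the cached list is returned unchanged. A minimal sketch of that pattern (no deriver goroutine is started here, so the default branch fires):

package main

import "fmt"

func main() {
	deriveReq := make(chan chan struct{}) // unbuffered, like the wallet field

	reqc := make(chan struct{}, 1)
	select {
	case deriveReq <- reqc:
		<-reqc // a live deriver would signal completion here
	default:
		fmt.Println("self-derivation offline or busy, using cached accounts")
	}
}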
|
||||
|
||||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *ledgerWallet) selfDerive() {
|
||||
w.log.Debug("Ledger self-derivation started")
|
||||
defer w.log.Debug("Ledger self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
reqc chan struct{}
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until either derivation or termination is requested
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case reqc = <-w.deriveReq:
|
||||
// Account discovery requested
|
||||
}
|
||||
// Derivation needs a chain and device access, skip if either unavailable
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil || w.deriveChain == nil || w.offline() {
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case <-w.commsLock:
|
||||
default:
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
// Device lock obtained, derive the next batch of accounts
|
||||
var (
|
||||
accs []accounts.Account
|
||||
paths []accounts.DerivationPath
|
||||
|
||||
nextAddr = w.deriveNextAddr
|
||||
nextPath = w.deriveNextPath
|
||||
|
||||
context = context.Background()
|
||||
)
|
||||
for empty := false; !empty; {
|
||||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
||||
w.log.Warn("Ledger account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check the account's status against the current chain state
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("Ledger balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("Ledger nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPath))
|
||||
copy(path[:], nextPath[:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddr,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
accs = append(accs, account)
|
||||
|
||||
// Display a log message to the user for new (or previously empty) accounts
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
nextAddr = common.Address{}
|
||||
nextPath[len(nextPath)-1]++
|
||||
}
|
||||
}
|
||||
// Self derivation complete, release device lock
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Insert any accounts successfully derived
|
||||
w.stateLock.Lock()
|
||||
for i := 0; i < len(accs); i++ {
|
||||
if _, ok := w.paths[accs[i].Address]; !ok {
|
||||
w.accounts = append(w.accounts, accs[i])
|
||||
w.paths[accs[i].Address] = paths[i]
|
||||
}
|
||||
}
|
||||
// Shift the self-derivation forward
|
||||
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
|
||||
w.deriveNextAddr = nextAddr
|
||||
w.deriveNextPath = nextPath
|
||||
w.stateLock.Unlock()
|
||||
|
||||
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
|
||||
reqc <- struct{}{}
|
||||
if err == nil {
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested, abort
|
||||
case <-time.After(ledgerSelfDeriveThrottling):
|
||||
// Waited enough, willing to self-derive again
|
||||
}
|
||||
}
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("Ledger self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
}
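
Self-derivation walks sibling accounts by bumping the last component of the derivation path (nextPath[len(nextPath)-1]++ above). A standalone sketch of the first few probed paths, starting from the common m/44'/60'/0'/0 base (the base path is an assumption for illustration):

package main

import "fmt"

func format(path []uint32) string {
	out := "m"
	for _, c := range path {
		if c >= 0x80000000 {
			out += fmt.Sprintf("/%d'", c-0x80000000)
		} else {
			out += fmt.Sprintf("/%d", c)
		}
	}
	return out
}

func main() {
	next := []uint32{0x80000000 + 44, 0x80000000 + 60, 0x80000000, 0}
	for i := 0; i < 3; i++ {
		fmt.Println(format(next))
		next[len(next)-1]++ // same increment the derivation loop performs
	}
	// Output:
	// m/44'/60'/0'/0
	// m/44'/60'/0'/1
	// m/44'/60'/0'/2
}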
|
||||
|
||||
// Contains implements accounts.Wallet, returning whether a particular account is
|
||||
// or is not pinned into this Ledger instance. Although we could attempt to resolve
|
||||
// unpinned accounts, that would be a non-negligible hardware operation.
|
||||
func (w *ledgerWallet) Contains(account accounts.Account) bool {
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
_, exists := w.paths[account.Address]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Derive implements accounts.Wallet, deriving a new account at the specific
|
||||
// derivation path. If pin is set to true, the account will be added to the list
|
||||
// of tracked accounts.
|
||||
func (w *ledgerWallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
|
||||
// Try to derive the actual account and update its URL if successful
|
||||
w.stateLock.RLock() // Avoid device disappearing during derivation
|
||||
|
||||
if w.device == nil || w.offline() {
|
||||
w.stateLock.RUnlock()
|
||||
return accounts.Account{}, accounts.ErrWalletClosed
|
||||
}
|
||||
<-w.commsLock // Avoid concurrent hardware access
|
||||
address, err := w.ledgerDerive(path)
|
||||
w.commsLock <- struct{}{}
|
||||
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// If an error occurred or no pinning was requested, return
|
||||
if err != nil {
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
account := accounts.Account{
|
||||
Address: address,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
if !pin {
|
||||
return account, nil
|
||||
}
|
||||
// Pinning needs to modify the state
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
if _, ok := w.paths[address]; !ok {
|
||||
w.accounts = append(w.accounts, account)
|
||||
w.paths[address] = path
|
||||
}
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
|
||||
// user used previously (based on the chain state), but ones that he/she did not
|
||||
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
|
||||
// derivation only runs during account listing (and even then throttled).
|
||||
func (w *ledgerWallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.deriveNextPath = make(accounts.DerivationPath, len(base))
|
||||
copy(w.deriveNextPath[:], base[:])
|
||||
|
||||
w.deriveNextAddr = common.Address{}
|
||||
w.deriveChain = chain
|
||||
}
|
||||
|
||||
// SignHash implements accounts.Wallet, however signing arbitrary data is not
|
||||
// supported for Ledger wallets, so this method will always return an error.
|
||||
func (w *ledgerWallet) SignHash(acc accounts.Account, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
|
||||
// wallet to request a confirmation from the user. It returns either the signed
|
||||
// transaction or a failure if the user denied the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned as opposed to silently signing in Homestead mode.
|
||||
func (w *ledgerWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
// If the wallet is closed, or the Ethereum app doesn't run, abort
|
||||
if w.device == nil || w.offline() {
|
||||
return nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Make sure the requested account is contained within
|
||||
path, ok := w.paths[account.Address]
|
||||
if !ok {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// Ensure the wallet is capable of signing the given transaction
|
||||
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
|
||||
return nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
|
||||
}
|
||||
// All infos gathered and metadata checks out, request signing
|
||||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
return w.ledgerSign(path, account.Address, tx, chainID)
|
||||
}
|
||||
|
||||
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
|
||||
// data is not supported for Ledger wallets, so this method will always return
|
||||
// an error.
|
||||
func (w *ledgerWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
// Since the Ledger does not support extra passphrases, it is silently ignored.
|
||||
func (w *ledgerWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
return w.SignTx(account, tx, chainID)
|
||||
}
|
||||
|
||||
// ledgerVersion retrieves the current version of the Ethereum wallet app running
|
||||
// on the Ledger wallet.
|
||||
//
|
||||
// The version retrieval protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+----+---
|
||||
// E0 | 06 | 00 | 00 | 00 | 04
|
||||
//
|
||||
// With no input data, and the output data being:
|
||||
//
|
||||
// Description | Length
|
||||
// ---------------------------------------------------+--------
|
||||
// Flags 01: arbitrary data signature enabled by user | 1 byte
|
||||
// Application major version | 1 byte
|
||||
// Application minor version | 1 byte
|
||||
// Application patch version | 1 byte
|
||||
func (w *ledgerWallet) ledgerVersion() ([3]byte, error) {
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
|
||||
if err != nil {
|
||||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
copy(version[:], reply[1:])
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
|
||||
// wallet at the specified derivation path.
|
||||
//
|
||||
// The address derivation protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 02 | 00 return address
|
||||
// 01 display address and confirm before returning
|
||||
// | 00: do not return the chain code
|
||||
// | 01: return the chain code
|
||||
// | var | 00
|
||||
//
|
||||
// Where the input data is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+--------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------------------+-------------------
|
||||
// Public Key length | 1 byte
|
||||
// Uncompressed Public Key | arbitrary
|
||||
// Ethereum address length | 1 byte
|
||||
// Ethereum address | 40 bytes hex ascii
|
||||
// Chain code if requested | 32 bytes
|
||||
func (w *ledgerWallet) ledgerDerive(derivationPath []uint32) (common.Address, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Send the request and wait for the response
|
||||
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
// Discard the public key, we don't need that for now
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks public key entry")
|
||||
}
|
||||
reply = reply[1+int(reply[0]):]
|
||||
|
||||
// Extract the Ethereum hex address string
|
||||
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
|
||||
return common.Address{}, errors.New("reply lacks address entry")
|
||||
}
|
||||
hexstr := reply[1 : 1+int(reply[0])]
|
||||
|
||||
// Decode the hex string into an Ethereum address and return
|
||||
var address common.Address
|
||||
hex.Decode(address[:], hexstr)
|
||||
return address, nil
|
||||
}
|
||||
|
||||
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
//
|
||||
// The transaction signing protocol is defined as follows:
|
||||
//
|
||||
// CLA | INS | P1 | P2 | Lc | Le
|
||||
// ----+-----+----+----+-----+---
|
||||
// E0 | 04 | 00: first transaction data block
|
||||
// 80: subsequent transaction data block
|
||||
// | 00 | variable | variable
|
||||
//
|
||||
// Where the input for the first transaction block (first 255 bytes) is:
|
||||
//
|
||||
// Description | Length
|
||||
// -------------------------------------------------+----------
|
||||
// Number of BIP 32 derivations to perform (max 10) | 1 byte
|
||||
// First derivation index (big endian) | 4 bytes
|
||||
// ... | 4 bytes
|
||||
// Last derivation index (big endian) | 4 bytes
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the input for subsequent transaction blocks (first 255 bytes) are:
|
||||
//
|
||||
// Description | Length
|
||||
// ----------------------+----------
|
||||
// RLP transaction chunk | arbitrary
|
||||
//
|
||||
// And the output data is:
|
||||
//
|
||||
// Description | Length
|
||||
// ------------+---------
|
||||
// signature V | 1 byte
|
||||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerWallet) ledgerSign(derivationPath []uint32, address common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
for i, component := range derivationPath {
|
||||
binary.BigEndian.PutUint32(path[1+4*i:], component)
|
||||
}
|
||||
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
|
||||
var (
|
||||
txrlp []byte
|
||||
err error
|
||||
)
|
||||
if chainID == nil {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
||||
// Send the request and wait for the response
|
||||
var (
|
||||
op = ledgerP1InitTransactionData
|
||||
reply []byte
|
||||
)
|
||||
for len(payload) > 0 {
|
||||
// Calculate the size of the next data chunk
|
||||
chunk := 255
|
||||
if chunk > len(payload) {
|
||||
chunk = len(payload)
|
||||
}
|
||||
// Send the chunk over, ensuring it's processed correctly
|
||||
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Shift the payload and ensure subsequent chunks are marked as such
|
||||
payload = payload[chunk:]
|
||||
op = ledgerP1ContTransactionData
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(reply) != 65 {
|
||||
return nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(reply[1:], reply[0])
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sender != address {
|
||||
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", address.Hex(), sender.Hex())
|
||||
}
|
||||
return signed, nil
|
||||
}
|
||||
|
||||
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
|
||||
// message and retrieving the response.
|
||||
//
|
||||
// The common transport header is defined as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// Communication channel ID (big endian) | 2 bytes
|
||||
// Command tag | 1 byte
|
||||
// Packet sequence index (big endian) | 2 bytes
|
||||
// Payload | arbitrary
|
||||
//
|
||||
// The Communication channel ID allows commands multiplexing over the same
|
||||
// physical link. It is not used for the time being, and should be set to 0101
|
||||
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
|
||||
//
|
||||
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
|
||||
// APDU payloads, or TAG_PING (0x02) for a simple link test.
|
||||
//
|
||||
// The Packet sequence index describes the current sequence for fragmented payloads.
|
||||
// The first fragment index is 0x00.
|
||||
//
|
||||
// APDU Command payloads are encoded as follows:
|
||||
//
|
||||
// Description | Length
|
||||
// --------------------------------------+----------
|
||||
// APDU length (big endian) | 2 bytes
|
||||
// APDU CLA | 1 byte
|
||||
// APDU INS | 1 byte
|
||||
// APDU P1 | 1 byte
|
||||
// APDU P2 | 1 byte
|
||||
// APDU length | 1 byte
|
||||
// Optional APDU data | arbitrary
|
||||
func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
|
||||
// Construct the message payload, possibly split into multiple chunks
|
||||
apdu := make([]byte, 2, 7+len(data))
|
||||
|
||||
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
|
||||
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
|
||||
apdu = append(apdu, data...)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
|
||||
chunk := make([]byte, 64)
|
||||
space := len(chunk) - len(header)
|
||||
|
||||
for i := 0; len(apdu) > 0; i++ {
|
||||
// Construct the new message to stream
|
||||
chunk = append(chunk[:0], header...)
|
||||
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
|
||||
|
||||
if len(apdu) > space {
|
||||
chunk = append(chunk, apdu[:space]...)
|
||||
apdu = apdu[space:]
|
||||
} else {
|
||||
chunk = append(chunk, apdu...)
|
||||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var reply []byte
|
||||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the total message length
|
||||
var payload []byte
|
||||
|
||||
if chunk[3] == 0x00 && chunk[4] == 0x00 {
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
|
||||
payload = chunk[7:]
|
||||
} else {
|
||||
payload = chunk[5:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
return reply[:len(reply)-2], nil
|
||||
}
|
330
accounts/usbwallet/trezor.go
Normal file
@ -0,0 +1,330 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains the implementation for interacting with the Trezor hardware
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In
|
||||
// this case, the calling application should display a pinpad and send back the
|
||||
// encoded passphrase.
|
||||
var ErrTrezorPINNeeded = errors.New("trezor: pin needed")
|
||||
|
||||
// errTrezorReplyInvalidHeader is the error message returned by a Trezor data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errTrezorReplyInvalidHeader = errors.New("trezor: invalid reply header")
|
||||
|
||||
// trezorDriver implements the communication with a Trezor hardware wallet.
|
||||
type trezorDriver struct {
|
||||
device io.ReadWriter // USB device connection to communicate through
|
||||
version [3]uint32 // Current version of the Trezor firmware
|
||||
label string // Current textual label of the Trezor device
|
||||
pinwait bool // Flags whether the device is waiting for PIN entry
|
||||
failure error // Any failure that would make the device unusable
|
||||
log log.Logger // Contextual logger to tag the trezor with its id
|
||||
}
|
||||
|
||||
// newTrezorDriver creates a new instance of a Trezor USB protocol driver.
|
||||
func newTrezorDriver(logger log.Logger) driver {
|
||||
return &trezorDriver{
|
||||
log: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning whether the Trezor is opened, closed
|
||||
// or whether the Ethereum app was not started on it.
|
||||
func (w *trezorDriver) Status() (string, error) {
|
||||
if w.failure != nil {
|
||||
return fmt.Sprintf("Failed: %v", w.failure), w.failure
|
||||
}
|
||||
if w.device == nil {
|
||||
return "Closed", w.failure
|
||||
}
|
||||
if w.pinwait {
|
||||
return fmt.Sprintf("Trezor v%d.%d.%d '%s' waiting for PIN", w.version[0], w.version[1], w.version[2], w.label), w.failure
|
||||
}
|
||||
return fmt.Sprintf("Trezor v%d.%d.%d '%s' online", w.version[0], w.version[1], w.version[2], w.label), w.failure
|
||||
}
|
||||
|
||||
// Open implements usbwallet.driver, attempting to initialize the connection to
|
||||
// the Trezor hardware wallet. Initializing the Trezor is a two phase operation:
|
||||
// * The first phase is to initialize the connection and read the wallet's
|
||||
// features. This phase is invoked if the provided passphrase is empty. The
|
||||
// device will display the pinpad as a result and will return an appropriate
|
||||
// error to notify the user that a second open phase is needed.
|
||||
// * The second phase is to unlock access to the Trezor, which is done by the
|
||||
// user providing a passphrase that maps the keyboard keypad to the user's PIN
|
||||
// digits (shuffled according to the pinpad displayed).
|
||||
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
|
||||
w.device, w.failure = device, nil
|
||||
|
||||
// If phase 1 is requested, init the connection and wait for user callback
|
||||
if passphrase == "" {
|
||||
// If we're already waiting for a PIN entry, insta-return
|
||||
if w.pinwait {
|
||||
return ErrTrezorPINNeeded
|
||||
}
|
||||
// Initialize a connection to the device
|
||||
features := new(trezor.Features)
|
||||
if _, err := w.trezorExchange(&trezor.Initialize{}, features); err != nil {
|
||||
return err
|
||||
}
|
||||
w.version = [3]uint32{features.GetMajorVersion(), features.GetMinorVersion(), features.GetPatchVersion()}
|
||||
w.label = features.GetLabel()
|
||||
|
||||
// Do a manual ping, forcing the device to ask for its PIN
|
||||
askPin := true
|
||||
res, err := w.trezorExchange(&trezor.Ping{PinProtection: &askPin}, new(trezor.PinMatrixRequest), new(trezor.Success))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Only return the PIN request if the device wasn't unlocked until now
|
||||
if res == 1 {
|
||||
return nil // Device responded with trezor.Success
|
||||
}
|
||||
w.pinwait = true
|
||||
return ErrTrezorPINNeeded
|
||||
}
|
||||
// Phase 2 requested with actual PIN entry
|
||||
w.pinwait = false
|
||||
|
||||
if _, err := w.trezorExchange(&trezor.PinMatrixAck{Pin: &passphrase}, new(trezor.Success)); err != nil {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements usbwallet.driver, cleaning up any metadata maintained within
|
||||
// the Trezor driver.
|
||||
func (w *trezorDriver) Close() error {
|
||||
w.version, w.label, w.pinwait = [3]uint32{}, "", false
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat implements usbwallet.driver, performing a sanity check against the
|
||||
// Trezor to see if it's still online.
|
||||
func (w *trezorDriver) Heartbeat() error {
|
||||
if _, err := w.trezorExchange(&trezor.Ping{}, new(trezor.Success)); err != nil {
|
||||
w.failure = err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Derive implements usbwallet.driver, sending a derivation request to the Trezor
|
||||
// and returning the Ethereum address located on that derivation path.
|
||||
func (w *trezorDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
|
||||
return w.trezorDerive(path)
|
||||
}
|
||||
|
||||
// SignTx implements usbwallet.driver, sending the transaction to the Trezor and
|
||||
// waiting for the user to confirm or deny the transaction.
|
||||
func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
if w.device == nil {
|
||||
return common.Address{}, nil, accounts.ErrWalletClosed
|
||||
}
|
||||
return w.trezorSign(path, tx, chainID)
|
||||
}
|
||||
|
||||
// trezorDerive sends a derivation request to the Trezor device and returns the
|
||||
// Ethereum address located on that path.
|
||||
func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) {
|
||||
address := new(trezor.EthereumAddress)
|
||||
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
return common.BytesToAddress(address.GetAddress()), nil
|
||||
}
|
||||
|
||||
// trezorSign sends the transaction to the Trezor wallet, and waits for the user
|
||||
// to confirm or deny the transaction.
|
||||
func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
|
||||
// Create the transaction initiation message
|
||||
data := tx.Data()
|
||||
length := uint32(len(data))
|
||||
|
||||
request := &trezor.EthereumSignTx{
|
||||
AddressN: derivationPath,
|
||||
Nonce: new(big.Int).SetUint64(tx.Nonce()).Bytes(),
|
||||
GasPrice: tx.GasPrice().Bytes(),
|
||||
GasLimit: tx.Gas().Bytes(),
|
||||
Value: tx.Value().Bytes(),
|
||||
DataLength: &length,
|
||||
}
|
||||
if to := tx.To(); to != nil {
|
||||
request.To = (*to)[:] // Non contract deploy, set recipient explicitly
|
||||
}
|
||||
if length > 1024 { // Send the data chunked if that was requested
|
||||
request.DataInitialChunk, data = data[:1024], data[1024:]
|
||||
} else {
|
||||
request.DataInitialChunk, data = data, nil
|
||||
}
|
||||
if chainID != nil { // EIP-155 transaction, set chain ID explicitly (only 32 bit is supported!?)
|
||||
id := uint32(chainID.Int64())
|
||||
request.ChainId = &id
|
||||
}
|
||||
// Send the initiation message and stream content until a signature is returned
|
||||
response := new(trezor.EthereumTxRequest)
|
||||
if _, err := w.trezorExchange(request, response); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
for response.DataLength != nil && int(*response.DataLength) <= len(data) {
|
||||
chunk := data[:*response.DataLength]
|
||||
data = data[*response.DataLength:]
|
||||
|
||||
if _, err := w.trezorExchange(&trezor.EthereumTxAck{DataChunk: chunk}, response); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
// Extract the Ethereum signature and do a sanity validation
|
||||
if len(response.GetSignatureR()) == 0 || len(response.GetSignatureS()) == 0 || response.GetSignatureV() == 0 {
|
||||
return common.Address{}, nil, errors.New("reply lacks signature")
|
||||
}
|
||||
signature := append(append(response.GetSignatureR(), response.GetSignatureS()...), byte(response.GetSignatureV()))
|
||||
|
||||
// Create the correct signer and signature transform based on the chain ID
|
||||
var signer types.Signer
|
||||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
|
||||
}
|
||||
// Inject the final signature into the transaction and sanity check the sender
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
sender, err := types.Sender(signer, signed)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
return sender, signed, nil
|
||||
}
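// Illustrative note (not part of the original diff): the signature[64] adjustment
// above undoes the EIP-155 v-encoding that the device is assumed to report
// (v = chainID*2 + 35 + parity), so that the signer receives a plain recovery id:
//
//	chainID = 1, parity = 0  =>  device v = 37,  37 - (1*2 + 35) = 0
//	chainID = 1, parity = 1  =>  device v = 38,  38 - (1*2 + 35) = 1
//
// types.NewEIP155Signer(chainID) then re-applies the chain-specific encoding when
// the signature is attached to the transaction via tx.WithSignature.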
|
||||
|
||||
// trezorExchange performs a data exchange with the Trezor wallet, sending it a
|
||||
// message and retrieving the response. If multiple responses are possible, the
|
||||
// method will also return the index of the destination object used.
|
||||
func (w *trezorDriver) trezorExchange(req proto.Message, results ...proto.Message) (int, error) {
|
||||
// Construct the original message payload to chunk up
|
||||
data, err := proto.Marshal(req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
payload := make([]byte, 8+len(data))
|
||||
copy(payload, []byte{0x23, 0x23})
|
||||
binary.BigEndian.PutUint16(payload[2:], trezor.Type(req))
|
||||
binary.BigEndian.PutUint32(payload[4:], uint32(len(data)))
|
||||
copy(payload[8:], data)
|
||||
|
||||
// Stream all the chunks to the device
|
||||
chunk := make([]byte, 64)
|
||||
chunk[0] = 0x3f // Report ID magic number
|
||||
|
||||
for len(payload) > 0 {
|
||||
// Construct the new message to stream, padding with zeroes if needed
|
||||
if len(payload) > 63 {
|
||||
copy(chunk[1:], payload[:63])
|
||||
payload = payload[63:]
|
||||
} else {
|
||||
copy(chunk[1:], payload)
|
||||
copy(chunk[1+len(payload):], make([]byte, 63-len(payload)))
|
||||
payload = nil
|
||||
}
|
||||
// Send over to the device
|
||||
w.log.Trace("Data chunk sent to the Trezor", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
// Stream the reply back from the wallet in 64 byte chunks
|
||||
var (
|
||||
kind uint16
|
||||
reply []byte
|
||||
)
|
||||
for {
|
||||
// Read the next chunk from the Trezor wallet
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Trezor", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x3f || (len(reply) == 0 && (chunk[1] != 0x23 || chunk[2] != 0x23)) {
|
||||
return 0, errTrezorReplyInvalidHeader
|
||||
}
|
||||
// If it's the first chunk, retrieve the reply message type and total message length
|
||||
var payload []byte
|
||||
|
||||
if len(reply) == 0 {
|
||||
kind = binary.BigEndian.Uint16(chunk[3:5])
|
||||
reply = make([]byte, 0, int(binary.BigEndian.Uint32(chunk[5:9])))
|
||||
payload = chunk[9:]
|
||||
} else {
|
||||
payload = chunk[1:]
|
||||
}
|
||||
// Append to the reply and stop when filled up
|
||||
if left := cap(reply) - len(reply); left > len(payload) {
|
||||
reply = append(reply, payload...)
|
||||
} else {
|
||||
reply = append(reply, payload[:left]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Try to parse the reply into the requested reply message
|
||||
if kind == uint16(trezor.MessageType_MessageType_Failure) {
|
||||
// Trezor returned a failure, extract and return the message
|
||||
failure := new(trezor.Failure)
|
||||
if err := proto.Unmarshal(reply, failure); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return 0, errors.New("trezor: " + failure.GetMessage())
|
||||
}
|
||||
if kind == uint16(trezor.MessageType_MessageType_ButtonRequest) {
|
||||
// Trezor is waiting for user confirmation, ack and wait for the next message
|
||||
return w.trezorExchange(&trezor.ButtonAck{}, results...)
|
||||
}
|
||||
for i, res := range results {
|
||||
if trezor.Type(res) == kind {
|
||||
return i, proto.Unmarshal(reply, res)
|
||||
}
|
||||
}
|
||||
expected := make([]string, len(results))
|
||||
for i, res := range results {
|
||||
expected[i] = trezor.Name(trezor.Type(res))
|
||||
}
|
||||
return 0, fmt.Errorf("trezor: expected reply types %s, got %s", expected, trezor.Name(kind))
|
||||
}
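The framing handled by trezorExchange is simple enough to decode by hand: every
HID report starts with the 0x3f report ID, and the first report additionally carries
the "##" (0x23 0x23) magic, a big-endian protobuf message type and a big-endian
total payload length. The sketch below is a minimal standalone illustration of
that first-frame parsing under those assumptions, not part of the diff itself.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parseFirstFrame extracts the message type, the total payload length and the
// initial payload bytes from the first 64-byte Trezor HID report.
func parseFirstFrame(chunk []byte) (kind uint16, length uint32, payload []byte, err error) {
	if len(chunk) != 64 || chunk[0] != 0x3f || chunk[1] != 0x23 || chunk[2] != 0x23 {
		return 0, 0, nil, errors.New("invalid trezor reply header")
	}
	kind = binary.BigEndian.Uint16(chunk[3:5])   // protobuf message type
	length = binary.BigEndian.Uint32(chunk[5:9]) // total encoded message length
	payload = chunk[9:]                          // first (up to 55) payload bytes
	return kind, length, payload, nil
}

func main() {
	frame := make([]byte, 64)
	frame[0], frame[1], frame[2] = 0x3f, 0x23, 0x23
	binary.BigEndian.PutUint16(frame[3:], 2)  // hypothetical message type
	binary.BigEndian.PutUint32(frame[5:], 10) // hypothetical payload length
	kind, length, _, err := parseFirstFrame(frame)
	fmt.Println(kind, length, err)
}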
|
562
accounts/usbwallet/wallet.go
Normal file
@ -0,0 +1,562 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package usbwallet implements support for USB hardware wallets.
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
const heartbeatCycle = time.Second
|
||||
|
||||
// Minimum time to wait between self derivation attempts, even if the user is
|
||||
// requesting accounts like crazy.
|
||||
const selfDeriveThrottling = time.Second
|
||||
|
||||
// driver defines the vendor-specific functionality hardware wallet instances
|
||||
// must implement to allow using them with the wallet lifecycle management.
|
||||
type driver interface {
|
||||
// Status returns a textual status to aid the user in the current state of the
|
||||
// wallet. It also returns an error indicating any failure the wallet might have
|
||||
// encountered.
|
||||
Status() (string, error)
|
||||
|
||||
// Open initializes access to a wallet instance. The passphrase parameter may
|
||||
// or may not be used by the implementation of a particular wallet instance.
|
||||
Open(device io.ReadWriter, passphrase string) error
|
||||
|
||||
// Close releases any resources held by an open wallet instance.
|
||||
Close() error
|
||||
|
||||
// Heartbeat performs a sanity check against the hardware wallet to see if it
|
||||
// is still online and healthy.
|
||||
Heartbeat() error
|
||||
|
||||
// Derive sends a derivation request to the USB device and returns the Ethereum
|
||||
// address located on that path.
|
||||
Derive(path accounts.DerivationPath) (common.Address, error)
|
||||
|
||||
// SignTx sends the transaction to the USB device and waits for the user to confirm
|
||||
// or deny the transaction.
|
||||
SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error)
|
||||
}
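// Illustrative sketch (not part of the original diff): a vendor driver only has
// to satisfy the five methods above; the wallet type below supplies all locking,
// heartbeating and self-derivation around it. A do-nothing implementation, shown
// here purely as a mental model of the contract (all names are hypothetical):
//
//	type noopDriver struct{}
//
//	func (d *noopDriver) Status() (string, error)                            { return "Fake device online", nil }
//	func (d *noopDriver) Open(device io.ReadWriter, passphrase string) error { return nil }
//	func (d *noopDriver) Close() error                                       { return nil }
//	func (d *noopDriver) Heartbeat() error                                   { return nil }
//	func (d *noopDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
//		return common.Address{}, nil
//	}
//	func (d *noopDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
//		return common.Address{}, nil, accounts.ErrNotSupported
//	}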
|
||||
|
||||
// wallet represents the common functionality shared by all USB hardware
|
||||
// wallets to prevent reimplementing the same complex maintenance mechanisms
|
||||
// for different vendors.
|
||||
type wallet struct {
|
||||
hub *Hub // USB hub scanning
|
||||
driver driver // Hardware implementation of the low level device operations
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a hardware wallet
|
||||
|
||||
accounts []accounts.Account // List of derived accounts pinned on the hardware wallet
|
||||
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
|
||||
|
||||
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
|
||||
deriveNextAddr common.Address // Next derived account address for auto-discovery
|
||||
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used accounts with
|
||||
deriveReq chan chan struct{} // Channel to request a self-derivation on
|
||||
deriveQuit chan chan error // Channel to terminate the self-deriver with
|
||||
|
||||
healthQuit chan chan error
|
||||
|
||||
// Locking a hardware wallet is a bit special. Since hardware devices are lower
|
||||
// performing, any communication with them might take a non-negligible amount of
|
||||
// time. Worse still, waiting for user confirmation can take arbitrarily long,
|
||||
// but exclusive communication must be upheld for its duration. Locking the entire wallet
|
||||
// in the meantime however would stall any parts of the system that don't want
|
||||
// to communicate, just read some state (e.g. list the accounts).
|
||||
//
|
||||
// As such, a hardware wallet needs two locks to function correctly. A state
|
||||
// lock can be used to protect the wallet's software-side internal state, which
|
||||
// must not be held exclusively during hardware communication. A communication
|
||||
// lock can be used to achieve exclusive access to the device itself, this one
|
||||
// however should allow "skipping" waiting for operations that might want to
|
||||
// use the device, but can live without it too (e.g. account self-derivation).
|
||||
//
|
||||
// Since we have two locks, it's important to know how to properly use them:
|
||||
// - Communication requires the `device` to not change, so obtaining the
|
||||
// commsLock should be done after having a stateLock.
|
||||
// - Communication must not disable read access to the wallet state, so it
|
||||
// must only ever hold a *read* lock to stateLock.
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the base with its id
|
||||
}
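// Illustrative sketch (not part of the original diff): the canonical order for
// touching the device, as used by Derive and SignTx further below, is to take a
// read lock on stateLock, check that the device is still attached, then hold the
// commsLock token only for the duration of the hardware roundtrip:
//
//	w.stateLock.RLock()       // keep the device pointer stable, other readers stay unblocked
//	if w.device == nil {
//		w.stateLock.RUnlock()
//		return accounts.ErrWalletClosed
//	}
//	<-w.commsLock             // claim exclusive access to the USB device
//	// ... perform the hardware exchange via w.driver ...
//	w.commsLock <- struct{}{} // release the device
//	w.stateLock.RUnlock()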
|
||||
|
||||
// URL implements accounts.Wallet, returning the URL of the USB hardware device.
|
||||
func (w *wallet) URL() accounts.URL {
|
||||
return *w.url // Immutable, no need for a lock
|
||||
}
|
||||
|
||||
// Status implements accounts.Wallet, returning a custom status message from the
|
||||
// underlying vendor-specific hardware wallet implementation.
|
||||
func (w *wallet) Status() (string, error) {
|
||||
w.stateLock.RLock() // No device communication, state lock is enough
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
status, failure := w.driver.Status()
|
||||
if w.device == nil {
|
||||
return "Closed", failure
|
||||
}
|
||||
return status, failure
|
||||
}
|
||||
|
||||
// Open implements accounts.Wallet, attempting to open a USB connection to the
|
||||
// hardware wallet.
|
||||
func (w *wallet) Open(passphrase string) error {
|
||||
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
// If the device was already opened once, refuse to try again
|
||||
if w.paths != nil {
|
||||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Make sure the actual device connection is done only once
|
||||
if w.device == nil {
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
}
|
||||
// Delegate device initialization to the underlying driver
|
||||
if err := w.driver.Open(w.device, passphrase); err != nil {
|
||||
return err
|
||||
}
|
||||
// Connection successful, start life-cycle management
|
||||
w.paths = make(map[common.Address]accounts.DerivationPath)
|
||||
|
||||
w.deriveReq = make(chan chan struct{})
|
||||
w.deriveQuit = make(chan chan error)
|
||||
w.healthQuit = make(chan chan error)
|
||||
|
||||
go w.heartbeat()
|
||||
go w.selfDerive()
|
||||
|
||||
// Notify anyone listening for wallet events that a new device is accessible
|
||||
go w.hub.updateFeed.Send(accounts.WalletEvent{Wallet: w, Kind: accounts.WalletOpened})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// heartbeat is a health check loop for the USB wallets to periodically verify
|
||||
// whether they are still present or if they malfunctioned.
|
||||
func (w *wallet) heartbeat() {
|
||||
w.log.Debug("USB wallet health-check started")
|
||||
defer w.log.Debug("USB wallet health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until termination is requested or the heartbeat cycle arrives
|
||||
select {
|
||||
case errc = <-w.healthQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case <-time.After(heartbeatCycle):
|
||||
// Heartbeat time
|
||||
}
|
||||
// Execute a tiny data exchange to see responsiveness
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil {
|
||||
// Terminated while waiting for the lock
|
||||
w.stateLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
<-w.commsLock // Don't lock state while performing the heartbeat
|
||||
err = w.driver.Heartbeat()
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err != nil {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore non hardware related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("USB wallet health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Close implements accounts.Wallet, closing the USB connection to the device.
|
||||
func (w *wallet) Close() error {
|
||||
// Ensure the wallet was opened
|
||||
w.stateLock.RLock()
|
||||
hQuit, dQuit := w.healthQuit, w.deriveQuit
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Terminate the health checks
|
||||
var herr error
|
||||
if hQuit != nil {
|
||||
errc := make(chan error)
|
||||
hQuit <- errc
|
||||
herr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the self-derivations
|
||||
var derr error
|
||||
if dQuit != nil {
|
||||
errc := make(chan error)
|
||||
dQuit <- errc
|
||||
derr = <-errc // Save for later, we *must* close the USB
|
||||
}
|
||||
// Terminate the device connection
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.healthQuit = nil
|
||||
w.deriveQuit = nil
|
||||
w.deriveReq = nil
|
||||
|
||||
if err := w.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
return derr
|
||||
}
|
||||
|
||||
// close is the internal wallet closer that terminates the USB connection and
|
||||
// resets all the fields to their defaults.
|
||||
//
|
||||
// Note, close assumes the state lock is held!
|
||||
func (w *wallet) close() error {
|
||||
// Allow duplicate closes, especially for health-check failures
|
||||
if w.device == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.accounts, w.paths = nil, nil
|
||||
w.driver.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
// the USB hardware wallet. If self-derivation was enabled, the account list is
|
||||
// periodically expanded based on current chain state.
|
||||
func (w *wallet) Accounts() []accounts.Account {
|
||||
// Attempt self-derivation if it's running
|
||||
reqc := make(chan struct{}, 1)
|
||||
select {
|
||||
case w.deriveReq <- reqc:
|
||||
// Self-derivation request accepted, wait for it
|
||||
<-reqc
|
||||
default:
|
||||
// Self-derivation offline, throttled or busy, skip
|
||||
}
|
||||
// Return whatever account list we ended up with
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Account, len(w.accounts))
|
||||
copy(cpy, w.accounts)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *wallet) selfDerive() {
|
||||
w.log.Debug("USB wallet self-derivation started")
|
||||
defer w.log.Debug("USB wallet self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
reqc chan struct{}
|
||||
errc chan error
|
||||
err error
|
||||
)
|
||||
for errc == nil && err == nil {
|
||||
// Wait until either derivation or termination is requested
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested
|
||||
continue
|
||||
case reqc = <-w.deriveReq:
|
||||
// Account discovery requested
|
||||
}
|
||||
// Derivation needs a chain and device access, skip if either unavailable
|
||||
w.stateLock.RLock()
|
||||
if w.device == nil || w.deriveChain == nil {
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case <-w.commsLock:
|
||||
default:
|
||||
w.stateLock.RUnlock()
|
||||
reqc <- struct{}{}
|
||||
continue
|
||||
}
|
||||
// Device lock obtained, derive the next batch of accounts
|
||||
var (
|
||||
accs []accounts.Account
|
||||
paths []accounts.DerivationPath
|
||||
|
||||
nextAddr = w.deriveNextAddr
|
||||
nextPath = w.deriveNextPath
|
||||
|
||||
context = context.Background()
|
||||
)
|
||||
for empty := false; !empty; {
|
||||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.driver.Derive(nextPath); err != nil {
|
||||
w.log.Warn("USB wallet account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check the account's status against the current chain state
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("USB wallet balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
w.log.Warn("USB wallet nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
path := make(accounts.DerivationPath, len(nextPath))
|
||||
copy(path[:], nextPath[:])
|
||||
paths = append(paths, path)
|
||||
|
||||
account := accounts.Account{
|
||||
Address: nextAddr,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
accs = append(accs, account)
|
||||
|
||||
// Display a log message to the user for new (or previously empty) accounts
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
w.log.Info("USB wallet discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
nextAddr = common.Address{}
|
||||
nextPath[len(nextPath)-1]++
|
||||
}
|
||||
}
|
||||
// Self derivation complete, release device lock
|
||||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// Insert any accounts successfully derived
|
||||
w.stateLock.Lock()
|
||||
for i := 0; i < len(accs); i++ {
|
||||
if _, ok := w.paths[accs[i].Address]; !ok {
|
||||
w.accounts = append(w.accounts, accs[i])
|
||||
w.paths[accs[i].Address] = paths[i]
|
||||
}
|
||||
}
|
||||
// Shift the self-derivation forward
|
||||
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
|
||||
w.deriveNextAddr = nextAddr
|
||||
w.deriveNextPath = nextPath
|
||||
w.stateLock.Unlock()
|
||||
|
||||
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
|
||||
reqc <- struct{}{}
|
||||
if err == nil {
|
||||
select {
|
||||
case errc = <-w.deriveQuit:
|
||||
// Termination requested, abort
|
||||
case <-time.After(selfDeriveThrottling):
|
||||
// Waited enough, willing to self-derive again
|
||||
}
|
||||
}
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
w.log.Debug("USB wallet self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
}
|
||||
|
||||
// Contains implements accounts.Wallet, returning whether a particular account is
|
||||
// or is not pinned into this wallet instance. Although we could attempt to resolve
|
||||
// unpinned accounts, that would be a non-negligible hardware operation.
|
||||
func (w *wallet) Contains(account accounts.Account) bool {
|
||||
w.stateLock.RLock()
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
_, exists := w.paths[account.Address]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Derive implements accounts.Wallet, deriving a new account at the specific
|
||||
// derivation path. If pin is set to true, the account will be added to the list
|
||||
// of tracked accounts.
|
||||
func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
|
||||
// Try to derive the actual account and update its URL if successful
|
||||
w.stateLock.RLock() // Avoid device disappearing during derivation
|
||||
|
||||
if w.device == nil {
|
||||
w.stateLock.RUnlock()
|
||||
return accounts.Account{}, accounts.ErrWalletClosed
|
||||
}
|
||||
<-w.commsLock // Avoid concurrent hardware access
|
||||
address, err := w.driver.Derive(path)
|
||||
w.commsLock <- struct{}{}
|
||||
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
// If an error occurred or no pinning was requested, return
|
||||
if err != nil {
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
account := accounts.Account{
|
||||
Address: address,
|
||||
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
|
||||
}
|
||||
if !pin {
|
||||
return account, nil
|
||||
}
|
||||
// Pinning needs to modify the state
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
if _, ok := w.paths[address]; !ok {
|
||||
w.accounts = append(w.accounts, account)
|
||||
w.paths[address] = path
|
||||
}
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
|
||||
// user used previously (based on the chain state), but ones that he/she did not
|
||||
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
|
||||
// derivation only runs during account listing (and even then throttled).
|
||||
func (w *wallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
|
||||
w.stateLock.Lock()
|
||||
defer w.stateLock.Unlock()
|
||||
|
||||
w.deriveNextPath = make(accounts.DerivationPath, len(base))
|
||||
copy(w.deriveNextPath[:], base[:])
|
||||
|
||||
w.deriveNextAddr = common.Address{}
|
||||
w.deriveChain = chain
|
||||
}
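// Illustrative sketch (not part of the original diff): a caller would typically
// point self-derivation at a chain reader once, right after opening the wallet;
// discovery itself then piggybacks on subsequent Accounts() calls. The base path
// constant and the client value below are assumptions for the example:
//
//	if err := w.Open(""); err == nil {
//		// client is assumed to be an ethclient.Client (or any ethereum.ChainStateReader)
//		w.SelfDerive(accounts.DefaultBaseDerivationPath, client)
//		_ = w.Accounts() // triggers a throttled self-derivation pass
//	}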
|
||||
|
||||
// SignHash implements accounts.Wallet, however signing arbitrary data is not
|
||||
// supported for hardware wallets, so this method will always return an error.
|
||||
func (w *wallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
|
||||
return nil, accounts.ErrNotSupported
|
||||
}
|
||||
|
||||
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
|
||||
// wallet to request a confirmation from the user. It returns either the signed
|
||||
// transaction or a failure if the user denied the transaction.
|
||||
//
|
||||
// Note, if the version of the Ethereum application running on the Ledger wallet is
|
||||
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
|
||||
// will be returned opposed to silently signing in Homestead mode.
|
||||
func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
|
||||
defer w.stateLock.RUnlock()
|
||||
|
||||
// If the wallet is closed, abort
|
||||
if w.device == nil {
|
||||
return nil, accounts.ErrWalletClosed
|
||||
}
|
||||
// Make sure the requested account is contained within
|
||||
path, ok := w.paths[account.Address]
|
||||
if !ok {
|
||||
return nil, accounts.ErrUnknownAccount
|
||||
}
|
||||
// All infos gathered and metadata checks out, request signing
|
||||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
// Sign the transaction and verify the sender to avoid hardware fault surprises
|
||||
sender, signed, err := w.driver.SignTx(path, tx, chainID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sender != account.Address {
|
||||
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", account.Address.Hex(), sender.Hex())
|
||||
}
|
||||
return signed, nil
|
||||
}
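// Illustrative sketch (not part of the original diff): from the caller's point of
// view signing is a single call, and thanks to the sender check above a successful
// return already proves the device signed for the requested account. The chain ID
// below is an assumption for the example (1 = mainnet, EIP-155 replay protection):
//
//	signed, err := w.SignTx(account, tx, big.NewInt(1))
//	if err != nil {
//		// user denied the transaction on the device, the device was unplugged,
//		// or the recovered sender did not match the requested account
//	}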
|
||||
|
||||
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
|
||||
// data is not supported for hardware wallets, so this method will always return
|
||||
// an error.
|
||||
func (w *wallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
|
||||
return w.SignHash(account, hash)
|
||||
}
|
||||
|
||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||
// transaction with the given account using passphrase as extra authentication.
|
||||
// Since USB wallets don't rely on passphrases, these are silently ignored.
|
||||
func (w *wallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
return w.SignTx(account, tx, chainID)
|
||||
}
|
@ -23,8 +23,8 @@ environment:
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.9.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
562
bmt/bmt.go
Normal file
@ -0,0 +1,562 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bmt provides a binary merkle tree implementation
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
/*
|
||||
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size
|
||||
It is defined as the root hash of the binary merkle tree built over fixed size segments
|
||||
of the underlying chunk using any base hash function (e.g. Keccak-256 SHA3)
|
||||
|
||||
It is used as the chunk hash function in swarm which in turn is the basis for the
|
||||
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
|
||||
|
||||
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
|
||||
segment is a substring of a chunk starting at a particular offset
|
||||
The size of the underlying segments is fixed at 32 bytes (called the resolution
|
||||
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
|
||||
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
|
||||
|
||||
Two implementations are provided:
|
||||
|
||||
* RefHasher is optimized for code simplicity and meant as a reference implementation
|
||||
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
|
||||
control structure to coordinate the concurrent routines
|
||||
It implements the ChunkHash interface as well as the Go standard hash.Hash interface
|
||||
|
||||
*/
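// Illustrative sketch (not part of the original diff): hashing a chunk with the
// concurrent Hasher defined below follows the standard hash.Hash flow, reusing a
// TreePool across chunks. sha3.NewKeccak256 is the constructor the package tests
// use; the 8-byte length prefix is an assumption borrowed from the swarm chunk
// convention, not something this file mandates:
//
//	pool := NewTreePool(sha3.NewKeccak256, DefaultSegmentCount, DefaultPoolSize)
//	h := New(pool)
//	h.ResetWithLength(lengthPrefix) // hypothetical 8-byte encoding of the data span
//	h.Write(chunkData)
//	root := h.Sum(nil)              // sha3(length + BMT(chunk)) when a length was set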
|
||||
|
||||
const (
|
||||
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
|
||||
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
|
||||
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
|
||||
// the maximum number of concurrent BMT hashing operations performed by the same hasher
|
||||
DefaultPoolSize = 8
|
||||
)
|
||||
|
||||
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
|
||||
type BaseHasher func() hash.Hash
|
||||
|
||||
// Hasher is a reusable hasher for fixed maximum size chunks representing a BMT
|
||||
// implements the hash.Hash interface
|
||||
// reuse pool of Tree-s for amortised memory allocation and resource control
|
||||
// supports order-agnostic concurrent segment writes
|
||||
// as well as sequential read and write
|
||||
// can not be called concurrently on more than one chunk
|
||||
// can be further appended after Sum
|
||||
// Reset gives back the Tree to the pool and is guaranteed to leave
|
||||
// the tree and itself in a state reusable for hashing a new chunk
|
||||
type Hasher struct {
|
||||
pool *TreePool // BMT resource pool
|
||||
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
|
||||
blocksize int // segment size (size of hash) also for hash.Hash
|
||||
count int // segment count
|
||||
size int // for hash.Hash same as hashsize
|
||||
cur int // cursor position for rightmost currently open chunk
|
||||
segment []byte // the rightmost open segment (not complete)
|
||||
depth int // index of last level
|
||||
result chan []byte // result channel
|
||||
hash []byte // to record the result
|
||||
max int32 // max segments for SegmentWriter interface
|
||||
blockLength []byte // The block length that needs to be added in Sum
|
||||
}
|
||||
|
||||
// New creates a reusable Hasher
|
||||
// implements the hash.Hash interface
|
||||
// pulls a new Tree from a resource pool for hashing each chunk
|
||||
func New(p *TreePool) *Hasher {
|
||||
return &Hasher{
|
||||
pool: p,
|
||||
depth: depth(p.SegmentCount),
|
||||
size: p.SegmentSize,
|
||||
blocksize: p.SegmentSize,
|
||||
count: p.SegmentCount,
|
||||
result: make(chan []byte),
|
||||
}
|
||||
}
|
||||
|
||||
// Node is a reusable segment hasher representing a node in a BMT
|
||||
// it allows for continued writes after a Sum
|
||||
// and is left in completely reusable state after Reset
|
||||
type Node struct {
|
||||
level, index int // position of node for information/logging only
|
||||
initial bool // first and last node
|
||||
root bool // whether the node is root to a smaller BMT
|
||||
isLeft bool // whether it is left side of the parent double segment
|
||||
unbalanced bool // indicates if a node has only the left segment
|
||||
parent *Node // BMT connections
|
||||
state int32 // atomic increment impl concurrent boolean toggle
|
||||
left, right []byte
|
||||
}
|
||||
|
||||
// NewNode is a constructor for segment hasher nodes in the BMT
|
||||
func NewNode(level, index int, parent *Node) *Node {
|
||||
return &Node{
|
||||
parent: parent,
|
||||
level: level,
|
||||
index: index,
|
||||
initial: index == 0,
|
||||
isLeft: index%2 == 0,
|
||||
}
|
||||
}
|
||||
|
||||
// TreePool provides a pool of Trees used as resources by Hasher
|
||||
// a Tree popped from the pool is guaranteed to have clean state
|
||||
// for hashing a new chunk
|
||||
// Hasher Reset releases the Tree to the pool
|
||||
type TreePool struct {
|
||||
lock sync.Mutex
|
||||
c chan *Tree
|
||||
hasher BaseHasher
|
||||
SegmentSize int
|
||||
SegmentCount int
|
||||
Capacity int
|
||||
count int
|
||||
}
|
||||
|
||||
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
|
||||
// on Reserve it reuses free Trees or creates a new one if capacity is not reached
|
||||
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
|
||||
return &TreePool{
|
||||
c: make(chan *Tree, capacity),
|
||||
hasher: hasher,
|
||||
SegmentSize: hasher().Size(),
|
||||
SegmentCount: segmentCount,
|
||||
Capacity: capacity,
|
||||
}
|
||||
}
|
||||
|
||||
// Drain drains the pool until it has no more than n resources
|
||||
func (self *TreePool) Drain(n int) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
for len(self.c) > n {
|
||||
<-self.c
|
||||
self.count--
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve is blocking until it returns an available Tree
|
||||
// it reuses free Trees or creates a new one if capacity is not reached
|
||||
func (self *TreePool) Reserve() *Tree {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
var t *Tree
|
||||
if self.count == self.Capacity {
|
||||
return <-self.c
|
||||
}
|
||||
select {
|
||||
case t = <-self.c:
|
||||
default:
|
||||
t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
|
||||
self.count++
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Release gives back a Tree to the pool.
|
||||
// This Tree is guaranteed to be in reusable state
|
||||
// does not need locking
|
||||
func (self *TreePool) Release(t *Tree) {
|
||||
self.c <- t // can never fail but...
|
||||
}
|
||||
|
||||
// Tree is a reusable control structure representing a BMT
|
||||
// organised in a binary tree
|
||||
// Hasher uses a TreePool to pick one for each chunk hash
|
||||
// the Tree is 'locked' while not in the pool
|
||||
type Tree struct {
|
||||
leaves []*Node
|
||||
}
|
||||
|
||||
// Draw draws the BMT (badly)
|
||||
func (self *Tree) Draw(hash []byte, d int) string {
|
||||
var left, right []string
|
||||
var anc []*Node
|
||||
for i, n := range self.leaves {
|
||||
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
|
||||
if i%2 == 0 {
|
||||
anc = append(anc, n.parent)
|
||||
}
|
||||
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
|
||||
}
|
||||
anc = self.leaves
|
||||
var hashes [][]string
|
||||
for l := 0; len(anc) > 0; l++ {
|
||||
var nodes []*Node
|
||||
hash := []string{""}
|
||||
for i, n := range anc {
|
||||
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
|
||||
if i%2 == 0 && n.parent != nil {
|
||||
nodes = append(nodes, n.parent)
|
||||
}
|
||||
}
|
||||
hash = append(hash, "")
|
||||
hashes = append(hashes, hash)
|
||||
anc = nodes
|
||||
}
|
||||
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
|
||||
total := 60
|
||||
del := " "
|
||||
var rows []string
|
||||
for i := len(hashes) - 1; i >= 0; i-- {
|
||||
var textlen int
|
||||
hash := hashes[i]
|
||||
for _, s := range hash {
|
||||
textlen += len(s)
|
||||
}
|
||||
if total < textlen {
|
||||
total = textlen + len(hash)
|
||||
}
|
||||
delsize := (total - textlen) / (len(hash) - 1)
|
||||
if delsize > len(del) {
|
||||
delsize = len(del)
|
||||
}
|
||||
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
|
||||
rows = append(rows, row)
|
||||
|
||||
}
|
||||
rows = append(rows, strings.Join(left, " "))
|
||||
rows = append(rows, strings.Join(right, " "))
|
||||
return strings.Join(rows, "\n") + "\n"
|
||||
}
|
||||
|
||||
// NewTree initialises the Tree by building up the nodes of a BMT
|
||||
// segment size is stipulated to be the size of the hash
|
||||
// segmentCount needs to be a positive integer and does not need to be
|
||||
// a power of two and can even be an odd number
|
||||
// segmentSize * segmentCount determines the maximum chunk size
|
||||
// hashed using the tree
|
||||
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
|
||||
n := NewNode(0, 0, nil)
|
||||
n.root = true
|
||||
prevlevel := []*Node{n}
|
||||
// iterate over the levels, creating 2^level nodes at each level
|
||||
level := 1
|
||||
count := 2
|
||||
for d := 1; d <= depth(segmentCount); d++ {
|
||||
nodes := make([]*Node, count)
|
||||
for i := 0; i < len(nodes); i++ {
|
||||
var parent *Node
|
||||
parent = prevlevel[i/2]
|
||||
t := NewNode(level, i, parent)
|
||||
nodes[i] = t
|
||||
}
|
||||
prevlevel = nodes
|
||||
level++
|
||||
count *= 2
|
||||
}
|
||||
// the datanode level is the nodes on the last level, where the data segments are written
|
||||
return &Tree{
|
||||
leaves: prevlevel,
|
||||
}
|
||||
}
|
||||
|
||||
// methods needed by hash.Hash
|
||||
|
||||
// Size returns the size
|
||||
func (self *Hasher) Size() int {
|
||||
return self.size
|
||||
}
|
||||
|
||||
// BlockSize returns the block size
|
||||
func (self *Hasher) BlockSize() int {
|
||||
return self.blocksize
|
||||
}
|
||||
|
||||
// Sum returns the hash of the buffer
|
||||
// hash.Hash interface Sum method appends the byte slice to the underlying
|
||||
// data before it calculates and returns the hash of the chunk
|
||||
func (self *Hasher) Sum(b []byte) (r []byte) {
|
||||
t := self.bmt
|
||||
i := self.cur
|
||||
n := t.leaves[i]
|
||||
j := i
|
||||
// must run strictly before all nodes calculate
|
||||
// datanodes are guaranteed to have a parent
|
||||
if len(self.segment) > self.size && i > 0 && n.parent != nil {
|
||||
n = n.parent
|
||||
} else {
|
||||
i *= 2
|
||||
}
|
||||
d := self.finalise(n, i)
|
||||
self.writeSegment(j, self.segment, d)
|
||||
c := <-self.result
|
||||
self.releaseTree()
|
||||
|
||||
// sha3(length + BMT(pure_chunk))
|
||||
if self.blockLength == nil {
|
||||
return c
|
||||
}
|
||||
res := self.pool.hasher()
|
||||
res.Reset()
|
||||
res.Write(self.blockLength)
|
||||
res.Write(c)
|
||||
return res.Sum(nil)
|
||||
}
|
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// Hash waits for the hasher result and returns it
|
||||
// caller must call this on a BMT Hasher being written to
|
||||
func (self *Hasher) Hash() []byte {
|
||||
return <-self.result
|
||||
}
|
||||
|
||||
// Hasher implements the io.Writer interface
|
||||
|
||||
// Write fills the buffer to hash
|
||||
// with every completed full segment it launches a hasher goroutine
|
||||
// that shoots up the BMT
|
||||
func (self *Hasher) Write(b []byte) (int, error) {
|
||||
l := len(b)
|
||||
if l <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
s := self.segment
|
||||
i := self.cur
|
||||
count := (self.count + 1) / 2
|
||||
need := self.count*self.size - self.cur*2*self.size
|
||||
size := self.size
|
||||
if need > size {
|
||||
size *= 2
|
||||
}
|
||||
if l < need {
|
||||
need = l
|
||||
}
|
||||
// calculate missing bit to complete current open segment
|
||||
rest := size - len(s)
|
||||
if need < rest {
|
||||
rest = need
|
||||
}
|
||||
s = append(s, b[:rest]...)
|
||||
need -= rest
|
||||
// read full segments and the last possibly partial segment
|
||||
for need > 0 && i < count-1 {
|
||||
// push all finished chunks we read
|
||||
self.writeSegment(i, s, self.depth)
|
||||
need -= size
|
||||
if need < 0 {
|
||||
size += need
|
||||
}
|
||||
s = b[rest : rest+size]
|
||||
rest += size
|
||||
i++
|
||||
}
|
||||
self.segment = s
|
||||
self.cur = i
|
||||
// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Hasher implements the io.ReaderFrom interface
|
||||
|
||||
// ReadFrom reads from io.Reader and appends to the data to hash using Write
|
||||
// it reads so that chunk to hash is maximum length or reader reaches EOF
|
||||
// caller must Reset the hasher prior to call
|
||||
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
|
||||
bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
|
||||
buf := make([]byte, bufsize)
|
||||
var read int
|
||||
for {
|
||||
var n int
|
||||
n, err = r.Read(buf)
|
||||
read += n
|
||||
if err == io.EOF || read == len(buf) {
|
||||
hash := self.Sum(buf[:n])
|
||||
if read == len(buf) {
|
||||
err = NewEOC(hash)
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
n, err = self.Write(buf[:n])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return int64(read), err
|
||||
}
|
||||
|
||||
// Reset needs to be called before writing to the hasher
|
||||
func (self *Hasher) Reset() {
|
||||
self.getTree()
|
||||
self.blockLength = nil
|
||||
}
|
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// ResetWithLength needs to be called before writing to the hasher
|
||||
// the argument is supposed to be the byte slice binary representation of
|
||||
// the length of the data subsumed under the hash
|
||||
func (self *Hasher) ResetWithLength(l []byte) {
|
||||
self.Reset()
|
||||
self.blockLength = l
|
||||
|
||||
}
|
||||
|
||||
// Release gives back the Tree to the pool whereby it unlocks
|
||||
// it resets tree, segment and index
|
||||
func (self *Hasher) releaseTree() {
|
||||
if self.bmt != nil {
|
||||
n := self.bmt.leaves[self.cur]
|
||||
for ; n != nil; n = n.parent {
|
||||
n.unbalanced = false
|
||||
if n.parent != nil {
|
||||
n.root = false
|
||||
}
|
||||
}
|
||||
self.pool.Release(self.bmt)
|
||||
self.bmt = nil
|
||||
|
||||
}
|
||||
self.cur = 0
|
||||
self.segment = nil
|
||||
}
|
||||
|
||||
func (self *Hasher) writeSegment(i int, s []byte, d int) {
|
||||
h := self.pool.hasher()
|
||||
n := self.bmt.leaves[i]
|
||||
|
||||
if len(s) > self.size && n.parent != nil {
|
||||
go func() {
|
||||
h.Reset()
|
||||
h.Write(s)
|
||||
s = h.Sum(nil)
|
||||
|
||||
if n.root {
|
||||
self.result <- s
|
||||
return
|
||||
}
|
||||
self.run(n.parent, h, d, n.index, s)
|
||||
}()
|
||||
return
|
||||
}
|
||||
go self.run(n, h, d, i*2, s)
|
||||
}
|
||||
|
||||
func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
|
||||
isLeft := i%2 == 0
|
||||
for {
|
||||
if isLeft {
|
||||
n.left = s
|
||||
} else {
|
||||
n.right = s
|
||||
}
|
||||
if !n.unbalanced && n.toggle() {
|
||||
return
|
||||
}
|
||||
if !n.unbalanced || !isLeft || i == 0 && d == 0 {
|
||||
h.Reset()
|
||||
h.Write(n.left)
|
||||
h.Write(n.right)
|
||||
s = h.Sum(nil)
|
||||
|
||||
} else {
|
||||
s = append(n.left, n.right...)
|
||||
}
|
||||
|
||||
self.hash = s
|
||||
if n.root {
|
||||
self.result <- s
|
||||
return
|
||||
}
|
||||
|
||||
isLeft = n.isLeft
|
||||
n = n.parent
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// getTree obtains a BMT resource by reserving one from the pool
|
||||
func (self *Hasher) getTree() *Tree {
|
||||
if self.bmt != nil {
|
||||
return self.bmt
|
||||
}
|
||||
t := self.pool.Reserve()
|
||||
self.bmt = t
|
||||
return t
|
||||
}
|
||||
|
||||
// atomic bool toggle implementing a concurrent reusable 2-state object
|
||||
// atomic AddInt32 with mod 2 implements an atomic bool toggle
|
||||
// it returns true if the toggler just put it in the active/waiting state
|
||||
func (self *Node) toggle() bool {
|
||||
return atomic.AddInt32(&self.state, 1)%2 == 1
|
||||
}
|
||||
|
||||
func hashstr(b []byte) string {
|
||||
end := len(b)
|
||||
if end > 4 {
|
||||
end = 4
|
||||
}
|
||||
return fmt.Sprintf("%x", b[:end])
|
||||
}
|
||||
|
||||
func depth(n int) (d int) {
|
||||
for l := (n - 1) / 2; l > 0; l /= 2 {
|
||||
d++
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// finalise is following the zigzags on the tree belonging
|
||||
// to the final datasegment
|
||||
func (self *Hasher) finalise(n *Node, i int) (d int) {
|
||||
isLeft := i%2 == 0
|
||||
for {
|
||||
// when the final segment's path is going via left segments
|
||||
// the incoming data is pushed to the parent upon pulling the left
|
||||
// we do not need to toggle the state since this condition is
|
||||
// detectable
|
||||
n.unbalanced = isLeft
|
||||
n.right = nil
|
||||
if n.initial {
|
||||
n.root = true
|
||||
return d
|
||||
}
|
||||
isLeft = n.isLeft
|
||||
n = n.parent
|
||||
d++
|
||||
}
|
||||
}
|
||||
|
||||
// EOC (end of chunk) implements the error interface
|
||||
type EOC struct {
|
||||
Hash []byte // read the hash of the chunk off the error
|
||||
}
|
||||
|
||||
// Error returns the error string
|
||||
func (self *EOC) Error() string {
|
||||
return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
|
||||
}
|
||||
|
||||
// NewEOC creates new end of chunk error with the hash
|
||||
func NewEOC(hash []byte) *EOC {
|
||||
return &EOC{hash}
|
||||
}
|
85
bmt/bmt_r.go
Normal file
@ -0,0 +1,85 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// simple nonconcurrent reference implementation for hashsize segment based
|
||||
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
|
||||
//
|
||||
// This implementation does not take advantage of any parallelism and uses
|
||||
// far more memory than necessary, but it is easy to see that it is correct.
|
||||
// It can be used for generating test cases for optimized implementations.
|
||||
// see testBMTHasherCorrectness function in bmt_test.go
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
// RefHasher is the non-optimized easy to read reference implementation of BMT
|
||||
type RefHasher struct {
|
||||
span int
|
||||
section int
|
||||
cap int
|
||||
h hash.Hash
|
||||
}
|
||||
|
||||
// NewRefHasher returns a new RefHasher
|
||||
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
|
||||
h := hasher()
|
||||
hashsize := h.Size()
|
||||
maxsize := hashsize * count
|
||||
c := 2
|
||||
for ; c < count; c *= 2 {
|
||||
}
|
||||
if c > 2 {
|
||||
c /= 2
|
||||
}
|
||||
return &RefHasher{
|
||||
section: 2 * hashsize,
|
||||
span: c * hashsize,
|
||||
cap: maxsize,
|
||||
h: h,
|
||||
}
|
||||
}
|
||||
|
||||
// Hash returns the BMT hash of the byte slice
|
||||
// implements the SwarmHash interface
|
||||
func (rh *RefHasher) Hash(d []byte) []byte {
|
||||
if len(d) > rh.cap {
|
||||
d = d[:rh.cap]
|
||||
}
|
||||
|
||||
return rh.hash(d, rh.span)
|
||||
}
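// Illustrative sketch (not part of the original diff): the reference hasher is
// meant for spot-checking the optimized Hasher, which is exactly how the package
// tests use it. For a 128-segment Keccak-256 BMT:
//
//	rh := NewRefHasher(sha3.NewKeccak256, 128)
//	expected := rh.Hash(chunkData) // compare against Hasher's Sum over the same data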
|
||||
|
||||
func (rh *RefHasher) hash(d []byte, s int) []byte {
|
||||
l := len(d)
|
||||
left := d
|
||||
var right []byte
|
||||
if l > rh.section {
|
||||
for ; s >= l; s /= 2 {
|
||||
}
|
||||
left = rh.hash(d[:s], s)
|
||||
right = d[s:]
|
||||
if l-s > rh.section/2 {
|
||||
right = rh.hash(right, s)
|
||||
}
|
||||
}
|
||||
defer rh.h.Reset()
|
||||
rh.h.Write(left)
|
||||
rh.h.Write(right)
|
||||
h := rh.h.Sum(nil)
|
||||
return h
|
||||
}
|
481
bmt/bmt_test.go
Normal file
@ -0,0 +1,481 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
const (
|
||||
maxproccnt = 8
|
||||
)
|
||||
|
||||
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
|
||||
// all data lengths between 0 and 256 bytes
|
||||
func TestRefHasher(t *testing.T) {
|
||||
hashFunc := sha3.NewKeccak256
|
||||
|
||||
sha3 := func(data ...[]byte) []byte {
|
||||
h := hashFunc()
|
||||
for _, v := range data {
|
||||
h.Write(v)
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// the test struct is used to specify the expected BMT hash for data
|
||||
// lengths between "from" and "to"
|
||||
type test struct {
|
||||
from int64
|
||||
to int64
|
||||
expected func([]byte) []byte
|
||||
}
|
||||
|
||||
var tests []*test
|
||||
|
||||
// all lengths in [0,64] should be:
|
||||
//
|
||||
// sha3(data)
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 0,
|
||||
to: 64,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(data)
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [65,96] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// data[64:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 65,
|
||||
to: 96,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), data[64:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [97,128] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 97,
|
||||
to: 128,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), sha3(data[64:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [129,160] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// data[128:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 129,
|
||||
to: 160,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [161,192] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(data[128:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 161,
|
||||
to: 192,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [193,224] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// data[192:]
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 193,
|
||||
to: 224,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [225,256] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// sha3(data[192:])
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 225,
|
||||
to: 256,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
|
||||
},
|
||||
})
|
||||
|
||||
// run the tests
|
||||
for _, x := range tests {
|
||||
for length := x.from; length <= x.to; length++ {
|
||||
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
|
||||
data := make([]byte, length)
|
||||
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := x.expected(data)
|
||||
actual := NewRefHasher(hashFunc, 128).Hash(data)
|
||||
if !bytes.Equal(actual, expected) {
|
||||
t.Fatalf("expected %x, got %x", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testDataReader(l int) (r io.Reader) {
|
||||
return io.LimitReader(crand.Reader, int64(l))
|
||||
}
|
||||
|
||||
func TestHasherCorrectness(t *testing.T) {
|
||||
err := testHasher(testBaseHasher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHasher(f func(BaseHasher, []byte, int, int) error) error {
|
||||
tdata := testDataReader(4128)
|
||||
data := make([]byte, 4128)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
size := hasher().Size()
|
||||
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
|
||||
|
||||
var err error
|
||||
for _, count := range counts {
|
||||
max := count * size
|
||||
incr := 1
|
||||
for n := 0; n <= max+incr; n += incr {
|
||||
err = f(hasher, data, n, count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestHasherReuseWithoutRelease(t *testing.T) {
|
||||
testHasherReuse(1, t)
|
||||
}
|
||||
|
||||
func TestHasherReuseWithRelease(t *testing.T) {
|
||||
testHasherReuse(maxproccnt, t)
|
||||
}
|
||||
|
||||
func testHasherReuse(i int, t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, i)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
|
||||
for i := 0; i < 500; i++ {
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasherConcurrency(t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, maxproccnt)
|
||||
defer pool.Drain(0)
|
||||
wg := sync.WaitGroup{}
|
||||
cycles := 100
|
||||
wg.Add(maxproccnt * cycles)
|
||||
errc := make(chan error)
|
||||
|
||||
for p := 0; p < maxproccnt; p++ {
|
||||
for i := 0; i < cycles; i++ {
|
||||
go func() {
|
||||
bmt := New(pool)
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
wg.Done()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-time.NewTimer(5 * time.Second).C:
|
||||
err = fmt.Errorf("timed out")
|
||||
case err = <-errc:
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
|
||||
pool := NewTreePool(hasher, count, 1)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
return testHasherCorrectness(bmt, hasher, d, n, count)
|
||||
}
|
||||
|
||||
func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
|
||||
data := d[:n]
|
||||
rbmt := NewRefHasher(hasher, count)
|
||||
exp := rbmt.Hash(data)
|
||||
timeout := time.NewTimer(time.Second)
|
||||
c := make(chan error)
|
||||
|
||||
go func() {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
got := bmt.Sum(nil)
|
||||
if !bytes.Equal(got, exp) {
|
||||
c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
|
||||
}
|
||||
close(c)
|
||||
}()
|
||||
select {
|
||||
case <-timeout.C:
|
||||
err = fmt.Errorf("BMT hash calculation timed out")
|
||||
case err = <-c:
|
||||
}
|
||||
return err
|
||||
}
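testHasherCorrectness above races the asynchronous BMT computation against a one-second timer. The same guard can be factored into a small helper; this is a generic sketch of the pattern (using only fmt and time), not something the package provides.

// runWithTimeout runs fn in a goroutine and returns its error, or a timeout
// error if fn neither fails nor finishes before the deadline.
func runWithTimeout(d time.Duration, fn func() error) error {
	c := make(chan error, 1)
	go func() { c <- fn() }()
	select {
	case <-time.After(d):
		return fmt.Errorf("timed out after %v", d)
	case err := <-c:
		return err
	}
}

The body of the goroutine above (Reset, Write, Sum, compare against the reference hash) would simply become the fn argument.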
|
||||
|
||||
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
|
||||
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
|
||||
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
|
||||
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
|
||||
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
|
||||
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
|
||||
|
||||
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
|
||||
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
|
||||
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
|
||||
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
|
||||
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
|
||||
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
|
||||
|
||||
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
|
||||
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
|
||||
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
|
||||
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
|
||||
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
|
||||
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
|
||||
func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
|
||||
func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
|
||||
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
|
||||
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
|
||||
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) }
|
||||
func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) }
|
||||
func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) }
|
||||
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
|
||||
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
|
||||
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }
|
||||
|
||||
func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) }
|
||||
func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) }
|
||||
func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) }
|
||||
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
|
||||
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
|
||||
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }
|
||||
|
||||
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
|
||||
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
|
||||
// spread over maxproccnt goroutines for an n-byte input, each goroutine reusing its own base hasher
|
||||
// the premise is that this is the minimum computation needed for a BMT
|
||||
// therefore this serves as a theoretical optimum for concurrent implementations
|
||||
func benchmarkBMTBaseline(n int, t *testing.B) {
|
||||
tdata := testDataReader(64)
|
||||
data := make([]byte, 64)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
count := int32((n-1)/hasher().Size() + 1)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(maxproccnt)
|
||||
var i int32
|
||||
for j := 0; j < maxproccnt; j++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
h := hasher()
|
||||
for atomic.AddInt32(&i, 1) < count {
|
||||
h.Reset()
|
||||
h.Write(data)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
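The premise is easiest to check with the numbers used throughout this file: for a 4096-byte chunk and a 32-byte Keccak-256 digest, count comes out to (4096-1)/32 + 1 = 128, i.e. one 64-byte base hash per segment, shared out across maxproccnt goroutines. The helper below merely restates the formula from the loop above.

// baselineHashCount is the count of 64-byte base hashes the baseline schedules
// for an n-byte input, as computed inside benchmarkBMTBaseline.
func baselineHashCount(n, digestSize int) int {
	return (n-1)/digestSize + 1
}

// baselineHashCount(4096, 32) == 128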
|
||||
|
||||
func benchmarkHasher(n int, t *testing.B) {
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
size := 1
|
||||
hasher := sha3.NewKeccak256
|
||||
segmentCount := 128
|
||||
pool := NewTreePool(hasher, segmentCount, size)
|
||||
bmt := New(pool)
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
bmt.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
hasher := sha3.NewKeccak256
|
||||
segmentCount := 128
|
||||
pool := NewTreePool(hasher, segmentCount, poolsize)
|
||||
cycles := 200
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(cycles)
|
||||
for j := 0; j < cycles; j++ {
|
||||
bmt := New(pool)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
bmt.Sum(nil)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkSHA3(n int, t *testing.B) {
|
||||
data := make([]byte, n)
|
||||
tdata := testDataReader(n)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
h := hasher()
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
h.Reset()
|
||||
h.Write(data)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkRefHasher(n int, t *testing.B) {
|
||||
data := make([]byte, n)
|
||||
tdata := testDataReader(n)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
rbmt := NewRefHasher(hasher, 128)
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
rbmt.Hash(data)
|
||||
}
|
||||
}
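The benchmarks above are normally driven by go test -bench, but the same measurement can also be taken programmatically with the standard library's testing.Benchmark. A minimal sketch, assuming the bmt import path and reusing the 4k chunk size and Keccak-256 constructor from the benchmarks:

package main

import (
	"fmt"
	"testing"

	"github.com/ethereum/go-ethereum/bmt"         // assumed import path for NewRefHasher
	"github.com/ethereum/go-ethereum/crypto/sha3" // Keccak-256, as above
)

func main() {
	data := make([]byte, 4096) // a zero-filled 4k chunk is enough for timing
	res := testing.Benchmark(func(b *testing.B) {
		rbmt := bmt.NewRefHasher(sha3.NewKeccak256, 128)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			rbmt.Hash(data)
		}
	})
	fmt.Printf("RefHasher, 4k chunk: %d ns/op\n", res.NsPerOp())
}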
|
@ -21,18 +21,18 @@ variable which Travis CI makes available to certain builds.
|
||||
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
|
||||
version that is available in the main Ubuntu repository. In order to make this possible,
|
||||
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
|
||||
golang-1.8, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
|
||||
|
||||
## Building Packages Locally (for testing)
|
||||
|
||||
You need to run Ubuntu to do test packaging.
|
||||
|
||||
Add the gophers PPA and install Go 1.8 and Debian packaging tools:
|
||||
Add the gophers PPA and install Go 1.9 and Debian packaging tools:
|
||||
|
||||
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install build-essential golang-1.8 devscripts debhelper
|
||||
$ sudo apt-get install build-essential golang-1.9 devscripts debhelper
|
||||
|
||||
Create the source packages:
|
||||
|
||||
|
build/ci.go
@ -24,7 +24,8 @@ Usage: go run ci.go <command> <command flags/arguments>
|
||||
Available commands are:
|
||||
|
||||
install [ -arch architecture ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ -misspell ] [ packages... ] -- runs the tests
|
||||
test [ -coverage ] [ packages... ] -- runs the tests
|
||||
lint -- runs certain pre-selected linters
|
||||
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
|
||||
importkeys -- imports signing keys from env
|
||||
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
|
||||
@ -119,7 +120,8 @@ var (
|
||||
// Distros for which packages are created.
|
||||
// Note: vivid is unsupported because there is no golang-1.6 package for it.
|
||||
// Note: wily is unsupported because it was officially deprecated on launchpad.
|
||||
debDistros = []string{"trusty", "xenial", "yakkety", "zesty"}
|
||||
// Note: yakkety is unsupported because it was officially deprecated on launchpad.
|
||||
debDistros = []string{"trusty", "xenial", "zesty", "artful"}
|
||||
)
|
||||
|
||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||
@ -145,6 +147,8 @@ func main() {
|
||||
doInstall(os.Args[2:])
|
||||
case "test":
|
||||
doTest(os.Args[2:])
|
||||
case "lint":
|
||||
doLint(os.Args[2:])
|
||||
case "archive":
|
||||
doArchive(os.Args[2:])
|
||||
case "debsrc":
|
||||
@ -249,10 +253,7 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
|
||||
}
|
||||
|
||||
func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
|
||||
cmd := exec.Command(gocmd, subcmd)
|
||||
cmd.Args = append(cmd.Args, args...)
|
||||
|
||||
cmd := build.GoTool(subcmd, args...)
|
||||
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
|
||||
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
|
||||
// Work around issue by allowing multiple definitions for <1.8 builds.
|
||||
@ -282,7 +283,6 @@ func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
|
||||
func doTest(cmdline []string) {
|
||||
var (
|
||||
misspell = flag.Bool("misspell", false, "Whether to run the spell checker")
|
||||
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
@ -296,10 +296,7 @@ func doTest(cmdline []string) {
|
||||
|
||||
// Run analysis tools before the tests.
|
||||
build.MustRun(goTool("vet", packages...))
|
||||
if *misspell {
|
||||
// TODO(karalabe): Reenable after false detection is fixed: https://github.com/client9/misspell/issues/105
|
||||
// spellcheck(packages)
|
||||
}
|
||||
|
||||
// Run the actual tests.
|
||||
gotest := goTool("test", buildFlags(env)...)
|
||||
// Test a single package at a time. CI builders are slow
|
||||
@ -308,35 +305,31 @@ func doTest(cmdline []string) {
|
||||
if *coverage {
|
||||
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
||||
}
|
||||
|
||||
gotest.Args = append(gotest.Args, packages...)
|
||||
build.MustRun(gotest)
|
||||
}
|
||||
|
||||
// spellcheck runs the client9/misspell spellchecker package on all Go, Cgo and
|
||||
// test files in the requested packages.
|
||||
func spellcheck(packages []string) {
|
||||
// Ensure the spellchecker is available
|
||||
build.MustRun(goTool("get", "github.com/client9/misspell/cmd/misspell"))
|
||||
// runs gometalinter on requested packages
|
||||
func doLint(cmdline []string) {
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
|
||||
// Windows chokes on long argument lists, check packages individually
|
||||
for _, pkg := range packages {
|
||||
// The spell checker doesn't work on packages, gather all .go files for it
|
||||
out, err := goTool("list", "-f", "{{.Dir}}{{range .GoFiles}}\n{{.}}{{end}}{{range .CgoFiles}}\n{{.}}{{end}}{{range .TestGoFiles}}\n{{.}}{{end}}", pkg).CombinedOutput()
|
||||
if err != nil {
|
||||
log.Fatalf("source file listing failed: %v\n%s", err, string(out))
|
||||
}
|
||||
// Retrieve the folder and assemble the source list
|
||||
lines := strings.Split(string(out), "\n")
|
||||
root := lines[0]
|
||||
packages := []string{"./..."}
|
||||
if len(flag.CommandLine.Args()) > 0 {
|
||||
packages = flag.CommandLine.Args()
|
||||
}
|
||||
// Get metalinter and install all supported linters
|
||||
build.MustRun(goTool("get", "gopkg.in/alecthomas/gometalinter.v1"))
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), "--install")
|
||||
|
||||
sources := make([]string, 0, len(lines)-1)
|
||||
for _, line := range lines[1:] {
|
||||
if line = strings.TrimSpace(line); line != "" {
|
||||
sources = append(sources, filepath.Join(root, line))
|
||||
}
|
||||
}
|
||||
// Run the spell checker for this particular package
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "misspell"), append([]string{"-error"}, sources...)...)
|
||||
// Run fast linters batched together
|
||||
configs := []string{"--vendor", "--disable-all", "--enable=vet", "--enable=gofmt", "--enable=misspell"}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), append(configs, packages...)...)
|
||||
|
||||
// Run slow linters one by one
|
||||
for _, linter := range []string{"unconvert"} {
|
||||
configs = []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), append(configs, packages...)...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2,7 +2,7 @@ Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.8
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.9
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
|
@ -5,7 +5,7 @@
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.8/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
|
@ -46,8 +46,5 @@ func disasmCmd(ctx *cli.Context) error {
|
||||
|
||||
code := strings.TrimSpace(string(in[:]))
|
||||
fmt.Printf("%v\n", code)
|
||||
if err = asm.PrintDisassembled(code); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return asm.PrintDisassembled(code)
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
|
||||
log := vm.StructLog{
|
||||
Pc: pc,
|
||||
Op: op,
|
||||
Gas: gas + cost,
|
||||
Gas: gas,
|
||||
GasCost: cost,
|
||||
MemorySize: memory.Len(),
|
||||
Storage: nil,
|
||||
@ -57,11 +57,15 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
|
||||
}
|
||||
|
||||
// CaptureEnd is triggered at end of execution.
|
||||
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration) error {
|
||||
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
|
||||
type endLog struct {
|
||||
Output string `json:"output"`
|
||||
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
|
||||
Time time.Duration `json:"time"`
|
||||
Err string `json:"error,omitempty"`
|
||||
}
|
||||
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t})
|
||||
if err != nil {
|
||||
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()})
|
||||
}
|
||||
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
|
||||
}
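With the added parameter, every machine-readable trace now ends in a single JSON object whose fields follow the endLog tags above: output as unprefixed hex, gasUsed as rendered by math.HexOrDecimal64, time as a nanosecond count, and error only when execution failed. A consumer can mirror those tags to decode it; the sample line below is made up, only the field names come from the struct.

package main

import (
	"encoding/json"
	"fmt"
)

// endLog mirrors the JSON tags of the logger's endLog type; gasUsed is kept as
// raw JSON since math.HexOrDecimal64 may render it as hex or decimal, and time
// is the time.Duration nanosecond count.
type endLog struct {
	Output  string          `json:"output"`
	GasUsed json.RawMessage `json:"gasUsed"`
	Time    int64           `json:"time"`
	Err     string          `json:"error,omitempty"`
}

func main() {
	// hypothetical final line of a traced run
	line := []byte(`{"output":"","gasUsed":"0x5208","time":1200000,"error":"out of gas"}`)
	var l endLog
	if err := json.Unmarshal(line, &l); err != nil {
		panic(err)
	}
	fmt.Println(l.Output, string(l.GasUsed), l.Time, l.Err)
}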
|
||||
|
@ -53,7 +53,7 @@ var (
|
||||
}
|
||||
CodeFileFlag = cli.StringFlag{
|
||||
Name: "codefile",
|
||||
Usage: "file containing EVM code",
|
||||
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
|
||||
}
|
||||
GasFlag = cli.Uint64Flag{
|
||||
Name: "gas",
|
||||
@ -102,6 +102,10 @@ var (
|
||||
Name: "sender",
|
||||
Usage: "The transaction origin",
|
||||
}
|
||||
ReceiverFlag = cli.StringFlag{
|
||||
Name: "receiver",
|
||||
Usage: "The transaction receiver (execution context)",
|
||||
}
|
||||
DisableMemoryFlag = cli.BoolFlag{
|
||||
Name: "nomemory",
|
||||
Usage: "disable memory output",
|
||||
@ -131,6 +135,7 @@ func init() {
|
||||
GenesisFlag,
|
||||
MachineFlag,
|
||||
SenderFlag,
|
||||
ReceiverFlag,
|
||||
DisableMemoryFlag,
|
||||
DisableStackFlag,
|
||||
}
|
||||
@ -138,6 +143,7 @@ func init() {
|
||||
compileCommand,
|
||||
disasmCommand,
|
||||
runCommand,
|
||||
stateTestCommand,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -84,6 +84,7 @@ func runCmd(ctx *cli.Context) error {
|
||||
statedb *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.StringToAddress("sender")
|
||||
receiver = common.StringToAddress("receiver")
|
||||
)
|
||||
if ctx.GlobalBool(MachineFlag.Name) {
|
||||
tracer = NewJSONLogger(logconfig, os.Stdout)
|
||||
@ -104,46 +105,52 @@ func runCmd(ctx *cli.Context) error {
|
||||
if ctx.GlobalString(SenderFlag.Name) != "" {
|
||||
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
|
||||
}
|
||||
|
||||
statedb.CreateAccount(sender)
|
||||
|
||||
if ctx.GlobalString(ReceiverFlag.Name) != "" {
|
||||
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
|
||||
}
|
||||
|
||||
var (
|
||||
code []byte
|
||||
ret []byte
|
||||
err error
|
||||
)
|
||||
if fn := ctx.Args().First(); len(fn) > 0 {
|
||||
// The '--code' or '--codefile' flag overrides code in state
|
||||
if ctx.GlobalString(CodeFileFlag.Name) != "" {
|
||||
var hexcode []byte
|
||||
var err error
|
||||
// If - is specified, it means that code comes from stdin
|
||||
if ctx.GlobalString(CodeFileFlag.Name) == "-" {
|
||||
// Try reading from stdin
|
||||
if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
// Codefile with hex assembly
|
||||
if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
|
||||
|
||||
} else if ctx.GlobalString(CodeFlag.Name) != "" {
|
||||
code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
|
||||
} else if fn := ctx.Args().First(); len(fn) > 0 {
|
||||
// EASM-file to compile
|
||||
src, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bin, err := compiler.Compile(fn, src, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
code = common.Hex2Bytes(bin)
|
||||
} else if ctx.GlobalString(CodeFlag.Name) != "" {
|
||||
code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
|
||||
} else {
|
||||
var hexcode []byte
|
||||
if ctx.GlobalString(CodeFileFlag.Name) != "" {
|
||||
var err error
|
||||
hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name))
|
||||
if err != nil {
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
hexcode, err = ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
|
||||
}
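Taken together, the rewritten branch gives the code source a clear precedence: --codefile first (with '-' meaning hex read from stdin), then --code as inline hex, then a positional EASM source file that is compiled; if none is supplied, whatever code the receiver already has in state gets executed. A condensed sketch of that order, with the hypothetical name loadCode and the error handling collapsed; it reuses only identifiers visible in the surrounding file.

// loadCode resolves the EVM bytecode in the precedence described above.
func loadCode(ctx *cli.Context) ([]byte, error) {
	if name := ctx.GlobalString(CodeFileFlag.Name); name != "" {
		var (
			hexcode []byte
			err     error
		)
		if name == "-" {
			hexcode, err = ioutil.ReadAll(os.Stdin) // '-' selects stdin
		} else {
			hexcode, err = ioutil.ReadFile(name)
		}
		if err != nil {
			return nil, err
		}
		return common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n"))), nil
	}
	if hex := ctx.GlobalString(CodeFlag.Name); hex != "" {
		return common.Hex2Bytes(hex), nil
	}
	if fn := ctx.Args().First(); len(fn) > 0 {
		src, err := ioutil.ReadFile(fn) // EASM source to compile
		if err != nil {
			return nil, err
		}
		bin, err := compiler.Compile(fn, src, false)
		if err != nil {
			return nil, err
		}
		return common.Hex2Bytes(bin), nil
	}
	return nil, nil // fall back to the code already set on the receiver in state
}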
|
||||
|
||||
initialGas := ctx.GlobalUint64(GasFlag.Name)
|
||||
runtimeConfig := runtime.Config{
|
||||
Origin: sender,
|
||||
@ -180,9 +187,9 @@ func runCmd(ctx *cli.Context) error {
|
||||
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
|
||||
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
|
||||
} else {
|
||||
receiver := common.StringToAddress("receiver")
|
||||
statedb.SetCode(receiver, code)
|
||||
|
||||
if len(code) > 0 {
|
||||
statedb.SetCode(receiver, code)
|
||||
}
|
||||
ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
}
|
||||
execTime := time.Since(tstart)
|
||||
@ -227,13 +234,13 @@ Gas used: %d
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
|
||||
}
|
||||
if tracer != nil {
|
||||
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime)
|
||||
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime, err)
|
||||
} else {
|
||||
fmt.Printf("0x%x\n", ret)
|
||||
if err != nil {
|
||||
fmt.Printf(" error: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf(" error: %v\n", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
cmd/evm/staterunner.go
Normal file
@ -0,0 +1,125 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var stateTestCommand = cli.Command{
|
||||
Action: stateTestCmd,
|
||||
Name: "statetest",
|
||||
Usage: "executes the given state tests",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
type StatetestResult struct {
|
||||
Name string `json:"name"`
|
||||
Pass bool `json:"pass"`
|
||||
Fork string `json:"fork"`
|
||||
Error string `json:"error,omitempty"`
|
||||
State *state.Dump `json:"state,omitempty"`
|
||||
}
|
||||
|
||||
func stateTestCmd(ctx *cli.Context) error {
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("path-to-test argument required")
|
||||
}
|
||||
// Configure the go-ethereum logger
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
// Configure the EVM logger
|
||||
config := &vm.LogConfig{
|
||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||
}
|
||||
var (
|
||||
tracer vm.Tracer
|
||||
debugger *vm.StructLogger
|
||||
)
|
||||
switch {
|
||||
case ctx.GlobalBool(MachineFlag.Name):
|
||||
tracer = NewJSONLogger(config, os.Stderr)
|
||||
|
||||
case ctx.GlobalBool(DebugFlag.Name):
|
||||
debugger = vm.NewStructLogger(config)
|
||||
tracer = debugger
|
||||
|
||||
default:
|
||||
debugger = vm.NewStructLogger(config)
|
||||
}
|
||||
// Load the test content from the input file
|
||||
src, err := ioutil.ReadFile(ctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var tests map[string]tests.StateTest
|
||||
if err = json.Unmarshal(src, &tests); err != nil {
|
||||
return err
|
||||
}
|
||||
// Iterate over all the tests, run them and aggregate the results
|
||||
cfg := vm.Config{
|
||||
Tracer: tracer,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||
}
|
||||
results := make([]StatetestResult, 0, len(tests))
|
||||
for key, test := range tests {
|
||||
for _, st := range test.Subtests() {
|
||||
// Run the test and aggregate the result
|
||||
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
||||
state, err := test.Run(st, cfg)
|
||||
if err != nil {
|
||||
// Test failed, mark as so and dump any state to aid debugging
|
||||
result.Pass, result.Error = false, err.Error()
|
||||
if ctx.GlobalBool(DumpFlag.Name) && state != nil {
|
||||
dump := state.RawDump()
|
||||
result.State = &dump
|
||||
}
|
||||
}
|
||||
// print state root for evmlab tracing (already committed above, so no need to delete objects again)
|
||||
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
|
||||
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
|
||||
}
|
||||
|
||||
results = append(results, *result)
|
||||
|
||||
// Print any structured logs collected
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
if debugger != nil {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, debugger.StructLogs())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
out, _ := json.MarshalIndent(results, "", " ")
|
||||
fmt.Println(string(out))
|
||||
return nil
|
||||
}
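statetest writes its aggregated results to stdout as a JSON array, one StatetestResult per test and fork, with the state dump attached only for failures when --dump is set. A downstream tool can consume that output by mirroring the tags above; a small hypothetical sketch (the evm binary name and the piping are assumptions):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// result mirrors the JSON tags of StatetestResult; the state dump is kept as
// raw JSON since its shape is defined by core/state.Dump.
type result struct {
	Name  string          `json:"name"`
	Pass  bool            `json:"pass"`
	Fork  string          `json:"fork"`
	Error string          `json:"error,omitempty"`
	State json.RawMessage `json:"state,omitempty"`
}

func main() {
	// e.g. evm statetest tests.json | this-program
	var results []result
	if err := json.NewDecoder(os.Stdin).Decode(&results); err != nil {
		fmt.Fprintln(os.Stderr, "decode:", err)
		os.Exit(1)
	}
	for _, r := range results {
		status := "PASS"
		if !r.Pass {
			status = "FAIL: " + r.Error
		}
		fmt.Printf("%s (%s): %s\n", r.Name, r.Fork, status)
	}
}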
|
@ -21,8 +21,10 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html/template"
|
||||
@ -33,6 +35,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -181,10 +184,10 @@ func main() {
|
||||
|
||||
// request represents an accepted funding request.
|
||||
type request struct {
|
||||
Username string `json:"username"` // GitHub user for displaying an avatar
|
||||
Account common.Address `json:"account"` // Ethereum address being funded
|
||||
Time time.Time `json:"time"` // Timestamp when te request was accepted
|
||||
Tx *types.Transaction `json:"tx"` // Transaction funding the account
|
||||
Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
|
||||
Account common.Address `json:"account"` // Ethereum address being funded
|
||||
Time time.Time `json:"time"` // Timestamp when the request was accepted
|
||||
Tx *types.Transaction `json:"tx"` // Transaction funding the account
|
||||
}
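This struct is what the faucet streams to connected browsers under the "requests" key, now carrying an avatar URL instead of a bare GitHub username. A client other than the bundled page could mirror the tags like so; a hypothetical sketch that leaves the transaction as raw JSON, since the UI only reads its hash.

package faucetclient

import (
	"encoding/json"
	"time"
)

// fundingRequest mirrors the JSON tags of the faucet's request struct.
type fundingRequest struct {
	Avatar  string          `json:"avatar"`
	Account string          `json:"account"` // 0x-prefixed address
	Time    time.Time       `json:"time"`
	Tx      json.RawMessage `json:"tx"`
}

// decodeRequests parses the "requests" array the faucet pushes to clients.
func decodeRequests(payload []byte) ([]fundingRequest, error) {
	var msg struct {
		Requests []fundingRequest `json:"requests"`
	}
	err := json.Unmarshal(payload, &msg)
	return msg.Requests, err
}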
|
||||
|
||||
// faucet represents a crypto faucet backed by an Ethereum light client.
|
||||
@ -299,6 +302,8 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// apiHandler handles requests for Ether grants and transaction statuses.
|
||||
func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
// Start tracking the connection and drop at the end
|
||||
defer conn.Close()
|
||||
|
||||
f.lock.Lock()
|
||||
f.conns = append(f.conns, conn)
|
||||
f.lock.Unlock()
|
||||
@ -313,25 +318,50 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}()
|
||||
// Send a few initial stats to the client
|
||||
balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil)
|
||||
nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil)
|
||||
// Gather the initial stats from the network to report
|
||||
var (
|
||||
head *types.Header
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
err error
|
||||
)
|
||||
for {
|
||||
// Attempt to retrieve the stats, may error on no faucet connectivity
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
head, err = f.client.HeaderByNumber(ctx, nil)
|
||||
if err == nil {
|
||||
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
|
||||
if err == nil {
|
||||
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
|
||||
websocket.JSON.Send(conn, map[string]interface{}{
|
||||
// If stats retrieval failed, wait a bit and retry
|
||||
if err != nil {
|
||||
if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil {
|
||||
log.Warn("Failed to send faucet error to client", "err", err)
|
||||
return
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
continue
|
||||
}
|
||||
// Initial stats reported successfully, proceed with user interaction
|
||||
break
|
||||
}
|
||||
// Send over the initial stats and the latest header
|
||||
if err = send(conn, map[string]interface{}{
|
||||
"funds": balance.Div(balance, ether),
|
||||
"funded": nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
})
|
||||
// Send the initial block to the client
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
header, err := f.client.HeaderByNumber(ctx, nil)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to retrieve latest header", "err", err)
|
||||
} else {
|
||||
websocket.JSON.Send(conn, header)
|
||||
}, 3*time.Second); err != nil {
|
||||
log.Warn("Failed to send initial stats to client", "err", err)
|
||||
return
|
||||
}
|
||||
if err = send(conn, head, 3*time.Second); err != nil {
|
||||
log.Warn("Failed to send initial header to client", "err", err)
|
||||
return
|
||||
}
|
||||
// Keep reading requests from the websocket until the connection breaks
|
||||
for {
|
||||
@ -341,18 +371,25 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
Tier uint `json:"tier"`
|
||||
Captcha string `json:"captcha"`
|
||||
}
|
||||
if err := websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
if err = websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(msg.URL, "https://gist.github.com/") {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "URL doesn't link to GitHub Gists"})
|
||||
if !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
|
||||
!strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
|
||||
if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil {
|
||||
log.Warn("Failed to send URL error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if msg.Tier >= uint(*tiersFlag) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid funding tier requested"})
|
||||
if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
|
||||
log.Warn("Failed to send tier error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "gist", msg.URL, "tier", msg.Tier)
|
||||
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
|
||||
|
||||
// If captcha verifications are enabled, make sure we're not dealing with a robot
|
||||
if *captchaToken != "" {
|
||||
@ -362,7 +399,10 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
|
||||
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send captcha post error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
var result struct {
|
||||
@ -372,73 +412,55 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send captcha decode error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !result.Success {
|
||||
log.Warn("Captcha verification failed", "err", string(result.Errors))
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Beep-bop, you're a robot!"})
|
||||
if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
|
||||
log.Warn("Failed to send captcha failure to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Retrieve the gist from the GitHub Gist APIs
|
||||
parts := strings.Split(msg.URL, "/")
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
|
||||
if *githubUser != "" {
|
||||
req.SetBasicAuth(*githubUser, *githubToken)
|
||||
// Retrieve the Ethereum address to fund, the requesting user and a profile picture
|
||||
var (
|
||||
username string
|
||||
avatar string
|
||||
address common.Address
|
||||
)
|
||||
switch {
|
||||
case strings.HasPrefix(msg.URL, "https://gist.github.com/"):
|
||||
username, avatar, address, err = authGitHub(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
|
||||
username, avatar, address, err = authTwitter(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
|
||||
username, avatar, address, err = authGooglePlus(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
|
||||
username, avatar, address, err = authFacebook(msg.URL)
|
||||
default:
|
||||
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
|
||||
}
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
var gist struct {
|
||||
Owner struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"owner"`
|
||||
Files map[string]struct {
|
||||
Content string `json:"content"`
|
||||
} `json:"files"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&gist)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
if gist.Owner.Login == "" {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Anonymous Gists not allowed"})
|
||||
continue
|
||||
}
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
var address common.Address
|
||||
for _, file := range gist.Files {
|
||||
if len(file.Content) == 2+common.AddressLength*2 {
|
||||
address = common.HexToAddress(file.Content)
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send prefix error to client", "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if address == (common.Address{}) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "No Ethereum address found to fund"})
|
||||
continue
|
||||
}
|
||||
// Validate the user's existence since the API is unhelpful here
|
||||
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
res.Body.Close()
|
||||
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid user... boom!"})
|
||||
continue
|
||||
}
|
||||
// Ensure the user didn't request funds too recently
|
||||
f.lock.Lock()
|
||||
var (
|
||||
fund bool
|
||||
timeout time.Time
|
||||
)
|
||||
if timeout = f.timeouts[gist.Owner.Login]; time.Now().After(timeout) {
|
||||
if timeout = f.timeouts[username]; time.Now().After(timeout) {
|
||||
// User wasn't funded recently, create the funding transaction
|
||||
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
|
||||
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
|
||||
@ -447,33 +469,45 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil)
|
||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
f.lock.Unlock()
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send transaction creation error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Submit the transaction and mark as funded if successful
|
||||
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
f.lock.Unlock()
|
||||
if err = sendError(conn, err); err != nil {
|
||||
log.Warn("Failed to send transaction transmission error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
f.reqs = append(f.reqs, &request{
|
||||
Username: gist.Owner.Login,
|
||||
Account: address,
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
Avatar: avatar,
|
||||
Account: address,
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
})
|
||||
f.timeouts[gist.Owner.Login] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
|
||||
f.timeouts[username] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
|
||||
fund = true
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
// Send an error if too frequent funding, otherwise a success
|
||||
if !fund {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))})
|
||||
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil {
|
||||
log.Warn("Failed to send funding error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
websocket.JSON.Send(conn, map[string]string{"success": fmt.Sprintf("Funding request accepted for %s into %s", gist.Owner.Login, address.Hex())})
|
||||
if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
|
||||
log.Warn("Failed to send funding success to client", "err", err)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case f.update <- struct{}{}:
|
||||
default:
|
||||
@ -496,11 +530,31 @@ func (f *faucet) loop() {
|
||||
select {
|
||||
case head := <-heads:
|
||||
// New chain head arrived, query the current stats and stream to clients
|
||||
balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil)
|
||||
balance = new(big.Int).Div(balance, ether)
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
price *big.Int
|
||||
err error
|
||||
)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
|
||||
if err == nil {
|
||||
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
|
||||
if err == nil {
|
||||
price, err = f.client.SuggestGasPrice(ctx)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
|
||||
price, _ := f.client.SuggestGasPrice(context.Background())
|
||||
nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil)
|
||||
// If querying the data failed, try for the next block
|
||||
if err != nil {
|
||||
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
|
||||
continue
|
||||
} else {
|
||||
log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price)
|
||||
}
|
||||
// Faucet state retrieved, update locally and send to clients
|
||||
balance = new(big.Int).Div(balance, ether)
|
||||
|
||||
f.lock.Lock()
|
||||
f.price, f.nonce = price, nonce
|
||||
@ -511,17 +565,17 @@ func (f *faucet) loop() {
|
||||
|
||||
f.lock.RLock()
|
||||
for _, conn := range f.conns {
|
||||
if err := websocket.JSON.Send(conn, map[string]interface{}{
|
||||
if err := send(conn, map[string]interface{}{
|
||||
"funds": balance,
|
||||
"funded": f.nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
}); err != nil {
|
||||
}, time.Second); err != nil {
|
||||
log.Warn("Failed to send stats to client", "err", err)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
if err := websocket.JSON.Send(conn, head); err != nil {
|
||||
if err := send(conn, head, time.Second); err != nil {
|
||||
log.Warn("Failed to send header to client", "err", err)
|
||||
conn.Close()
|
||||
}
|
||||
@ -532,7 +586,7 @@ func (f *faucet) loop() {
|
||||
// Pending requests updated, stream to clients
|
||||
f.lock.RLock()
|
||||
for _, conn := range f.conns {
|
||||
if err := websocket.JSON.Send(conn, map[string]interface{}{"requests": f.reqs}); err != nil {
|
||||
if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
|
||||
log.Warn("Failed to send requests to client", "err", err)
|
||||
conn.Close()
|
||||
}
|
||||
@ -541,3 +595,184 @@ func (f *faucet) loop() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send transmits a data packet to the remote end of the websocket, also
|
||||
// setting a write deadline to prevent it from blocking forever on a slow node.
|
||||
func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error {
|
||||
if timeout == 0 {
|
||||
timeout = 60 * time.Second
|
||||
}
|
||||
conn.SetWriteDeadline(time.Now().Add(timeout))
|
||||
return websocket.JSON.Send(conn, value)
|
||||
}
|
||||
|
||||
// sendError transmits an error to the remote end of the websocket, also setting
|
||||
// the write deadline to 1 second to prevent waiting forever.
|
||||
func sendError(conn *websocket.Conn, err error) error {
|
||||
return send(conn, map[string]string{"error": err.Error()}, time.Second)
|
||||
}
|
||||
|
||||
// sendSuccess transmits a success message to the remote end of the websocket, also
|
||||
// setting the write deadline to 1 second to prevent waiting forever.
|
||||
func sendSuccess(conn *websocket.Conn, msg string) error {
|
||||
return send(conn, map[string]string{"success": msg}, time.Second)
|
||||
}
|
||||
|
||||
// authGitHub tries to authenticate a faucet request using GitHub gists, returning
|
||||
// the username, avatar URL and Ethereum address to fund on success.
|
||||
func authGitHub(url string) (string, string, common.Address, error) {
|
||||
// Retrieve the gist from the GitHub Gist APIs
|
||||
parts := strings.Split(url, "/")
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
|
||||
if *githubUser != "" {
|
||||
req.SetBasicAuth(*githubUser, *githubToken)
|
||||
}
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
var gist struct {
|
||||
Owner struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"owner"`
|
||||
Files map[string]struct {
|
||||
Content string `json:"content"`
|
||||
} `json:"files"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&gist)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
if gist.Owner.Login == "" {
|
||||
return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
|
||||
}
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
var address common.Address
|
||||
for _, file := range gist.Files {
|
||||
content := strings.TrimSpace(file.Content)
|
||||
if len(content) == 2+common.AddressLength*2 {
|
||||
address = common.HexToAddress(content)
|
||||
}
|
||||
}
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
// Validate the user's existence since the API is unhelpful here
|
||||
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", common.Address{}, errors.New("Invalid user... boom!")
|
||||
}
|
||||
// Everything passed validation, return the gathered infos
|
||||
return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
|
||||
}
|
||||
|
||||
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
|
||||
// the username, avatar URL and Ethereum address to fund on success.
|
||||
func authTwitter(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Twitter's API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so just load the public posts and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
reader, err := zlib.NewReader(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
body, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@twitter", avatar, address, nil
|
||||
}
|
||||
|
||||
// authGooglePlus tries to authenticate a faucet request using GooglePlus posts,
|
||||
// returning the username, avatar URL and Ethereum address to fund on success.
|
||||
func authGooglePlus(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
||||
return "", "", common.Address{}, errors.New("Invalid Google+ post URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Google's API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so just load the public posts and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile("src=\"([^\"]+googleusercontent.com[^\"]+photo.jpg)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@google+", avatar, address, nil
|
||||
}
|
||||
|
||||
// authFacebook tries to authenticate a faucet request using Facebook posts,
|
||||
// returning the username, avatar URL and Ethereum address to fund on success.
|
||||
func authFacebook(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
||||
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Facebook's Graph API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so just load the public posts and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@facebook", avatar, address, nil
|
||||
}
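authTwitter, authGooglePlus and authFacebook all end the same way: fetch the public page, take the first 0x-prefixed 40-hex-digit string as the address to fund, and pull an avatar out of a provider-specific img src pattern (only the Twitter variant differs by zlib-decoding the response first). That shared tail could be expressed once; a hedged sketch of such a helper, not how the file is actually factored, assuming the imports faucet.go already has.

// scrapeFunding is a hypothetical common tail for the scrapers above: extract
// the Ethereum address and an avatar URL from a public social page.
func scrapeFunding(url string, avatarPattern *regexp.Regexp) (string, common.Address, error) {
	res, err := http.Get(url)
	if err != nil {
		return "", common.Address{}, err
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", common.Address{}, err
	}
	address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
	if address == (common.Address{}) {
		return "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	var avatar string
	if parts := avatarPattern.FindStringSubmatch(string(body)); len(parts) == 2 {
		avatar = parts[1]
	}
	return avatar, address, nil
}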
|
||||
|
@ -5,7 +5,7 @@
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<title>{{.Network}}: GitHub Faucet</title>
|
||||
<title>{{.Network}}: Authenticated Faucet</title>
|
||||
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
|
||||
@ -43,13 +43,13 @@
|
||||
<div class="container">
|
||||
<div class="row" style="margin-bottom: 16px;">
|
||||
<div class="col-lg-12">
|
||||
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} GitHub Authenticated Faucet <i class="fa fa-github-alt" aria-hidden="true"></i></h1>
|
||||
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-lg-8 col-lg-offset-2">
|
||||
<div class="input-group">
|
||||
<input id="gist" type="text" class="form-control" placeholder="GitHub Gist URL containing your Ethereum address...">
|
||||
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address...">
|
||||
<span class="input-group-btn">
|
||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
||||
@ -80,8 +80,21 @@
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-12">
|
||||
<h3>How does this work?</h3>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limits.</p>
|
||||
<p>To request funds, simply create a <a href="https://gist.github.com/" target="_about:blank">GitHub Gist</a> with your Ethereum address pasted into the contents (the file name doesn't matter), copy paste the gists URL into the above input box and fire away! You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to certain common 3rd party accounts. Anyone having a GitHub, Twitter, Google+ or Facebook account may request funds within the permitted limits.</p>
|
||||
<dl class="dl-horizontal">
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-github-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via GitHub, create a <a href="https://gist.github.com/" target="_about:blank">gist</a> with your Ethereum address embedded into the content (the file name doesn't matter).<br/>Copy-paste the gists URL into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-google-plus-official" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Google Plus, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the posts URL into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
|
||||
</dl>
|
||||
<p>You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
@ -93,10 +106,22 @@
|
||||
var attempt = 0;
|
||||
var server;
|
||||
var tier = 0;
|
||||
var requests = [];
|
||||
|
||||
// Define a function that creates closures to drop old requests
|
||||
var dropper = function(hash) {
|
||||
return function() {
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
if (requests[i].tx.hash == hash) {
|
||||
requests.splice(i, 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
// Define the function that submits a gist url to the server
|
||||
var submit = function({{if .Recaptcha}}captcha{{end}}) {
|
||||
server.send(JSON.stringify({url: $("#gist")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
grecaptcha.reset();{{end}}
|
||||
};
|
||||
// Define a method to reconnect upon server loss
|
||||
@ -127,21 +152,85 @@
|
||||
$("#block").text(parseInt(msg.number, 16));
|
||||
}
|
||||
if (msg.error !== undefined) {
|
||||
noty({layout: 'topCenter', text: msg.error, type: 'error'});
|
||||
noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
|
||||
}
|
||||
if (msg.success !== undefined) {
|
||||
noty({layout: 'topCenter', text: msg.success, type: 'success'});
|
||||
noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
|
||||
}
|
||||
if (msg.requests !== undefined && msg.requests !== null) {
|
||||
// Mark all previous requests missing as done
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
|
||||
break;
|
||||
}
|
||||
if (requests[i].time != "") {
|
||||
requests[i].time = "";
|
||||
setTimeout(dropper(requests[i].tx.hash), 3000);
|
||||
}
|
||||
}
|
||||
// Append any new requests into our local collection
|
||||
var common = -1;
|
||||
if (requests.length > 0) {
|
||||
for (var i=0; i<msg.requests.length; i++) {
|
||||
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
|
||||
common = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
for (var i=common+1; i<msg.requests.length; i++) {
|
||||
requests.push(msg.requests[i]);
|
||||
}
|
||||
// Iterate over our entire local collection and re-render the funding table
|
||||
var content = "";
|
||||
for (var i=0; i<msg.requests.length; i++) {
|
||||
content += "<tr><td><div style=\"background: url('https://github.com/" + msg.requests[i].username + ".png?size=64'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td><td><pre>" + msg.requests[i].account + "</pre></td><td style=\"width: 100%; text-align: center; vertical-align: middle;\">" + moment.duration(moment(msg.requests[i].time).unix()-moment().unix(), 'seconds').humanize(true) + "</td></tr>";
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
var done = requests[i].time == "";
|
||||
var elapsed = moment().unix()-moment(requests[i].time).unix();
|
||||
|
||||
content += "<tr id='" + requests[i].tx.hash + "'>";
|
||||
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
|
||||
content += " <td><pre>" + requests[i].account + "</pre></td>";
|
||||
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
|
||||
if (done) {
|
||||
content += " funded";
|
||||
} else {
|
||||
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
|
||||
}
|
||||
content += " <div class='progress' style='height: 4px; margin: 0;'>";
|
||||
if (done) {
|
||||
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
||||
} else if (elapsed > 30) {
|
||||
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
||||
} else {
|
||||
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
|
||||
}
|
||||
content += " </div>";
|
||||
content += " </td>";
|
||||
content += "</tr>";
|
||||
}
|
||||
$("#requests").html("<tbody>" + content + "</tbody>");
|
||||
}
|
||||
}
|
||||
server.onclose = function() { setTimeout(reconnect, 3000); };
|
||||
}
|
||||
// Start a UI updater to push the progress bars forward until they are done
|
||||
setInterval(function() {
|
||||
$('.progress-bar').each(function() {
|
||||
var progress = Number($(this).attr('aria-valuenow')) + 1;
|
||||
if (progress < 30) {
|
||||
$(this).attr('aria-valuenow', progress);
|
||||
$(this).css('width', (progress * 100 / 30) + '%');
|
||||
} else if (progress == 30) {
|
||||
$(this).css('width', '100%');
|
||||
$(this).addClass("progress-bar-danger");
|
||||
}
|
||||
})
|
||||
$('.timer').each(function() {
|
||||
var index = Number($(this).attr('id').substring(5));
|
||||
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
|
||||
})
|
||||
}, 1000);
|
||||
|
||||
// Establish a websocket connection to the API server
|
||||
reconnect();
|
||||
</script>{{if .Recaptcha}}
|
||||
|
File diff suppressed because one or more lines are too long
@ -291,15 +291,28 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
|
||||
|
||||
// accountCreate creates a new account into the keystore defined by the CLI flags.
|
||||
func accountCreate(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
cfg := gethConfig{Node: defaultNodeConfig()}
|
||||
// Load config file.
|
||||
if file := ctx.GlobalString(configFileFlag.Name); file != "" {
|
||||
if err := loadConfig(file, &cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
utils.SetNodeConfig(ctx, &cfg.Node)
|
||||
scryptN, scryptP, keydir, err := cfg.Node.AccountConfig()
|
||||
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read configuration: %v", err)
|
||||
}
|
||||
|
||||
password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
account, err := ks.NewAccount(password)
|
||||
address, err := keystore.StoreKey(keydir, password, scryptN, scryptP)
|
||||
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to create account: %v", err)
|
||||
}
|
||||
fmt.Printf("Address: {%x}\n", account.Address)
|
||||
fmt.Printf("Address: {%x}\n", address)
|
||||
return nil
|
||||
}
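The rewrite above bypasses the node's account manager and writes the key file directly via keystore.StoreKey. A minimal, hedged sketch of that flow in isolation, assuming the StoreKey signature shown in this diff (key directory, password and scrypt parameters in, generated address out) and the standard scrypt constants exported by the keystore package; the directory and password are placeholders.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	// Placeholder inputs; geth derives these from the CLI flags and config file.
	keydir := "/tmp/keystore"
	password := "do-not-forget-this"

	// Generate a fresh key, encrypt it with the password and write it to keydir.
	address, err := keystore.StoreKey(keydir, password, keystore.StandardScryptN, keystore.StandardScryptP)
	if err != nil {
		fmt.Println("Failed to create account:", err)
		return
	}
	fmt.Printf("Address: {%x}\n", address)
}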
|
||||
|
||||
|
@ -134,7 +134,7 @@ Fatal: could not decrypt key with given passphrase
|
||||
func TestUnlockFlag(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
geth.Expect(`
|
||||
@ -146,7 +146,7 @@ Passphrase: {{.InputLine "foobar"}}
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
"=0xf466859eAD1932D743d622CB74FC058882E8648A",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
@ -158,7 +158,7 @@ Passphrase: {{.InputLine "foobar"}}
|
||||
func TestUnlockFlagWrongPassword(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
@ -177,7 +177,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
|
||||
func TestUnlockFlagMultiIndex(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.Expect(`
|
||||
@ -191,8 +191,8 @@ Passphrase: {{.InputLine "foobar"}}
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
"=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
|
||||
"=0x289d485D9771714CCe91D3393D764E1311907ACc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
@ -204,15 +204,15 @@ Passphrase: {{.InputLine "foobar"}}
|
||||
func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--password", "testdata/passwords.txt", "--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
"=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
|
||||
"=0x289d485D9771714CCe91D3393D764E1311907ACc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
@ -224,7 +224,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--password", "testdata/wrong-passwords.txt", "--unlock", "0,2")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
@ -235,7 +235,7 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given passphrase)
|
||||
func TestUnlockFlagAmbiguous(t *testing.T) {
|
||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
defer geth.ExpectExit()
|
||||
@ -261,7 +261,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
"=0xf466859eAD1932D743d622CB74FC058882E8648A",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
@ -273,7 +273,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
|
||||
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.ExpectExit()
|
||||
|
||||
|
@ -31,7 +31,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
@ -71,7 +73,7 @@ It expects the genesis file as argument.`,
|
||||
The import command imports blocks from an RLP-encoded form. The form can be one file
|
||||
with several RLP-encoded blocks, or several files can be used.
|
||||
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
}
|
||||
exportCommand = cli.Command{
|
||||
@ -90,6 +92,23 @@ Requires a first argument of the file to write to.
|
||||
Optional second and third arguments control the first and
|
||||
last block to write. In this mode, the file will be appended
|
||||
if already existing.`,
|
||||
}
|
||||
copydbCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(copyDb),
|
||||
Name: "copydb",
|
||||
Usage: "Create a local chain from a target chaindata folder",
|
||||
ArgsUsage: "<sourceChaindataDir>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The first argument must be the directory containing the blockchain to download from`,
|
||||
}
|
||||
removedbCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(removeDB),
|
||||
@ -268,6 +287,54 @@ func exportChain(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDb(ctx *cli.Context) error {
|
||||
// Ensure we have a source chain directory to copy
|
||||
if len(ctx.Args()) != 1 {
|
||||
utils.Fatalf("Source chaindata directory path argument missing")
|
||||
}
|
||||
// Initialize a new chain for the running node to sync into
|
||||
stack := makeFullNode(ctx)
|
||||
chain, chainDb := utils.MakeChain(ctx, stack)
|
||||
|
||||
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
|
||||
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
|
||||
|
||||
// Create a source peer to satisfy downloader requests from
|
||||
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peer := downloader.NewFakePeer("local", db, hc, dl)
|
||||
if err = dl.RegisterPeer("local", 63, peer); err != nil {
|
||||
return err
|
||||
}
|
||||
// Synchronise with the simulated peer
|
||||
start := time.Now()
|
||||
|
||||
currentHeader := hc.CurrentHeader()
|
||||
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
|
||||
return err
|
||||
}
|
||||
for dl.Synchronising() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
fmt.Printf("Database copy done in %v\n", time.Since(start))
|
||||
|
||||
// Compact the entire database to remove any sync overhead
|
||||
start = time.Now()
|
||||
fmt.Println("Compacting entire database...")
|
||||
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
|
||||
utils.Fatalf("Compaction failed: %v", err)
|
||||
}
|
||||
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeDB(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
|
||||
|
@ -30,6 +30,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/contracts/release"
|
||||
"github.com/ethereum/go-ethereum/dashboard"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
@ -76,10 +77,11 @@ type ethstatsConfig struct {
|
||||
}
|
||||
|
||||
type gethConfig struct {
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
Dashboard dashboard.Config
|
||||
}
|
||||
|
||||
func loadConfig(file string, cfg *gethConfig) error {
|
||||
@ -110,9 +112,10 @@ func defaultNodeConfig() node.Config {
|
||||
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
// Load defaults.
|
||||
cfg := gethConfig{
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
Dashboard: dashboard.DefaultConfig,
|
||||
}
|
||||
|
||||
// Load config file.
|
||||
@ -134,6 +137,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
}
|
||||
|
||||
utils.SetShhConfig(ctx, stack, &cfg.Shh)
|
||||
utils.SetDashboardConfig(ctx, &cfg.Dashboard)
|
||||
|
||||
return stack, cfg
|
||||
}
|
||||
@ -153,9 +157,12 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
utils.RegisterDashboardService(stack, &cfg.Dashboard)
|
||||
}
|
||||
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
|
||||
shhEnabled := enableWhisper(ctx)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name)
|
||||
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name)
|
||||
if shhEnabled || shhAutoEnabled {
|
||||
if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) {
|
||||
cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name))
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -60,6 +61,11 @@ var (
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.DashboardEnabledFlag,
|
||||
utils.DashboardAddrFlag,
|
||||
utils.DashboardPortFlag,
|
||||
utils.DashboardRefreshFlag,
|
||||
utils.DashboardAssetsFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
@ -67,6 +73,8 @@ var (
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
@ -96,7 +104,8 @@ var (
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.DeveloperFlag,
|
||||
utils.DeveloperPeriodFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
@ -143,6 +152,7 @@ func init() {
|
||||
initCommand,
|
||||
importCommand,
|
||||
exportCommand,
|
||||
copydbCommand,
|
||||
removedbCommand,
|
||||
dumpCommand,
|
||||
// See monitorcmd.go:
|
||||
@ -155,6 +165,7 @@ func init() {
|
||||
attachCommand,
|
||||
javascriptCommand,
|
||||
// See misccmd.go:
|
||||
makecacheCommand,
|
||||
makedagCommand,
|
||||
versionCommand,
|
||||
bugCommand,
|
||||
@ -162,6 +173,7 @@ func init() {
|
||||
// See config.go
|
||||
dumpConfigCommand,
|
||||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
app.Flags = append(app.Flags, nodeFlags...)
|
||||
app.Flags = append(app.Flags, rpcFlags...)
|
||||
@ -234,31 +246,37 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}
|
||||
stateReader := ethclient.NewClient(rpcClient)
|
||||
|
||||
// Open and self derive any wallets already attached
|
||||
// Open any wallets already attached
|
||||
for _, wallet := range stack.AccountManager().Wallets() {
|
||||
if err := wallet.Open(""); err != nil {
|
||||
log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err)
|
||||
} else {
|
||||
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||
}
|
||||
}
|
||||
// Listen for wallet events till termination
|
||||
for event := range events {
|
||||
if event.Arrive {
|
||||
switch event.Kind {
|
||||
case accounts.WalletArrived:
|
||||
if err := event.Wallet.Open(""); err != nil {
|
||||
log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err)
|
||||
}
|
||||
case accounts.WalletOpened:
|
||||
status, _ := event.Wallet.Status()
|
||||
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
|
||||
|
||||
if event.Wallet.URL().Scheme == "ledger" {
|
||||
event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
|
||||
} else {
|
||||
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", event.Wallet.Status())
|
||||
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
|
||||
}
|
||||
} else {
|
||||
|
||||
case accounts.WalletDropped:
|
||||
log.Info("Old wallet dropped", "url", event.Wallet.URL())
|
||||
event.Wallet.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
|
@ -18,9 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -33,14 +31,27 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
makedagCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash DAG (for testing)",
|
||||
makecacheCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makecache),
|
||||
Name: "makecache",
|
||||
Usage: "Generate ethash verification cache (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `
|
||||
The makedag command generates an ethash DAG in /tmp/dag.
|
||||
The makecache command generates an ethash cache in <outputDir>.
|
||||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
`,
|
||||
}
|
||||
makedagCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash mining DAG (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `
|
||||
The makedag command generates an ethash DAG in <outputDir>.
|
||||
|
||||
This command exists to support the system testing project.
|
||||
Regular users do not need to execute it.
|
||||
@ -65,33 +76,33 @@ The output of this command is supposed to be machine-readable.
|
||||
}
|
||||
)
|
||||
|
||||
// makecache generates an ethash verification cache into the provided folder.
|
||||
func makecache(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
|
||||
}
|
||||
block, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
utils.Fatalf("Invalid block number: %v", err)
|
||||
}
|
||||
ethash.MakeCache(block, args[1])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makedag generates an ethash mining DAG into the provided folder.
|
||||
func makedag(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
wrongArgs := func() {
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
|
||||
}
|
||||
switch {
|
||||
case len(args) == 2:
|
||||
blockNum, err := strconv.ParseUint(args[0], 0, 64)
|
||||
dir := args[1]
|
||||
if err != nil {
|
||||
wrongArgs()
|
||||
} else {
|
||||
dir = filepath.Clean(dir)
|
||||
// seems to require a trailing slash
|
||||
if !strings.HasSuffix(dir, "/") {
|
||||
dir = dir + "/"
|
||||
}
|
||||
_, err = ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't find dir")
|
||||
}
|
||||
fmt.Println("making DAG, this could take awhile...")
|
||||
ethash.MakeDataset(blockNum, dir)
|
||||
}
|
||||
default:
|
||||
wrongArgs()
|
||||
block, err := strconv.ParseUint(args[0], 0, 64)
|
||||
if err != nil {
|
||||
utils.Fatalf("Invalid block number: %v", err)
|
||||
}
|
||||
ethash.MakeDataset(block, args[1])
|
||||
|
||||
return nil
|
||||
}
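Both commands above are thin wrappers: after argument parsing they call straight into the ethash package. A hedged sketch of the equivalent direct calls; the block number and output directories are illustrative, and DAG generation is slow and disk-hungry.

package main

import "github.com/ethereum/go-ethereum/consensus/ethash"

func main() {
	// Verification cache for the epoch containing block 1,000,000
	// (what `geth makecache 1000000 /tmp/ethash-cache` would produce).
	ethash.MakeCache(1000000, "/tmp/ethash-cache")

	// Full mining DAG for the same epoch
	// (what `geth makedag 1000000 /tmp/ethash-dag` would produce).
	ethash.MakeDataset(1000000, "/tmp/ethash-dag")
}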
|
||||
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AppHelpTemplate is the test template for the default, global app help topic.
|
||||
@ -72,7 +73,6 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.NetworkIdFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.IdentityFlag,
|
||||
@ -81,6 +81,12 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
},
|
||||
{Name: "DEVELOPER CHAIN",
|
||||
Flags: []cli.Flag{
|
||||
utils.DeveloperFlag,
|
||||
utils.DeveloperPeriodFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ETHASH",
|
||||
Flags: []cli.Flag{
|
||||
@ -92,10 +98,22 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
},
|
||||
},
|
||||
//{
|
||||
// Name: "DASHBOARD",
|
||||
// Flags: []cli.Flag{
|
||||
// utils.DashboardEnabledFlag,
|
||||
// utils.DashboardAddrFlag,
|
||||
// utils.DashboardPortFlag,
|
||||
// utils.DashboardRefreshFlag,
|
||||
// utils.DashboardAssetsFlag,
|
||||
// },
|
||||
//},
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
utils.TxPoolRejournalFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
@ -261,6 +279,9 @@ func init() {
|
||||
uncategorized := []cli.Flag{}
|
||||
for _, flag := range data.(*cli.App).Flags {
|
||||
if _, ok := categorized[flag.String()]; !ok {
|
||||
if strings.HasPrefix(flag.GetName(), "dashboard") {
|
||||
continue
|
||||
}
|
||||
uncategorized = append(uncategorized, flag)
|
||||
}
|
||||
}
|
||||
|
414
cmd/p2psim/main.go
Normal file
@ -0,0 +1,414 @@
|
||||
// p2psim provides a command-line client for a simulation HTTP API.
|
||||
//
|
||||
// Here is an example of creating a 2 node network with the first node
|
||||
// connected to the second:
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node01
|
||||
//
|
||||
// $ p2psim node start node01
|
||||
// Started node01
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node02
|
||||
//
|
||||
// $ p2psim node start node02
|
||||
// Started node02
|
||||
//
|
||||
// $ p2psim node connect node01 node02
|
||||
// Connected node01 to node02
|
||||
//
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var client *simulations.Client
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Usage = "devp2p simulation command-line client"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "api",
|
||||
Value: "http://localhost:8888",
|
||||
Usage: "simulation API URL",
|
||||
EnvVar: "P2PSIM_API_URL",
|
||||
},
|
||||
}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
client = simulations.NewClient(ctx.GlobalString("api"))
|
||||
return nil
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "show network information",
|
||||
Action: showNetwork,
|
||||
},
|
||||
{
|
||||
Name: "events",
|
||||
Usage: "stream network events",
|
||||
Action: streamNetwork,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "current",
|
||||
Usage: "get existing nodes and conns first",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filter",
|
||||
Value: "",
|
||||
Usage: "message filter",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "snapshot",
|
||||
Usage: "create a network snapshot to stdout",
|
||||
Action: createSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "load",
|
||||
Usage: "load a network snapshot from stdin",
|
||||
Action: loadSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "node",
|
||||
Usage: "manage simulation nodes",
|
||||
Action: listNodes,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "list nodes",
|
||||
Action: listNodes,
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "create a node",
|
||||
Action: createNode,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Value: "",
|
||||
Usage: "node name",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "services",
|
||||
Value: "",
|
||||
Usage: "node services (comma separated)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Value: "",
|
||||
Usage: "node private key (hex encoded)",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "show",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "show node information",
|
||||
Action: showNode,
|
||||
},
|
||||
{
|
||||
Name: "start",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "start a node",
|
||||
Action: startNode,
|
||||
},
|
||||
{
|
||||
Name: "stop",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "stop a node",
|
||||
Action: stopNode,
|
||||
},
|
||||
{
|
||||
Name: "connect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "connect a node to a peer node",
|
||||
Action: connectNode,
|
||||
},
|
||||
{
|
||||
Name: "disconnect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "disconnect a node from a peer node",
|
||||
Action: disconnectNode,
|
||||
},
|
||||
{
|
||||
Name: "rpc",
|
||||
ArgsUsage: "<node> <method> [<args>]",
|
||||
Usage: "call a node RPC method",
|
||||
Action: rpcNode,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "subscribe",
|
||||
Usage: "method is a subscription",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.Run(os.Args)
|
||||
}
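The CLI above is a thin layer over the simulations HTTP client, so the two-node example from the package comment can also be driven programmatically. A hedged sketch using only calls that appear in this file; it assumes the default API URL and that the server assigns node names when the config leaves them empty.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

func main() {
	client := simulations.NewClient("http://localhost:8888")

	// Create and start two nodes.
	var names []string
	for i := 0; i < 2; i++ {
		node, err := client.CreateNode(&adapters.NodeConfig{})
		if err != nil {
			log.Fatal(err)
		}
		if err := client.StartNode(node.Name); err != nil {
			log.Fatal(err)
		}
		names = append(names, node.Name)
	}
	// Connect the first node to the second.
	if err := client.ConnectNode(names[0], names[1]); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Connected", names[0], "to", names[1])
}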
|
||||
|
||||
func showNetwork(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
network, err := client.GetNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NODES\t%d\n", len(network.Nodes))
|
||||
fmt.Fprintf(w, "CONNS\t%d\n", len(network.Conns))
|
||||
return nil
|
||||
}
|
||||
|
||||
func streamNetwork(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
events := make(chan *simulations.Event)
|
||||
sub, err := client.SubscribeNetwork(events, simulations.SubscribeOpts{
|
||||
Current: ctx.Bool("current"),
|
||||
Filter: ctx.String("filter"),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(ctx.App.Writer)
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
if err := enc.Encode(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createSnapshot(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap, err := client.CreateSnapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(os.Stdout).Encode(snap)
|
||||
}
|
||||
|
||||
func loadSnapshot(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap := &simulations.Snapshot{}
|
||||
if err := json.NewDecoder(os.Stdin).Decode(snap); err != nil {
|
||||
return err
|
||||
}
|
||||
return client.LoadSnapshot(snap)
|
||||
}
|
||||
|
||||
func listNodes(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodes, err := client.GetNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\tPROTOCOLS\tID\n")
|
||||
for _, node := range nodes {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", node.Name, strings.Join(protocolList(node), ","), node.ID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func protocolList(node *p2p.NodeInfo) []string {
|
||||
protos := make([]string, 0, len(node.Protocols))
|
||||
for name := range node.Protocols {
|
||||
protos = append(protos, name)
|
||||
}
|
||||
return protos
|
||||
}
|
||||
|
||||
func createNode(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
config := &adapters.NodeConfig{
|
||||
Name: ctx.String("name"),
|
||||
}
|
||||
if key := ctx.String("key"); key != "" {
|
||||
privKey, err := crypto.HexToECDSA(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ID = discover.PubkeyID(&privKey.PublicKey)
|
||||
config.PrivateKey = privKey
|
||||
}
|
||||
if services := ctx.String("services"); services != "" {
|
||||
config.Services = strings.Split(services, ",")
|
||||
}
|
||||
node, err := client.CreateNode(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Created", node.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func showNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
node, err := client.GetNode(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\t%s\n", node.Name)
|
||||
fmt.Fprintf(w, "PROTOCOLS\t%s\n", strings.Join(protocolList(node), ","))
|
||||
fmt.Fprintf(w, "ID\t%s\n", node.ID)
|
||||
fmt.Fprintf(w, "ENODE\t%s\n", node.Enode)
|
||||
for name, proto := range node.Protocols {
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintf(w, "--- PROTOCOL INFO: %s\n", name)
|
||||
fmt.Fprintf(w, "%v\n", proto)
|
||||
fmt.Fprintf(w, "---\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func startNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
if err := client.StartNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Started", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func stopNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
if err := client.StopNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Stopped", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func connectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
peerName := args[1]
|
||||
if err := client.ConnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Connected", nodeName, "to", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func disconnectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
peerName := args[1]
|
||||
if err := client.DisconnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Disconnected", nodeName, "from", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func rpcNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) < 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
method := args[1]
|
||||
rpcClient, err := client.RPCClient(context.Background(), nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Bool("subscribe") {
|
||||
return rpcSubscribe(rpcClient, ctx.App.Writer, method, args[3:]...)
|
||||
}
|
||||
var result interface{}
|
||||
params := make([]interface{}, len(args[3:]))
|
||||
for i, v := range args[3:] {
|
||||
params[i] = v
|
||||
}
|
||||
if err := rpcClient.Call(&result, method, params...); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(ctx.App.Writer).Encode(result)
|
||||
}
|
||||
|
||||
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
||||
parts := strings.SplitN(method, "_", 2)
|
||||
namespace := parts[0]
|
||||
method = parts[1]
|
||||
ch := make(chan interface{})
|
||||
subArgs := make([]interface{}, len(args)+1)
|
||||
subArgs[0] = method
|
||||
for i, v := range args {
|
||||
subArgs[i+1] = v
|
||||
}
|
||||
sub, err := client.Subscribe(context.Background(), namespace, ch, subArgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(out)
|
||||
for {
|
||||
select {
|
||||
case v := <-ch:
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
@ -425,6 +425,11 @@ services:
|
||||
- "{{.Port}}:80"{{else}}
|
||||
environment:
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
|
@ -42,7 +42,7 @@ RUN \
|
||||
WORKDIR /eth-netstats
|
||||
EXPOSE 3000
|
||||
|
||||
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: []};' > lib/utils/config.js
|
||||
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: [{{.Banned}}], reserved: ["yournode"]};' > lib/utils/config.js
|
||||
|
||||
CMD ["npm", "start"]
|
||||
`
|
||||
@ -59,25 +59,37 @@ services:
|
||||
- "{{.Port}}:3000"{{end}}
|
||||
environment:
|
||||
- WS_SECRET={{.Secret}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}{{if .Banned}}
|
||||
- BANNED={{.Banned}}{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployEthstats deploys a new ethstats container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string) ([]byte, error) {
|
||||
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string, banned []string) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
|
||||
trustedLabels := make([]string, len(trusted))
|
||||
for i, address := range trusted {
|
||||
trusted[i] = fmt.Sprintf("\"%s\"", address)
|
||||
trustedLabels[i] = fmt.Sprintf("\"%s\"", address)
|
||||
}
|
||||
bannedLabels := make([]string, len(banned))
|
||||
for i, address := range banned {
|
||||
bannedLabels[i] = fmt.Sprintf("\"%s\"", address)
|
||||
}
|
||||
|
||||
dockerfile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(ethstatsDockerfile)).Execute(dockerfile, map[string]interface{}{
|
||||
"Trusted": strings.Join(trusted, ", "),
|
||||
"Trusted": strings.Join(trustedLabels, ", "),
|
||||
"Banned": strings.Join(bannedLabels, ", "),
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
@ -87,6 +99,7 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
|
||||
"Port": port,
|
||||
"Secret": secret,
|
||||
"VHost": vhost,
|
||||
"Banned": strings.Join(banned, ","),
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -107,11 +120,12 @@ type ethstatsInfos struct {
|
||||
port int
|
||||
secret string
|
||||
config string
|
||||
banned []string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *ethstatsInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, port=%d, secret=%s", info.host, info.port, info.secret)
|
||||
return fmt.Sprintf("host=%s, port=%d, secret=%s, banned=%v", info.host, info.port, info.secret, info.banned)
|
||||
}
|
||||
|
||||
// checkEthstats does a health-check against an ethstats server to verify whether
|
||||
@ -145,6 +159,9 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
|
||||
if port != 80 && port != 443 {
|
||||
config += fmt.Sprintf(":%d", port)
|
||||
}
|
||||
// Retrieve the IP blacklist
|
||||
banned := strings.Split(infos.envvars["BANNED"], ",")
|
||||
|
||||
// Run a sanity check to see if the port is reachable
|
||||
if err = checkPort(host, port); err != nil {
|
||||
log.Warn("Ethstats service seems unreachable", "server", host, "port", port, "err", err)
|
||||
@ -155,5 +172,6 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
|
||||
port: port,
|
||||
secret: secret,
|
||||
config: config,
|
||||
banned: banned,
|
||||
}, nil
|
||||
}
|
||||
|
@ -82,6 +82,11 @@ services:
|
||||
- CAPTCHA_SECRET={{.CaptchaSecret}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=8080{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
@ -128,7 +133,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
files[filepath.Join(workdir, "genesis.json")] = []byte(config.node.genesis)
|
||||
files[filepath.Join(workdir, "genesis.json")] = config.node.genesis
|
||||
files[filepath.Join(workdir, "account.json")] = []byte(config.node.keyJSON)
|
||||
files[filepath.Join(workdir, "account.pass")] = []byte(config.node.keyPass)
|
||||
|
||||
|
@ -43,6 +43,11 @@ services:
|
||||
- "{{.Port}}:80"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/tmp/docker.sock:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
|
@ -30,7 +30,7 @@ import (
|
||||
|
||||
// nodeDockerfile is the Dockerfile required to run an Ethereum node.
|
||||
var nodeDockerfile = `
|
||||
FROM ethereum/client-go:alpine-develop
|
||||
FROM ethereum/client-go:latest
|
||||
|
||||
ADD genesis.json /genesis.json
|
||||
{{if .Unlock}}
|
||||
@ -38,9 +38,9 @@ ADD genesis.json /genesis.json
|
||||
ADD signer.pass /signer.pass
|
||||
{{end}}
|
||||
RUN \
|
||||
echo '/geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
|
||||
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "geth.sh"]
|
||||
`
|
||||
@ -68,6 +68,11 @@ services:
|
||||
- MINER_NAME={{.Etherbase}}
|
||||
- GAS_TARGET={{.GasTarget}}
|
||||
- GAS_PRICE={{.GasPrice}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
@ -123,7 +128,7 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
//genesisfile, _ := json.MarshalIndent(config.genesis, "", " ")
|
||||
files[filepath.Join(workdir, "genesis.json")] = []byte(config.genesis)
|
||||
files[filepath.Join(workdir, "genesis.json")] = config.genesis
|
||||
|
||||
if config.keyJSON != "" {
|
||||
files[filepath.Join(workdir, "signer.json")] = []byte(config.keyJSON)
|
||||
@ -192,7 +197,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
|
||||
// Container available, retrieve its node ID and its genesis json
|
||||
var out []byte
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 /geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
|
||||
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
|
||||
return nil, ErrServiceUnreachable
|
||||
}
|
||||
id := bytes.Trim(bytes.TrimSpace(out), "\"")
|
||||
|
@ -27,7 +27,6 @@ import (
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"golang.org/x/crypto/ssh"
|
||||
@ -78,14 +77,25 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
} else {
|
||||
key, err := ssh.ParsePrivateKey(buf)
|
||||
if err != nil {
|
||||
log.Warn("Bad SSH key, falling back to passwords", "path", path, "err", err)
|
||||
fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path)
|
||||
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
|
||||
fmt.Println()
|
||||
if err != nil {
|
||||
log.Warn("Couldn't read password", "err", err)
|
||||
}
|
||||
key, err := ssh.ParsePrivateKeyWithPassphrase(buf, blob)
|
||||
if err != nil {
|
||||
log.Warn("Failed to decrypt SSH key, falling back to passwords", "path", path, "err", err)
|
||||
} else {
|
||||
auths = append(auths, ssh.PublicKeys(key))
|
||||
}
|
||||
} else {
|
||||
auths = append(auths, ssh.PublicKeys(key))
|
||||
}
|
||||
}
|
||||
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
|
||||
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
|
||||
blob, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
|
||||
|
||||
fmt.Println()
|
||||
return string(blob), err
|
||||
@ -122,7 +132,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
}
|
||||
}
|
||||
// If a public key exists for this SSH server, check that it matches
|
||||
if bytes.Compare(pubkey, key.Marshal()) == 0 {
|
||||
if bytes.Equal(pubkey, key.Marshal()) {
|
||||
return nil
|
||||
}
|
||||
// We have a mismatch, forbid connecting
|
||||
|
@ -22,12 +22,12 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
@ -106,17 +106,15 @@ func (w *wizard) readString() string {
|
||||
// readDefaultString reads a single line from stdin, trimming it from spaces. If
|
||||
// an empty line is entered, the default value is returned.
|
||||
func (w *wizard) readDefaultString(def string) string {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text != "" {
|
||||
return text
|
||||
}
|
||||
return def
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text != "" {
|
||||
return text
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// readInt reads a single line from stdin, trimming it from spaces, enforcing it
|
||||
@ -162,6 +160,29 @@ func (w *wizard) readDefaultInt(def int) int {
|
||||
}
|
||||
}
|
||||
|
||||
// readDefaultBigInt reads a single line from stdin, trimming it from spaces,
|
||||
// enforcing it to parse into a big integer. If an empty line is entered, the
|
||||
// default value is returned.
|
||||
func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return def
|
||||
}
|
||||
val, ok := new(big.Int).SetString(text, 0)
|
||||
if !ok {
|
||||
log.Error("Invalid input, expected big integer")
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
// readFloat reads a single line from stdin, trimming it from spaces, enforcing it
|
||||
// to parse into a float.
|
||||
func (w *wizard) readFloat() float64 {
|
||||
@ -182,6 +203,7 @@ func (w *wizard) readFloat() float64 {
|
||||
return val
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// readDefaultFloat reads a single line from stdin, trimming it from spaces, enforcing
|
||||
// it to parse into a float. If an empty line is entered, the default value is returned.
|
||||
@ -207,15 +229,13 @@ func (w *wizard) readDefaultFloat(def float64) float64 {
|
||||
// readPassword reads a single line from stdin, trimming it from the trailing new
|
||||
// line and returns it. The input will not be echoed.
|
||||
func (w *wizard) readPassword() string {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
if err != nil {
|
||||
log.Crit("Failed to read password", "err", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return string(text)
|
||||
fmt.Printf("> ")
|
||||
text, err := terminal.ReadPassword(int(os.Stdin.Fd()))
|
||||
if err != nil {
|
||||
log.Crit("Failed to read password", "err", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return string(text)
|
||||
}
|
||||
|
||||
// readAddress reads a single line from stdin, trimming it from spaces and converts
|
||||
@ -279,3 +299,27 @@ func (w *wizard) readJSON() string {
|
||||
return string(blob)
|
||||
}
|
||||
}
|
||||
|
||||
// readIPAddress reads a single line from stdin, trimming it from spaces and
|
||||
// returning it if it's convertible to an IP address. The reason for keeping
|
||||
// the user input format instead of returning a Go net.IP is to match with
|
||||
// weird formats used by ethstats, which compares IPs textually, not by value.
|
||||
func (w *wizard) readIPAddress() string {
|
||||
for {
|
||||
// Read the IP address from the user
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return ""
|
||||
}
|
||||
// Make sure it looks ok and return it if so
|
||||
if ip := net.ParseIP(text); ip == nil {
|
||||
log.Error("Invalid IP address, please retry")
|
||||
continue
|
||||
}
|
||||
return text
|
||||
}
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
@ -60,6 +61,42 @@ func (w *wizard) deployEthstats() {
|
||||
fmt.Printf("What should be the secret password for the API? (default = %s)\n", infos.secret)
|
||||
infos.secret = w.readDefaultString(infos.secret)
|
||||
}
|
||||
// Gather any blacklists to ban from reporting
|
||||
fmt.Println()
|
||||
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
// The user might want to clear the entire list, although generally probably not
|
||||
fmt.Println()
|
||||
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
|
||||
if w.readDefaultString("n") != "n" {
|
||||
infos.banned = nil
|
||||
}
|
||||
// Offer the user to explicitly add/remove certain IP addresses
|
||||
fmt.Println()
|
||||
fmt.Println("Which additional IP addresses should be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
infos.banned = append(infos.banned, ip)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("Which IP addresses should not be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
for i, addr := range infos.banned {
|
||||
if ip == addr {
|
||||
infos.banned = append(infos.banned[:i], infos.banned[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
sort.Strings(infos.banned)
|
||||
}
|
||||
// Try to deploy the ethstats server on the host
|
||||
trusted := make([]string, 0, len(w.servers))
|
||||
for _, client := range w.servers {
|
||||
@ -67,7 +104,7 @@ func (w *wizard) deployEthstats() {
|
||||
trusted = append(trusted, client.address)
|
||||
}
|
||||
}
|
||||
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted); err != nil {
|
||||
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted, infos.banned); err != nil {
|
||||
log.Error("Failed to deploy ethstats container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -18,7 +18,9 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"time"
|
||||
@ -42,6 +44,7 @@ func (w *wizard) makeGenesis() {
|
||||
EIP150Block: big.NewInt(2),
|
||||
EIP155Block: big.NewInt(3),
|
||||
EIP158Block: big.NewInt(3),
|
||||
ByzantiumBlock: big.NewInt(4),
|
||||
},
|
||||
}
|
||||
// Figure out which consensus engine to choose
|
||||
@ -134,3 +137,53 @@ func (w *wizard) makeGenesis() {
|
||||
// All done, store the genesis and flush to disk
|
||||
w.conf.genesis = genesis
|
||||
}
|
||||
|
||||
// manageGenesis permits the modification of chain configuration parameters in
|
||||
// a genesis config and the export of the entire genesis spec.
|
||||
func (w *wizard) manageGenesis() {
|
||||
// Figure out whether to modify or export the genesis
|
||||
fmt.Println()
|
||||
fmt.Println(" 1. Modify existing fork rules")
|
||||
fmt.Println(" 2. Export genesis configuration")
|
||||
|
||||
choice := w.read()
|
||||
switch {
|
||||
case choice == "1":
|
||||
// Fork rule updating requested, iterate over each fork
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.genesis.Config.HomesteadBlock)
|
||||
w.conf.genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.genesis.Config.HomesteadBlock)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP150Block)
|
||||
w.conf.genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP150Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP155Block)
|
||||
w.conf.genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP155Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP158Block)
|
||||
w.conf.genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP158Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.genesis.Config.ByzantiumBlock)
|
||||
w.conf.genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.genesis.Config.ByzantiumBlock)
|
||||
|
||||
out, _ := json.MarshalIndent(w.conf.genesis.Config, "", " ")
|
||||
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
|
||||
|
||||
case choice == "2":
|
||||
// Save whatever genesis configuration we currently have
|
||||
fmt.Println()
|
||||
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
|
||||
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
|
||||
log.Error("Failed to save genesis file", "err", err)
|
||||
}
|
||||
log.Info("Exported existing genesis block")
|
||||
|
||||
default:
|
||||
log.Error("That's not something I can do")
|
||||
}
|
||||
}
|
||||
|
@@ -98,7 +98,7 @@ func (w *wizard) run() {
if w.conf.genesis == nil {
fmt.Println(" 2. Configure new genesis")
} else {
fmt.Println(" 2. Save existing genesis")
fmt.Println(" 2. Manage existing genesis")
}
if len(w.servers) == 0 {
fmt.Println(" 3. Track new remote server")
@@ -118,18 +118,10 @@ func (w *wizard) run() {
w.networkStats(false)

case choice == "2":
// If we don't have a genesis, make one
if w.conf.genesis == nil {
w.makeGenesis()
} else {
// Otherwise just save whatever we currently have
fmt.Println()
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
}
log.Info("Exported existing genesis block")
w.manageGenesis()
}
case choice == "3":
if len(w.servers) == 0 {
@@ -129,7 +129,7 @@ func (w *wizard) networkStats(tips bool) {
}
}
// If a genesis block was found, load it into our configs
if protips.genesis != "" {
if protips.genesis != "" && w.conf.genesis == nil {
genesis := new(core.Genesis)
if err := json.Unmarshal([]byte(protips.genesis), genesis); err != nil {
log.Error("Failed to parse remote genesis", "err", err)
@@ -71,22 +71,20 @@ func (w *wizard) makeServer() string {
fmt.Println()
fmt.Println("Please enter remote server's address:")

for {
// Read and fial the server to ensure docker is present
input := w.readString()
// Read and dial the server to ensure docker is present
input := w.readString()

client, err := dial(input, nil)
if err != nil {
log.Error("Server not ready for puppeth", "err", err)
return ""
}
// All checks passed, start tracking the server
w.servers[input] = client
w.conf.Servers[input] = client.pubkey
w.conf.flush()

return input
client, err := dial(input, nil)
if err != nil {
log.Error("Server not ready for puppeth", "err", err)
return ""
}
// All checks passed, start tracking the server
w.servers[input] = client
w.conf.Servers[input] = client.pubkey
w.conf.flush()

return input
}

// selectServer lists the user all the currnetly known servers to choose from,
@@ -51,7 +51,7 @@ func main() {
var r io.Reader
switch {
case *hexMode != "":
data, err := hex.DecodeString(*hexMode)
data, err := hex.DecodeString(strings.TrimPrefix(*hexMode, "0x"))
if err != nil {
die(err)
}
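The rlpdump change above simply makes the -hex input tolerate an optional 0x prefix. A tiny standard-library sketch of the same idea (the sample inputs are arbitrary):

    package main

    import (
        "encoding/hex"
        "fmt"
        "strings"
    )

    func main() {
        // Both spellings now decode to the same bytes.
        for _, s := range []string{"0xdeadbeef", "deadbeef"} {
            data, err := hex.DecodeString(strings.TrimPrefix(s, "0x"))
            fmt.Printf("%q -> %x (err=%v)\n", s, data, err)
        }
    }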
@@ -1,38 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
)

func cleandb(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Need path to chunks database as the first and only argument")
}

chunkDbPath := args[0]
hash := storage.MakeHashFunc("SHA3")
dbStore, err := storage.NewDbStore(chunkDbPath, hash, 10000000, 0)
if err != nil {
utils.Fatalf("Cannot initialise dbstore: %v", err)
}
dbStore.Cleanup()
}
116 cmd/swarm/db.go Normal file
@@ -0,0 +1,116 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
"fmt"
"io"
"os"
"path/filepath"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
)

func dbExport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
}

store, err := openDbStore(args[0])
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()

var out io.Writer
if args[1] == "-" {
out = os.Stdout
} else {
f, err := os.Create(args[1])
if err != nil {
utils.Fatalf("error opening output file: %s", err)
}
defer f.Close()
out = f
}

count, err := store.Export(out)
if err != nil {
utils.Fatalf("error exporting local chunk database: %s", err)
}

log.Info(fmt.Sprintf("successfully exported %d chunks", count))
}

func dbImport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
}

store, err := openDbStore(args[0])
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()

var in io.Reader
if args[1] == "-" {
in = os.Stdin
} else {
f, err := os.Open(args[1])
if err != nil {
utils.Fatalf("error opening input file: %s", err)
}
defer f.Close()
in = f
}

count, err := store.Import(in)
if err != nil {
utils.Fatalf("error importing local chunk database: %s", err)
}

log.Info(fmt.Sprintf("successfully imported %d chunks", count))
}

func dbClean(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
}

store, err := openDbStore(args[0])
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()

store.Cleanup()
}

func openDbStore(path string) (*storage.DbStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
}
hash := storage.MakeHashFunc("SHA3")
return storage.NewDbStore(path, hash, 10000000, 0)
}
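For readers who want to drive the same export path programmatically rather than through the CLI, a minimal sketch follows. It reuses only calls that appear in db.go above; the chunk directory and output file names are assumptions.

    package main

    import (
        "fmt"
        "os"

        "github.com/ethereum/go-ethereum/swarm/storage"
    )

    func main() {
        // Open the chunk database the same way openDbStore does (path is hypothetical).
        hash := storage.MakeHashFunc("SHA3")
        store, err := storage.NewDbStore("/path/to/bzz-KEY/chunks", hash, 10000000, 0)
        if err != nil {
            panic(err)
        }
        defer store.Close()

        // Stream the chunks into a tar archive, mirroring what `swarm db export` does.
        f, err := os.Create("chunks.tar")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        count, err := store.Export(f)
        if err != nil {
            panic(err)
        }
        fmt.Printf("exported %d chunks\n", count)
    }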
@@ -25,6 +25,7 @@ import (
"os"
"os/signal"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
@@ -240,15 +241,69 @@ Removes a path from the manifest
},
},
{
Action: cleandb,
Name: "db",
Usage: "manage the local chunk database",
ArgsUsage: "db COMMAND",
Description: `
Manage the local chunk database.
`,
Subcommands: []cli.Command{
{
Action: dbExport,
Name: "export",
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Export a local chunk database as a tar archive (use - to send to stdout).

swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The export may be quite large, consider piping the output through the Unix
pv(1) tool to get a progress bar:

swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
`,
},
{
Action: dbImport,
Name: "import",
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).

swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar

The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:

pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
`,
},
{
Action: dbClean,
Name: "clean",
Usage: "remove corrupt entries from a local chunk database",
ArgsUsage: "<chunkdb>",
Description: `
Remove corrupt entries from a local chunk database.
`,
},
},
},
{
Action: func(ctx *cli.Context) {
utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
},
Name: "cleandb",
Usage: "Cleans database of corrupted entries",
Usage: "DEPRECATED: use 'swarm db clean'",
ArgsUsage: " ",
Description: `
Cleans database of corrupted entries.
DEPRECATED: use 'swarm db clean'.
`,
},
}
sort.Sort(cli.CommandsByName(app.Commands))

app.Flags = []cli.Flag{
utils.IdentityFlag,
@@ -161,6 +161,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
conf := &node.Config{
DataDir: dir,
IPCPath: "bzzd.ipc",
NoUSB: true,
}
n, err := node.New(conf)
if err != nil {
@@ -27,8 +27,6 @@ import (
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
t.Skip("flaky test")

// start 3 node cluster
t.Log("starting 3 node cluster")
cluster := newTestCluster(t, 3)
@@ -164,7 +164,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {

func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
for _, b := range bs {
if !chain.HasBlock(b.Hash()) {
if !chain.HasBlock(b.Hash(), b.NumberU64()) {
return false
}
}
@@ -31,17 +31,19 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/dashboard"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethstats"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -121,7 +123,7 @@ var (
}
NoUSBFlag = cli.BoolFlag{
Name: "nousb",
Usage: "Disables monitoring for and managine USB hardware wallets",
Usage: "Disables monitoring for and managing USB hardware wallets",
}
NetworkIdFlag = cli.Uint64Flag{
Name: "networkid",
@@ -136,9 +138,13 @@ var (
Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
}
DevModeFlag = cli.BoolFlag{
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Developer mode: pre-configured private network with several debugging flags",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
}
DeveloperPeriodFlag = cli.IntFlag{
Name: "dev.period",
Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
}
IdentityFlag = cli.StringFlag{
Name: "identity",
@@ -178,6 +184,31 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: "dashboard",
Usage: "Enable the dashboard",
}
DashboardAddrFlag = cli.StringFlag{
Name: "dashboard.addr",
Usage: "Dashboard listening interface",
Value: dashboard.DefaultConfig.Host,
}
DashboardPortFlag = cli.IntFlag{
Name: "dashboard.host",
Usage: "Dashboard listening port",
Value: dashboard.DefaultConfig.Port,
}
DashboardRefreshFlag = cli.DurationFlag{
Name: "dashboard.refresh",
Usage: "Dashboard metrics collection refresh rate",
Value: dashboard.DefaultConfig.Refresh,
}
DashboardAssetsFlag = cli.StringFlag{
Name: "dashboard.assets",
Usage: "Developer flag to serve the dashboard from the local file system",
Value: dashboard.DefaultConfig.Assets,
}
// Ethash settings
EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir",
@@ -213,6 +244,16 @@ var (
Name: "txpool.nolocals",
Usage: "Disables price exemptions for locally submitted transactions",
}
TxPoolJournalFlag = cli.StringFlag{
Name: "txpool.journal",
Usage: "Disk journal for local transaction to survive node restarts",
Value: core.DefaultTxPoolConfig.Journal,
}
TxPoolRejournalFlag = cli.DurationFlag{
Name: "txpool.rejournal",
Usage: "Time interval to regenerate the local transaction journal",
Value: core.DefaultTxPoolConfig.Rejournal,
}
TxPoolPriceLimitFlag = cli.Uint64Flag{
Name: "txpool.pricelimit",
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
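The two new flags map onto the Journal and Rejournal fields of core.TxPoolConfig (see the setTxPool hunk further down). A small sketch of setting them directly; the journal file name and interval are assumed values.

    package main

    import (
        "fmt"
        "time"

        "github.com/ethereum/go-ethereum/core"
    )

    func main() {
        // Start from the defaults the flags reference and override the journal settings.
        cfg := core.DefaultTxPoolConfig
        cfg.Journal = "transactions.rlp" // assumed journal file name
        cfg.Rejournal = 30 * time.Minute // assumed regeneration interval
        fmt.Printf("journal=%s rejournal=%s\n", cfg.Journal, cfg.Rejournal)
    }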
@@ -296,7 +337,7 @@ var (
}
PasswordFileFlag = cli.StringFlag{
Name: "password",
Usage: "Password file to use for non-inteactive password input",
Usage: "Password file to use for non-interactive password input",
Value: "",
}

@@ -785,7 +826,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
cfg.NetRestrict = list
}

if ctx.GlobalBool(DevModeFlag.Name) {
if ctx.GlobalBool(DeveloperFlag.Name) {
// --dev mode can't use p2p networking.
cfg.MaxPeers = 0
cfg.ListenAddr = ":0"
@@ -806,8 +847,8 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
switch {
case ctx.GlobalIsSet(DataDirFlag.Name):
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
case ctx.GlobalBool(DevModeFlag.Name):
cfg.DataDir = filepath.Join(os.TempDir(), "ethereum_dev_mode")
case ctx.GlobalBool(DeveloperFlag.Name):
cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.GlobalBool(TestnetFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
case ctx.GlobalBool(RinkebyFlag.Name):
@@ -838,6 +879,12 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
}
if ctx.GlobalIsSet(TxPoolJournalFlag.Name) {
cfg.Journal = ctx.GlobalString(TxPoolJournalFlag.Name)
}
if ctx.GlobalIsSet(TxPoolRejournalFlag.Name) {
cfg.Rejournal = ctx.GlobalDuration(TxPoolRejournalFlag.Name)
}
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
}
@@ -907,7 +954,7 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
// Avoid conflicting network flags
checkExclusive(ctx, DevModeFlag, TestnetFlag, RinkebyFlag)
checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)

ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@@ -934,10 +981,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
}

// Ethereum needs to know maxPeers to calculate the light server peer ratio.
// TODO(fjl): ensure Ethereum can get MaxPeers from node.
cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name)

if ctx.GlobalIsSet(CacheFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name)
}
@@ -972,20 +1015,44 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.NetworkId = 4
}
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(DevModeFlag.Name):
cfg.Genesis = core.DevGenesisBlock()
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = new(big.Int)
case ctx.GlobalBool(DeveloperFlag.Name):
// Create new developer account or reuse existing one
var (
developer accounts.Account
err error
)
if accs := ks.Accounts(); len(accs) > 0 {
developer = ks.Accounts()[0]
} else {
developer, err = ks.NewAccount("")
if err != nil {
Fatalf("Failed to create developer account: %v", err)
}
}
cfg.PowTest = true
}
if err := ks.Unlock(developer, ""); err != nil {
Fatalf("Failed to unlock developer account: %v", err)
}
log.Info("Using developer account", "address", developer.Address)

cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = big.NewInt(1)
}
}
// TODO(fjl): move trie cache generations into config
if gen := ctx.GlobalInt(TrieCacheGenFlag.Name); gen > 0 {
state.MaxTrieCacheGen = uint16(gen)
}
}

// SetDashboardConfig applies dashboard related command line flags to the config.
func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) {
cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name)
cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name)
cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name)
cfg.Assets = ctx.GlobalString(DashboardAssetsFlag.Name)
}

// RegisterEthService adds an Ethereum client to the stack.
func RegisterEthService(stack *node.Node, cfg *eth.Config) {
var err error
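To illustrate the developer-mode branch above in isolation: core.DeveloperGenesisBlock takes the clique block period and the pre-funded account, exactly as the --dev / --dev.period flags feed it. A hedged sketch follows; the address is a placeholder standing in for the unlocked developer account, and the Clique.Period field on the generated chain config is assumed.

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core"
    )

    func main() {
        // Placeholder faucet address standing in for the developer keystore account.
        faucet := common.HexToAddress("0x0000000000000000000000000000000000000001")

        // 2-second block period, mirroring e.g. --dev.period 2.
        genesis := core.DeveloperGenesisBlock(2, faucet)

        fmt.Println("clique period:", genesis.Config.Clique.Period) // assumed field layout
        fmt.Println("faucet balance:", genesis.Alloc[faucet].Balance)
    }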
@@ -1008,6 +1075,13 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
}
}

// RegisterDashboardService adds a dashboard to the stack.
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config) {
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return dashboard.New(cfg)
})
}

// RegisterShhService configures Whisper and adds it to the given node.
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
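RegisterDashboardService mirrors the other Register* helpers above. A hedged sketch of wiring it onto a bare node stack; the empty node.Config and treating dashboard.DefaultConfig as a copyable value are assumptions, not something this diff guarantees.

    package main

    import (
        "github.com/ethereum/go-ethereum/cmd/utils"
        "github.com/ethereum/go-ethereum/dashboard"
        "github.com/ethereum/go-ethereum/node"
    )

    func main() {
        // A bare node stack; the empty config is an assumption for illustration only.
        stack, err := node.New(&node.Config{})
        if err != nil {
            utils.Fatalf("failed to create node stack: %v", err)
        }
        // Register the dashboard the same way geth does, starting from the package defaults.
        cfg := dashboard.DefaultConfig // assumed to be a plain Config value
        utils.RegisterDashboardService(stack, &cfg)

        if err := stack.Start(); err != nil {
            utils.Fatalf("failed to start node stack: %v", err)
        }
        defer stack.Stop()
    }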
@@ -1064,8 +1138,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultTestnetGenesisBlock()
case ctx.GlobalBool(RinkebyFlag.Name):
genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(DevModeFlag.Name):
genesis = core.DevGenesisBlock()
case ctx.GlobalBool(DeveloperFlag.Name):
Fatalf("Developer chains are ephemeral")
}
return genesis
}
@@ -1075,16 +1149,24 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
var err error
chainDb = MakeChainDatabase(ctx, stack)

engine := ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New("", 1, 0, "", 1, 0)
}
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
var engine consensus.Engine
if config.Clique != nil {
engine = clique.New(config.Clique, chainDb)
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, config, engine, new(event.TypeMux), vmcfg)
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
@@ -121,20 +121,20 @@ func TestCompression(t *testing.T) {
in := hexutil.MustDecode("0x4912385c0e7b64000000")
out := hexutil.MustDecode("0x80fe4912385c0e7b64")

if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
if data := CompressBytes(in); !bytes.Equal(data, out) {
t.Errorf("encoding mismatch for sparse data: have %x, want %x", data, out)
}
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
if data, err := DecompressBytes(out, len(in)); err != nil || !bytes.Equal(data, in) {
t.Errorf("decoding mismatch for sparse data: have %x, want %x, error %v", data, in, err)
}
// Check the the compression returns the input if the bitset encoding is longer
in = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
out = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")

if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
if data := CompressBytes(in); !bytes.Equal(data, out) {
t.Errorf("encoding mismatch for dense data: have %x, want %x", data, out)
}
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
if data, err := DecompressBytes(out, len(in)); err != nil || !bytes.Equal(data, in) {
t.Errorf("decoding mismatch for dense data: have %x, want %x, error %v", data, in, err)
}
// Check that decompressing a longer input than the target fails
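The test change above is purely idiomatic: bytes.Equal expresses equality directly, while bytes.Compare is meant for ordering. A tiny standalone illustration with arbitrary data:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        a, b := []byte{0x49, 0x12}, []byte{0x49, 0x12}
        fmt.Println(bytes.Equal(a, b))        // true
        fmt.Println(bytes.Compare(a, b) == 0) // also true, but phrased as an ordering check
    }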
@@ -47,6 +47,9 @@ func FromHex(s string) []byte {
//
// Returns an exact copy of the provided bytes
func CopyBytes(b []byte) (copiedBytes []byte) {
if b == nil {
return nil
}
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)

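With the nil guard added above, CopyBytes now preserves nil instead of returning an empty non-nil slice. A short usage sketch:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        fmt.Println(common.CopyBytes(nil) == nil) // true once the nil check is in place
        src := []byte{1, 2, 3}
        dst := common.CopyBytes(src)
        dst[0] = 9
        fmt.Println(src[0], dst[0]) // 1 9: the copy is independent of the original
    }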
@@ -53,9 +53,7 @@ var (

type decError struct{ msg string }

func (err decError) Error() string {
return string(err.msg)
}
func (err decError) Error() string { return err.msg }

// Decode decodes a hex string with 0x prefix.
func Decode(input string) ([]byte, error) {
@@ -26,11 +26,10 @@ import (
)

var (
textZero = []byte(`0x0`)
bytesT = reflect.TypeOf(Bytes(nil))
bigT = reflect.TypeOf((*Big)(nil))
uintT = reflect.TypeOf(Uint(0))
uint64T = reflect.TypeOf(Uint64(0))
bytesT = reflect.TypeOf(Bytes(nil))
bigT = reflect.TypeOf((*Big)(nil))
uintT = reflect.TypeOf(Uint(0))
uint64T = reflect.TypeOf(Uint64(0))
)

// Bytes marshals/unmarshals as a JSON string with 0x prefix.
@@ -224,7 +223,7 @@ func (b *Uint64) UnmarshalText(input []byte) error {
return ErrSyntax
}
dec *= 16
dec += uint64(nib)
dec += nib
}
*b = Uint64(dec)
return nil
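For context, Uint64's UnmarshalText backs JSON decoding of quoted hex quantities throughout the codebase; the change above just drops a conversion, apparently because the decoded nibble is already a uint64. A minimal usage sketch:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/common/hexutil"
    )

    func main() {
        var v hexutil.Uint64
        // Quoted hex strings decode through the text unmarshaler shown above.
        if err := json.Unmarshal([]byte(`"0x2a"`), &v); err != nil {
            panic(err)
        }
        fmt.Println(uint64(v)) // 42
    }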
@@ -24,6 +24,7 @@ import (
"reflect"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/sha3"
)

const (
@@ -87,7 +88,7 @@ func (h Hash) MarshalText() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalText()
}

// Sets the hash to the value of b. If b is larger than len(h) it will panic
// Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left).
func (h *Hash) SetBytes(b []byte) {
if len(b) > len(h) {
b = b[len(b)-HashLength:]
@@ -96,7 +97,7 @@ func (h *Hash) SetBytes(b []byte) {
copy(h[HashLength-len(b):], b)
}

// Set string `s` to h. If s is larger than len(h) it will panic
// Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }

// Sets h to other
@@ -163,7 +164,28 @@ func (a Address) Str() string { return string(a[:]) }
func (a Address) Bytes() []byte { return a[:] }
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
func (a Address) Hex() string { return hexutil.Encode(a[:]) }

// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
unchecksummed := hex.EncodeToString(a[:])
sha := sha3.NewKeccak256()
sha.Write([]byte(unchecksummed))
hash := sha.Sum(nil)

result := []byte(unchecksummed)
for i := 0; i < len(result); i++ {
hashByte := hash[i/2]
if i%2 == 0 {
hashByte = hashByte >> 4
} else {
hashByte &= 0xf
}
if result[i] > '9' && hashByte > 7 {
result[i] -= 32
}
}
return "0x" + string(result)
}

// String implements the stringer interface and is used also by the logger.
func (a Address) String() string {
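A small usage sketch for the new checksummed Hex(): the address literal below is the well-known EIP-55 example vector, and the expected output is what that spec lists for it.

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        // HexToAddress accepts any casing; Hex() now re-emits the EIP55 mixed-case form.
        addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
        fmt.Println(addr.Hex()) // expected: 0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed
    }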
Some files were not shown because too many files have changed in this diff.