Compare commits
212 Commits
SHA1 | Author | Date | |
---|---|---|---|
10a45cb59b | |||
cd88f69715 | |||
46d0d04f97 | |||
514659a023 | |||
01c9cf1cb5 | |||
b751cf3901 | |||
d40179f882 | |||
f2c5b2cc1c | |||
a4277450b2 | |||
b664bedcf2 | |||
eebde1a2e2 | |||
b0b3cf2eeb | |||
c98d9b49bf | |||
0042f13d47 | |||
d432688886 | |||
58a1e13e6d | |||
a1f3878ec5 | |||
a20a02ce0b | |||
9a44e1035e | |||
9012863ad7 | |||
a5d08c893d | |||
98e101ef8e | |||
50c18e6eb8 | |||
60e27b51bc | |||
693d9ccbfb | |||
5c53a5be66 | |||
431cf2a1e4 | |||
4f77857f74 | |||
fade09a7ff | |||
db6e695002 | |||
335abdceb1 | |||
732273094c | |||
b8793edd83 | |||
eb92522278 | |||
061889d4ea | |||
e3dfd55820 | |||
2fefe4baa0 | |||
ac9865791a | |||
80f7c6c299 | |||
bc24b7a912 | |||
1496b3aff6 | |||
1e9f86b49e | |||
65ea913e29 | |||
9a0e433b13 | |||
04d2de9119 | |||
3285a0fda3 | |||
6171d01b11 | |||
cf87713dd4 | |||
ac92d7c411 | |||
d5a79934dc | |||
0424192e61 | |||
9c2882b2e5 | |||
1a0eb903f1 | |||
0036e2a747 | |||
727eadacca | |||
99cba96f26 | |||
f272879e5a | |||
72dd51e25a | |||
799a469000 | |||
f4d81178d8 | |||
310d2e7ef4 | |||
3ecde4e2aa | |||
a355b401db | |||
cba33029a8 | |||
9702badd83 | |||
067dc2cbf5 | |||
65979770e6 | |||
41bdf49eed | |||
ea11f7dd7a | |||
8df24760d7 | |||
71814bf6c4 | |||
ec1700600a | |||
b0f30b0b37 | |||
e96f2981e2 | |||
09d59da3a1 | |||
280609c99b | |||
309da541de | |||
dd06c85843 | |||
ae40d51410 | |||
b865fad888 | |||
afb17cf071 | |||
08959bbc70 | |||
673c92db6b | |||
c2a494c743 | |||
afdd23b5ca | |||
cb809c03da | |||
45421d3130 | |||
115e7d71cc | |||
dd5ed01f3b | |||
b7ff0d42e3 | |||
c98bce709c | |||
17f0b11942 | |||
6231edcbab | |||
07aae19e5d | |||
b596b4ba5b | |||
8b1e4c4c5e | |||
846d091bd2 | |||
a346aedb90 | |||
ef25b826e6 | |||
261b3e2351 | |||
344f25fb3e | |||
1afaea4bfe | |||
11cf5b7ead | |||
069cb661c3 | |||
3b8915e387 | |||
437ceaa9be | |||
136f78ff0a | |||
aa73420207 | |||
3556962053 | |||
e1e87d8b1a | |||
30cc1c3bf0 | |||
10582a97ca | |||
e16a7ef60f | |||
a816e75662 | |||
3ee75bec9f | |||
04b668b232 | |||
da636c53d6 | |||
2a41e76b39 | |||
4a2c17b1ab | |||
bc75351edf | |||
33b158e0ed | |||
83721a95ce | |||
e7119ce12d | |||
a5f6a1cb7c | |||
e6aff513db | |||
8a4c1fb799 | |||
10a57fc3d4 | |||
a2f23ca9b1 | |||
e20158176d | |||
ef7b9fb7d0 | |||
b0d0fafd68 | |||
90c7155ef4 | |||
df4e7eccf5 | |||
7c707d14d1 | |||
953a995116 | |||
c5840ce12f | |||
3b3989de6a | |||
40976ea1a0 | |||
d18b509e40 | |||
2e4d23a793 | |||
60293820b7 | |||
82defe5c56 | |||
dd483d7d0d | |||
dddebe469b | |||
cf19586cfb | |||
fd5d51c9ae | |||
2ec5cf1673 | |||
36a800a1d2 | |||
93832b633e | |||
021c3c2816 | |||
ff2c966e7f | |||
14cc40a31a | |||
881df0e629 | |||
1c2f6f5597 | |||
d51a9fd6b7 | |||
464f30d301 | |||
8a28408616 | |||
e1dc7ece62 | |||
99127ff2e7 | |||
81d6ec908a | |||
38b4fc8069 | |||
11ab73f6d8 | |||
18e9cb1187 | |||
502a2bd69f | |||
8b517d7f00 | |||
cad071009d | |||
181a3309df | |||
02fa3e3179 | |||
a8eafcdc0e | |||
bcf2465b0b | |||
c3dc01caf1 | |||
cf4faa491a | |||
59966255ad | |||
f8acc0af7e | |||
02a29060d2 | |||
0255ed6335 | |||
96c2ab22e0 | |||
5494e6e7ab | |||
32db571681 | |||
a6af56fa4d | |||
5884606ec3 | |||
f6c0f76cc5 | |||
f9be9a2302 | |||
95f0bd0acf | |||
8dce4c283d | |||
fff16169c6 | |||
5f7eb78918 | |||
e61035c5a3 | |||
37e3f561f1 | |||
ba3bcd16a6 | |||
207bd7d2cd | |||
4047ccad2f | |||
a13e920af0 | |||
f958d7d482 | |||
7cc6abeef6 | |||
54253aae4c | |||
09aabaea9f | |||
ecec454e92 | |||
7b2fc0643f | |||
d2fda73ad7 | |||
5aa21d8b32 | |||
9fc90b6747 | |||
edef84da2b | |||
6430e672c9 | |||
a31d268b76 | |||
e353f9c088 | |||
af48a331bf | |||
80e74fc1e0 | |||
03dffe3efd | |||
cb3f5f8b93 | |||
c7a4d9cf8a | |||
7d0ac94809 |
.travis.yml (27 changes)
@@ -6,7 +6,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.7.5
go: 1.7.6
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -19,7 +19,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.8.3
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -29,7 +29,7 @@ matrix:
- go run build/ci.go test -coverage -misspell

- os: osx
go: 1.8.1
go: 1.8.3
sudo: required
script:
- brew update
@@ -42,7 +42,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.8.3
env:
- ubuntu-ppa
- azure-linux
@@ -53,6 +53,7 @@ matrix:
- debhelper
- dput
- gcc-multilib
- fakeroot
script:
# Build for the primary platforms that Trusty can manage
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
@@ -80,7 +81,7 @@ matrix:
sudo: required
services:
- docker
go: 1.8.1
go: 1.8.3
env:
- azure-linux-mips
script:
@@ -120,16 +121,16 @@ matrix:
- azure-android
- maven-android
before_install:
- curl https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip -o android-ndk-r13b.zip
- unzip -q android-ndk-r13b.zip && rm android-ndk-r13b.zip
- mv android-ndk-r13b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r13b
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
- mv android-ndk-r14b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r14b

- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -137,7 +138,7 @@ matrix:

# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.8.1
go: 1.8.3
env:
- azure-osx
- azure-ios
@@ -147,7 +148,7 @@ matrix:
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds

# Build the iOS framework and upload it to CocoaPods and Azure
- gem uninstall cocoapods -a
- gem uninstall cocoapods -a -x
- gem install cocoapods

- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
@@ -163,7 +164,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.8.3
env:
- azure-purge
script:
Dockerfile

@@ -4,11 +4,12 @@ ADD . /go-ethereum
RUN \
  apk add --update git go make gcc musl-dev linux-headers && \
  (cd go-ethereum && make geth) && \
  cp go-ethereum/build/bin/geth /geth && \
  cp go-ethereum/build/bin/geth /usr/local/bin/ && \
  apk del git go make gcc musl-dev linux-headers && \
  rm -rf /go-ethereum && rm -rf /var/cache/apk/*

EXPOSE 8545
EXPOSE 30303
EXPOSE 30303/udp

ENTRYPOINT ["/geth"]
ENTRYPOINT ["geth"]
README.md (38 changes)
@@ -70,7 +70,7 @@ This command will:
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
with `geth --attach`.
with `geth attach`.

### Full node on the Ethereum test network

@@ -84,21 +84,39 @@ $ geth --testnet --fast --cache=512 console
```

The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
are equially useful on the testnet too. Please see above for their explanations if you've skipped to
are equally useful on the testnet too. Please see above for their explanations if you've skipped to
here.

Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:

* Instead of using the default data directory (`~/.ethereum` on Linux for example), Geth will nest
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux).
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux). Note, on OSX
and Linux this also means that attaching to a running testnet node requires the use of a custom
endpoint since `geth attach` will try to attach to a production node endpoint by default. E.g.
`geth attach <datadir>/testnet/geth.ipc`. Windows users are not affected by this.
* Instead of connecting the main Ethereum network, the client will connect to the test network,
which uses different P2P bootnodes, different network IDs and genesis states.

*Note: Although there are some internal protective measures to prevent transactions from crossing
over between the main network and test network (different starting nonces), you should make sure to
always use separate accounts for play-money and real-money. Unless you manually move accounts, Geth
will by default correctly separate the two networks and will not make any accounts available between
them.*
over between the main network and test network, you should make sure to always use separate accounts
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
separate the two networks and will not make any accounts available between them.*

### Configuration

As an alternative to passing the numerous flags to the `geth` binary, you can also pass a configuration file via:

```
$ geth --config /path/to/your_config.toml
```

To get an idea how the file should look like you can use the `dumpconfig` subcommand to export your existing configuration:

```
$ geth --your-favourite-flags dumpconfig
```

*Note: This works only with geth v1.6.0 and above*

#### Docker quick start

@@ -161,6 +179,12 @@ and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`)

```json
{
  "config": {
    "chainId": 0,
    "homesteadBlock": 0,
    "eip155Block": 0,
    "eip158Block": 0
  },
  "alloc" : {},
  "coinbase" : "0x0000000000000000000000000000000000000000",
  "difficulty" : "0x20000",
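For illustration only, here is a minimal, untested Go sketch (not part of this changeset) of attaching to a testnet node through the nested IPC endpoint described above. It assumes the go-ethereum `ethclient` package and the default Linux/OSX data-directory layout; the exact IPC path is an assumption for the example.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// On Linux/OSX a testnet node nests its data one level deeper, so the IPC
	// endpoint lives under the "testnet" subfolder (assumed path, illustration only).
	ipc := os.Getenv("HOME") + "/.ethereum/testnet/geth.ipc"

	client, err := ethclient.Dial(ipc) // accepts IPC paths as well as HTTP/WS URLs
	if err != nil {
		log.Fatalf("failed to attach to testnet node: %v", err)
	}
	header, err := client.HeaderByNumber(context.Background(), nil) // nil = latest block
	if err != nil {
		log.Fatalf("failed to fetch latest header: %v", err)
	}
	fmt.Println("latest testnet block:", header.Number)
}
```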
@@ -38,7 +38,7 @@ var ErrNotSupported = errors.New("not supported")
var ErrInvalidPassphrase = errors.New("invalid passphrase")

// ErrWalletAlreadyOpen is returned if a wallet is attempted to be opened the
// secodn time.
// second time.
var ErrWalletAlreadyOpen = errors.New("wallet already open")

// ErrWalletClosed is returned if a wallet is attempted to be opened the
@@ -124,14 +124,13 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) {
	if err != nil {
		return err
	}

	privkey, err := hex.DecodeString(keyJSON.PrivateKey)
	privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey)
	if err != nil {
		return err
	}

	k.Address = common.BytesToAddress(addr)
	k.PrivateKey = crypto.ToECDSA(privkey)
	k.PrivateKey = privkey

	return nil
}
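For context on the key-handling changes above and below: `crypto.HexToECDSA` decodes and validates a hex-encoded key in one step, while `crypto.ToECDSAUnsafe` converts raw bytes without validation. The following is a rough, hedged sketch of both call styles, assuming the go-ethereum `crypto` package APIs used in this changeset; the hex key is example material only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	hexKey := "289c2857d4598e37fb9647507e47a309d6133539bf21a8b9cb6df88fd5232032" // example key material only

	// Strict path: decode the hex string and reject malformed or out-of-range keys.
	key, err := crypto.HexToECDSA(hexKey)
	if err != nil {
		log.Fatalf("invalid private key: %v", err)
	}
	fmt.Println("address:", crypto.PubkeyToAddress(key.PublicKey).Hex())

	// Permissive path (used elsewhere in this changeset for already-validated
	// material): convert raw bytes to a key without any error checking.
	raw := crypto.FromECDSA(key)
	unsafeKey := crypto.ToECDSAUnsafe(raw)
	_ = unsafeKey
}
```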
@@ -450,7 +450,6 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco
	if ks.cache.hasAddress(key.Address) {
		return accounts.Account{}, fmt.Errorf("account already exists")
	}

	return ks.importKey(key, passphrase)
}

@@ -182,7 +182,8 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
	if err != nil {
		return nil, err
	}
	key := crypto.ToECDSA(keyBytes)
	key := crypto.ToECDSAUnsafe(keyBytes)

	return &Key{
		Id:      uuid.UUID(keyId),
		Address: crypto.PubkeyToAddress(key.PublicKey),
@@ -46,7 +46,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
	// Decrypt with the correct password
	key, err := DecryptKey(keyjson, password)
	if err != nil {
		t.Errorf("test %d: json key failed to decrypt: %v", i, err)
		t.Fatalf("test %d: json key failed to decrypt: %v", i, err)
	}
	if key.Address != address {
		t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
@@ -74,7 +74,8 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
		return nil, err
	}
	ethPriv := crypto.Keccak256(plainText)
	ecKey := crypto.ToECDSA(ethPriv)
	ecKey := crypto.ToECDSAUnsafe(ethPriv)

	key = &Key{
		Id:      nil,
		Address: crypto.PubkeyToAddress(ecKey.PublicKey),
@@ -22,8 +22,8 @@ environment:

install:
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.1.windows-%GETH_ARCH%.zip
- 7z x go1.8.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version

@@ -77,6 +77,7 @@ var (
	executablePath("puppeth"),
	executablePath("rlpdump"),
	executablePath("swarm"),
	executablePath("wnode"),
}

// A debian package is created for all executables listed here.
@@ -109,6 +110,10 @@ var (
	{
		Name:        "swarm",
		Description: "Ethereum Swarm daemon and tools",
	},
	{
		Name:        "wnode",
		Description: "Ethereum Whisper diagnostic tool",
	},
}

// Distros for which packages are created.
@@ -47,11 +47,14 @@ var (
	// boring stuff
	"vendor/", "tests/files/", "build/",
	// don't relicense vendored sources
	"crypto/sha3/", "crypto/ecies/", "log/",
	"crypto/secp256k1/curve.go",
	"consensus/ethash/xor.go",
	"internal/jsre/deps",
	"cmd/internal/browser",
	"consensus/ethash/xor.go",
	"crypto/bn256/",
	"crypto/ecies/",
	"crypto/secp256k1/curve.go",
	"crypto/sha3/",
	"internal/jsre/deps",
	"log/",
	// don't license generated files
	"contracts/chequebook/contract/",
	"contracts/ens/contract/",
@@ -68,6 +68,7 @@ func main() {
		if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
			utils.Fatalf("%v", err)
		}
		return
	case *nodeKeyFile == "" && *nodeKeyHex == "":
		utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
	case *nodeKeyFile != "" && *nodeKeyHex != "":
cmd/evm/json_logger.go (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"io"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/core/vm"
)

type JSONLogger struct {
	encoder *json.Encoder
	cfg     *vm.LogConfig
}

func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
	return &JSONLogger{json.NewEncoder(writer), cfg}
}

// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
	log := vm.StructLog{
		Pc:         pc,
		Op:         op,
		Gas:        gas + cost,
		GasCost:    cost,
		MemorySize: memory.Len(),
		Storage:    nil,
		Depth:      depth,
		Err:        err,
	}
	if !l.cfg.DisableMemory {
		log.Memory = memory.Data()
	}
	if !l.cfg.DisableStack {
		log.Stack = stack.Data()
	}
	return l.encoder.Encode(log)
}

// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration) error {
	type endLog struct {
		Output  string              `json:"output"`
		GasUsed math.HexOrDecimal64 `json:"gasUsed"`
		Time    time.Duration       `json:"time"`
	}
	return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t})
}
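The runner changes later in this comparison wire this logger in as the EVM tracer when the `--json` flag is set. Below is a compressed, illustrative sketch of that wiring, not a verbatim excerpt; it assumes the `vm.LogConfig`, `vm.Tracer`, `vm.Config` and `runtime.Config` fields shown elsewhere in this changeset and the `NewJSONLogger` constructor from the file above (same `cmd/evm` package).

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

// newTracingConfig sketches how cmd/evm selects a tracer: JSON output when the
// machine-readable flag is set, structured logs otherwise.
func newTracingConfig(machine bool) *runtime.Config {
	logconfig := &vm.LogConfig{
		DisableMemory: false, // would mirror the --nomemory flag
		DisableStack:  false, // would mirror the --nostack flag
	}

	var tracer vm.Tracer
	if machine {
		// Machine-readable JSON trace on stdout, as enabled by --json in the diff.
		tracer = NewJSONLogger(logconfig, os.Stdout)
	} else {
		// Human-readable structured logs otherwise.
		tracer = vm.NewStructLogger(logconfig)
	}

	return &runtime.Config{
		EVMConfig: vm.Config{
			Tracer: tracer,
			Debug:  true, // tracing only runs when Debug is enabled
		},
	}
}
```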
@ -35,6 +35,18 @@ var (
|
||||
Name: "debug",
|
||||
Usage: "output full trace logs",
|
||||
}
|
||||
MemProfileFlag = cli.StringFlag{
|
||||
Name: "memprofile",
|
||||
Usage: "creates a memory profile at the given path",
|
||||
}
|
||||
CPUProfileFlag = cli.StringFlag{
|
||||
Name: "cpuprofile",
|
||||
Usage: "creates a CPU profile at the given path",
|
||||
}
|
||||
StatDumpFlag = cli.BoolFlag{
|
||||
Name: "statdump",
|
||||
Usage: "displays stack and heap memory information",
|
||||
}
|
||||
CodeFlag = cli.StringFlag{
|
||||
Name: "code",
|
||||
Usage: "EVM code",
|
||||
@ -78,6 +90,26 @@ var (
|
||||
Name: "nogasmetering",
|
||||
Usage: "disable gas metering",
|
||||
}
|
||||
GenesisFlag = cli.StringFlag{
|
||||
Name: "prestate",
|
||||
Usage: "JSON file with prestate (genesis) config",
|
||||
}
|
||||
MachineFlag = cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output trace logs in machine readable format (json)",
|
||||
}
|
||||
SenderFlag = cli.StringFlag{
|
||||
Name: "sender",
|
||||
Usage: "The transaction origin",
|
||||
}
|
||||
DisableMemoryFlag = cli.BoolFlag{
|
||||
Name: "nomemory",
|
||||
Usage: "disable memory output",
|
||||
}
|
||||
DisableStackFlag = cli.BoolFlag{
|
||||
Name: "nostack",
|
||||
Usage: "disable stack output",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -93,6 +125,14 @@ func init() {
|
||||
DumpFlag,
|
||||
InputFlag,
|
||||
DisableGasMeteringFlag,
|
||||
MemProfileFlag,
|
||||
CPUProfileFlag,
|
||||
StatDumpFlag,
|
||||
GenesisFlag,
|
||||
MachineFlag,
|
||||
SenderFlag,
|
||||
DisableMemoryFlag,
|
||||
DisableStackFlag,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
compileCommand,
|
||||
|
@ -18,9 +18,11 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
goruntime "runtime"
|
||||
@ -28,11 +30,13 @@ import (
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/core/vm/runtime"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
@ -44,17 +48,63 @@ var runCommand = cli.Command{
|
||||
Description: `The run command runs arbitrary EVM code.`,
|
||||
}
|
||||
|
||||
// readGenesis will read the given JSON format genesis file and return
|
||||
// the initialized Genesis structure
|
||||
func readGenesis(genesisPath string) *core.Genesis {
|
||||
// Make sure we have a valid genesis JSON
|
||||
//genesisPath := ctx.Args().First()
|
||||
if len(genesisPath) == 0 {
|
||||
utils.Fatalf("Must supply path to genesis JSON file")
|
||||
}
|
||||
file, err := os.Open(genesisPath)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read genesis file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.NewDecoder(file).Decode(genesis); err != nil {
|
||||
utils.Fatalf("invalid genesis file: %v", err)
|
||||
}
|
||||
return genesis
|
||||
}
|
||||
|
||||
func runCmd(ctx *cli.Context) error {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
logconfig := &vm.LogConfig{
|
||||
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
|
||||
}
|
||||
|
||||
var (
|
||||
db, _ = ethdb.NewMemDatabase()
|
||||
statedb, _ = state.New(common.Hash{}, db)
|
||||
tracer vm.Tracer
|
||||
debugLogger *vm.StructLogger
|
||||
statedb *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.StringToAddress("sender")
|
||||
logger = vm.NewStructLogger(nil)
|
||||
)
|
||||
if ctx.GlobalBool(MachineFlag.Name) {
|
||||
tracer = NewJSONLogger(logconfig, os.Stdout)
|
||||
} else if ctx.GlobalBool(DebugFlag.Name) {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
tracer = debugLogger
|
||||
} else {
|
||||
debugLogger = vm.NewStructLogger(logconfig)
|
||||
}
|
||||
if ctx.GlobalString(GenesisFlag.Name) != "" {
|
||||
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
|
||||
_, statedb = gen.ToBlock()
|
||||
chainConfig = gen.Config
|
||||
} else {
|
||||
var db, _ = ethdb.NewMemDatabase()
|
||||
statedb, _ = state.New(common.Hash{}, db)
|
||||
}
|
||||
if ctx.GlobalString(SenderFlag.Name) != "" {
|
||||
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
|
||||
}
|
||||
|
||||
statedb.CreateAccount(sender)
|
||||
|
||||
var (
|
||||
@ -94,29 +144,46 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
|
||||
}
|
||||
|
||||
initialGas := ctx.GlobalUint64(GasFlag.Name)
|
||||
runtimeConfig := runtime.Config{
|
||||
Origin: sender,
|
||||
State: statedb,
|
||||
GasLimit: ctx.GlobalUint64(GasFlag.Name),
|
||||
GasLimit: initialGas,
|
||||
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
|
||||
Value: utils.GlobalBig(ctx, ValueFlag.Name),
|
||||
EVMConfig: vm.Config{
|
||||
Tracer: logger,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name),
|
||||
Tracer: tracer,
|
||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||
DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
|
||||
},
|
||||
}
|
||||
|
||||
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
|
||||
f, err := os.Create(cpuProfilePath)
|
||||
if err != nil {
|
||||
fmt.Println("could not create CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
fmt.Println("could not start CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
if chainConfig != nil {
|
||||
runtimeConfig.ChainConfig = chainConfig
|
||||
}
|
||||
tstart := time.Now()
|
||||
var leftOverGas uint64
|
||||
if ctx.GlobalBool(CreateFlag.Name) {
|
||||
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
|
||||
ret, _, err = runtime.Create(input, &runtimeConfig)
|
||||
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
|
||||
} else {
|
||||
receiver := common.StringToAddress("receiver")
|
||||
statedb.SetCode(receiver, code)
|
||||
|
||||
ret, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
|
||||
}
|
||||
execTime := time.Since(tstart)
|
||||
|
||||
@ -125,12 +192,29 @@ func runCmd(ctx *cli.Context) error {
|
||||
fmt.Println(string(statedb.Dump()))
|
||||
}
|
||||
|
||||
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
|
||||
f, err := os.Create(memProfilePath)
|
||||
if err != nil {
|
||||
fmt.Println("could not create memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.WriteHeapProfile(f); err != nil {
|
||||
fmt.Println("could not write memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
if debugLogger != nil {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, logger.StructLogs())
|
||||
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||
vm.WriteLogs(os.Stderr, statedb.Logs())
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(StatDumpFlag.Name) {
|
||||
var mem goruntime.MemStats
|
||||
goruntime.ReadMemStats(&mem)
|
||||
fmt.Fprintf(os.Stderr, `evm execution time: %v
|
||||
@ -138,14 +222,18 @@ heap objects: %d
|
||||
allocations: %d
|
||||
total allocations: %d
|
||||
GC calls: %d
|
||||
Gas used: %d
|
||||
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC)
|
||||
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
|
||||
}
|
||||
if tracer != nil {
|
||||
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime)
|
||||
} else {
|
||||
fmt.Printf("0x%x\n", ret)
|
||||
}
|
||||
|
||||
fmt.Printf("0x%x", ret)
|
||||
if err != nil {
|
||||
fmt.Printf(" error: %v", err)
|
||||
fmt.Printf(" error: %v\n", err)
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
}
|
||||
|
@ -27,10 +27,13 @@ import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@ -60,12 +63,13 @@ var (
|
||||
apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
|
||||
ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
|
||||
bootFlag = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
|
||||
netFlag = flag.Int("network", 0, "Network ID to use for the Ethereum protocol")
|
||||
netFlag = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
|
||||
statsFlag = flag.String("ethstats", "", "Ethstats network monitoring auth string")
|
||||
|
||||
netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
|
||||
payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
|
||||
minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
|
||||
tiersFlag = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")
|
||||
|
||||
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
|
||||
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
|
||||
@ -73,6 +77,9 @@ var (
|
||||
githubUser = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
|
||||
githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")
|
||||
|
||||
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
|
||||
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
|
||||
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
)
|
||||
|
||||
@ -85,21 +92,47 @@ func main() {
|
||||
flag.Parse()
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
|
||||
// Construct the payout tiers
|
||||
amounts := make([]string, *tiersFlag)
|
||||
periods := make([]string, *tiersFlag)
|
||||
for i := 0; i < *tiersFlag; i++ {
|
||||
// Calculate the amount for the next tier and format it
|
||||
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
|
||||
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
|
||||
if amount == 1 {
|
||||
amounts[i] = strings.TrimSuffix(amounts[i], "s")
|
||||
}
|
||||
// Calculate the period for the next tier and format it
|
||||
period := *minutesFlag * int(math.Pow(3, float64(i)))
|
||||
periods[i] = fmt.Sprintf("%d mins", period)
|
||||
if period%60 == 0 {
|
||||
period /= 60
|
||||
periods[i] = fmt.Sprintf("%d hours", period)
|
||||
|
||||
if period%24 == 0 {
|
||||
period /= 24
|
||||
periods[i] = fmt.Sprintf("%d days", period)
|
||||
}
|
||||
}
|
||||
if period == 1 {
|
||||
periods[i] = strings.TrimSuffix(periods[i], "s")
|
||||
}
|
||||
}
|
||||
// Load up and render the faucet website
|
||||
tmpl, err := Asset("faucet.html")
|
||||
if err != nil {
|
||||
log.Crit("Failed to load the faucet template", "err", err)
|
||||
}
|
||||
period := fmt.Sprintf("%d minute(s)", *minutesFlag)
|
||||
if *minutesFlag%60 == 0 {
|
||||
period = fmt.Sprintf("%d hour(s)", *minutesFlag/60)
|
||||
}
|
||||
website := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(string(tmpl))).Execute(website, map[string]interface{}{
|
||||
err = template.Must(template.New("").Parse(string(tmpl))).Execute(website, map[string]interface{}{
|
||||
"Network": *netnameFlag,
|
||||
"Amount": *payoutFlag,
|
||||
"Period": period,
|
||||
"Amounts": amounts,
|
||||
"Periods": periods,
|
||||
"Recaptcha": *captchaToken,
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("Failed to render the faucet template", "err", err)
|
||||
}
|
||||
// Load and parse the genesis block requested by the user
|
||||
blob, err := ioutil.ReadFile(*genesisFlag)
|
||||
if err != nil {
|
||||
@ -167,14 +200,14 @@ type faucet struct {
|
||||
price *big.Int // Current gas price to issue funds with
|
||||
|
||||
conns []*websocket.Conn // Currently live websocket connections
|
||||
history map[string]time.Time // History of users and their funding requests
|
||||
timeouts map[string]time.Time // History of users and their funding timeouts
|
||||
reqs []*request // Currently pending funding requests
|
||||
update chan struct{} // Channel to signal request updates
|
||||
|
||||
lock sync.RWMutex // Lock protecting the faucet's internals
|
||||
}
|
||||
|
||||
func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network int, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
|
||||
func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
|
||||
// Assemble the raw devp2p protocol stack
|
||||
stack, err := node.New(&node.Config{
|
||||
Name: "geth",
|
||||
@ -236,7 +269,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network i
|
||||
index: index,
|
||||
keystore: ks,
|
||||
account: ks.Accounts()[0],
|
||||
history: make(map[string]time.Time),
|
||||
timeouts: make(map[string]time.Time),
|
||||
update: make(chan struct{}, 1),
|
||||
}, nil
|
||||
}
|
||||
@ -290,14 +323,23 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
})
|
||||
header, _ := f.client.HeaderByNumber(context.Background(), nil)
|
||||
websocket.JSON.Send(conn, header)
|
||||
// Send the initial block to the client
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
header, err := f.client.HeaderByNumber(ctx, nil)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to retrieve latest header", "err", err)
|
||||
} else {
|
||||
websocket.JSON.Send(conn, header)
|
||||
}
|
||||
// Keep reading requests from the websocket until the connection breaks
|
||||
for {
|
||||
// Fetch the next funding request and validate against github
|
||||
var msg struct {
|
||||
URL string `json:"url"`
|
||||
Tier uint `json:"tier"`
|
||||
Captcha string `json:"captcha"`
|
||||
}
|
||||
if err := websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
return
|
||||
@ -306,8 +348,39 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "URL doesn't link to GitHub Gists"})
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "gist", msg.URL)
|
||||
if msg.Tier >= uint(*tiersFlag) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid funding tier requested"})
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "gist", msg.URL, "tier", msg.Tier)
|
||||
|
||||
// If captcha verifications are enabled, make sure we're not dealing with a robot
|
||||
if *captchaToken != "" {
|
||||
form := url.Values{}
|
||||
form.Add("secret", *captchaSecret)
|
||||
form.Add("response", msg.Captcha)
|
||||
|
||||
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
Errors json.RawMessage `json:"error-codes"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
if !result.Success {
|
||||
log.Warn("Captcha verification failed", "err", string(result.Errors))
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Beep-bop, you're a robot!"})
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Retrieve the gist from the GitHub Gist APIs
|
||||
parts := strings.Split(msg.URL, "/")
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
|
||||
@ -334,7 +407,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
continue
|
||||
}
|
||||
if gist.Owner.Login == "" {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Nice try ;)"})
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Anonymous Gists not allowed"})
|
||||
continue
|
||||
}
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
@ -348,15 +421,30 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "No Ethereum address found to fund"})
|
||||
continue
|
||||
}
|
||||
// Validate the user's existence since the API is unhelpful here
|
||||
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid user... boom!"})
|
||||
continue
|
||||
}
|
||||
// Ensure the user didn't request funds too recently
|
||||
f.lock.Lock()
|
||||
var (
|
||||
fund bool
|
||||
elapsed time.Duration
|
||||
timeout time.Time
|
||||
)
|
||||
if elapsed = time.Since(f.history[gist.Owner.Login]); elapsed > time.Duration(*minutesFlag)*time.Minute {
|
||||
if timeout = f.timeouts[gist.Owner.Login]; time.Now().After(timeout) {
|
||||
// User wasn't funded recently, create the funding transaction
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether), big.NewInt(21000), f.price, nil)
|
||||
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
|
||||
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
|
||||
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
||||
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil)
|
||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
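The funding amount above scales the base payout by 2.5 per tier, implemented in integer arithmetic as a multiply by 5^tier followed by a divide by 2^tier. The following standalone sketch reproduces that computation for illustration only; `payout` and `tier` are hypothetical inputs, not part of the changeset.

```go
package main

import (
	"fmt"
	"math/big"
)

// tierAmount mirrors the faucet diff above: amount = payout * 5^tier / 2^tier,
// i.e. payout * 2.5^tier, expressed in wei.
func tierAmount(payout, tier int64) *big.Int {
	ether := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil) // 1 ether in wei

	amount := new(big.Int).Mul(big.NewInt(payout), ether)
	amount.Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(tier), nil))
	amount.Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(tier), nil))
	return amount
}

func main() {
	for tier := int64(0); tier < 3; tier++ {
		fmt.Printf("tier %d: %s wei\n", tier, tierAmount(1, tier))
	}
}
```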
@ -375,14 +463,14 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
})
|
||||
f.history[gist.Owner.Login] = time.Now()
|
||||
f.timeouts[gist.Owner.Login] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
|
||||
fund = true
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
// Send an error if too frequent funding, othewise a success
|
||||
if !fund {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("User already funded %s ago", common.PrettyDuration(elapsed))})
|
||||
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))})
|
||||
continue
|
||||
}
|
||||
websocket.JSON.Send(conn, map[string]string{"success": fmt.Sprintf("Funding request accepted for %s into %s", gist.Owner.Login, address.Hex())})
|
||||
|
@ -51,9 +51,13 @@
|
||||
<div class="input-group">
|
||||
<input id="gist" type="text" class="form-control" placeholder="GitHub Gist URL containing your Ethereum address...">
|
||||
<span class="input-group-btn">
|
||||
<button class="btn btn-default" type="button" onclick="submit()">Give me Ether!</button>
|
||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
||||
<li><a style="text-align: center;" onclick="tier={{$idx}}; {{if $.Recaptcha}}grecaptcha.execute(){{else}}submit({{$idx}}){{end}}">{{$amount}} / {{index $.Periods $idx}}</a></li>{{end}}
|
||||
</ul>
|
||||
</span>
|
||||
</div>
|
||||
</div>{{if .Recaptcha}}
|
||||
<div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
@ -76,8 +80,9 @@
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-12">
|
||||
<h3>How does this work?</h3>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limit of <strong>{{.Amount}} Ether(s) / {{.Period}}</strong>.</p>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limits.</p>
|
||||
<p>To request funds, simply create a <a href="https://gist.github.com/" target="_about:blank">GitHub Gist</a> with your Ethereum address pasted into the contents (the file name doesn't matter), copy paste the gists URL into the above input box and fire away! You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@ -87,10 +92,12 @@
|
||||
// Global variables to hold the current status of the faucet
|
||||
var attempt = 0;
|
||||
var server;
|
||||
var tier = 0;
|
||||
|
||||
// Define the function that submits a gist url to the server
|
||||
var submit = function() {
|
||||
server.send(JSON.stringify({url: $("#gist")[0].value}));
|
||||
var submit = function({{if .Recaptcha}}captcha{{end}}) {
|
||||
server.send(JSON.stringify({url: $("#gist")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
grecaptcha.reset();{{end}}
|
||||
};
|
||||
// Define a method to reconnect upon server loss
|
||||
var reconnect = function() {
|
||||
@ -134,10 +141,10 @@
|
||||
}
|
||||
}
|
||||
server.onclose = function() { setTimeout(reconnect, 3000); };
|
||||
server.onerror = function() { setTimeout(reconnect, 3000); };
|
||||
}
|
||||
// Establish a websocket connection to the API server
|
||||
reconnect();
|
||||
</script>
|
||||
</script>{{if .Recaptcha}}
|
||||
<script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
|
||||
</body>
|
||||
</html>
|
||||
|
File diff suppressed because one or more lines are too long
@ -40,32 +40,39 @@ var (
|
||||
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.
|
||||
|
||||
`,
|
||||
passwordfile as argument containing the wallet password in plaintext.`,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: importWallet,
|
||||
|
||||
Name: "import",
|
||||
Usage: "Import Ethereum presale wallet",
|
||||
ArgsUsage: "<keyFile>",
|
||||
Action: utils.MigrateFlags(importWallet),
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
TODO: Please write this
|
||||
`,
|
||||
geth wallet [options] /path/to/my/presale.wallet
|
||||
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
accountCommand = cli.Command{
|
||||
Action: accountList,
|
||||
Name: "account",
|
||||
Usage: "Manage accounts",
|
||||
ArgsUsage: "",
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Description: `
|
||||
Manage accounts lets you create new accounts, list all existing accounts,
|
||||
import a private key into a new account.
|
||||
|
||||
' help' shows a list of subcommands or help for one subcommand.
|
||||
Manage accounts, list all existing accounts, import a private key into a new
|
||||
account, create a new account or update an existing account.
|
||||
|
||||
It supports interactive mode, when you are prompted for password as well as
|
||||
non-interactive mode where passwords are supplied via a given password file.
|
||||
@ -80,36 +87,34 @@ Note that exporting your key in unencrypted format is NOT supported.
|
||||
Keys are stored under <DATADIR>/keystore.
|
||||
It is safe to transfer the entire directory or the individual keys therein
|
||||
between ethereum nodes by simply copying.
|
||||
Make sure you backup your keys regularly.
|
||||
|
||||
In order to use your account to send transactions, you need to unlock them using
|
||||
the '--unlock' option. The argument is a space separated list of addresses or
|
||||
indexes. If used non-interactively with a passwordfile, the file should contain
|
||||
the respective passwords one per line. If you unlock n accounts and the password
|
||||
file contains less than n entries, then the last password is meant to apply to
|
||||
all remaining accounts.
|
||||
|
||||
And finally. DO NOT FORGET YOUR PASSWORD.
|
||||
`,
|
||||
Make sure you backup your keys regularly.`,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: accountList,
|
||||
Name: "list",
|
||||
Usage: "Print account addresses",
|
||||
ArgsUsage: " ",
|
||||
Usage: "Print summary of existing accounts",
|
||||
Action: utils.MigrateFlags(accountList),
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
},
|
||||
Description: `
|
||||
TODO: Please write this
|
||||
`,
|
||||
Print a short summary of all accounts`,
|
||||
},
|
||||
{
|
||||
Action: accountCreate,
|
||||
Name: "new",
|
||||
Usage: "Create a new account",
|
||||
ArgsUsage: " ",
|
||||
Action: utils.MigrateFlags(accountCreate),
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
geth account new
|
||||
|
||||
Creates a new account. Prints the address.
|
||||
Creates a new account and prints the address.
|
||||
|
||||
The account is saved in encrypted format, you are prompted for a passphrase.
|
||||
|
||||
@ -117,17 +122,20 @@ You must remember this passphrase to unlock your account in the future.
|
||||
|
||||
For non-interactive use the passphrase can be specified with the --password flag:
|
||||
|
||||
geth --password <passwordfile> account new
|
||||
|
||||
Note, this is meant to be used for testing only, it is a bad idea to save your
|
||||
password to file or expose in any other way.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: accountUpdate,
|
||||
Name: "update",
|
||||
Usage: "Update an existing account",
|
||||
Action: utils.MigrateFlags(accountUpdate),
|
||||
ArgsUsage: "<address>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
geth account update <address>
|
||||
|
||||
@ -141,16 +149,22 @@ format to the newest format or change the password for an account.
|
||||
|
||||
For non-interactive use the passphrase can be specified with the --password flag:
|
||||
|
||||
geth --password <passwordfile> account update <address>
|
||||
geth account update [options] <address>
|
||||
|
||||
Since only one password can be given, only format update can be performed,
|
||||
changing your password is only possible interactively.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: accountImport,
|
||||
Name: "import",
|
||||
Usage: "Import a private key into a new account",
|
||||
Action: utils.MigrateFlags(accountImport),
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
ArgsUsage: "<keyFile>",
|
||||
Description: `
|
||||
geth account import <keyfile>
|
||||
@ -166,7 +180,7 @@ You must remember this passphrase to unlock your account in the future.
|
||||
|
||||
For non-interactive use the passphrase can be specified with the -password flag:
|
||||
|
||||
geth --password <passwordfile> account import <keyfile>
|
||||
geth account import [options] <keyfile>
|
||||
|
||||
Note:
|
||||
As you can directly copy your encrypted accounts to another ethereum instance,
|
||||
@ -219,7 +233,7 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
|
||||
return accounts.Account{}, ""
|
||||
}
|
||||
|
||||
// getPassPhrase retrieves the passwor associated with an account, either fetched
|
||||
// getPassPhrase retrieves the password associated with an account, either fetched
|
||||
// from a list of preloaded passphrases, or requested interactively from the user.
|
||||
func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) string {
|
||||
// If a list of passwords was supplied, retrieve from them
|
||||
@ -298,11 +312,13 @@ func accountUpdate(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
|
||||
account, oldPassword := unlockAccount(ctx, ks, ctx.Args().First(), 0, nil)
|
||||
for _, addr := range ctx.Args() {
|
||||
account, oldPassword := unlockAccount(ctx, ks, addr, 0, nil)
|
||||
newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
|
||||
if err := ks.Update(account, oldPassword, newPassword); err != nil {
|
||||
utils.Fatalf("Could not update the account: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -43,22 +43,22 @@ func tmpDatadirWithKeystore(t *testing.T) string {
|
||||
}
|
||||
|
||||
func TestAccountListEmpty(t *testing.T) {
|
||||
geth := runGeth(t, "account")
|
||||
geth.expectExit()
|
||||
geth := runGeth(t, "account", "list")
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestAccountList(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t, "--datadir", datadir, "account")
|
||||
defer geth.expectExit()
|
||||
geth := runGeth(t, "account", "list", "--datadir", datadir)
|
||||
defer geth.ExpectExit()
|
||||
if runtime.GOOS == "windows" {
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa
|
||||
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz
|
||||
`)
|
||||
} else {
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa
|
||||
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz
|
||||
@ -67,21 +67,21 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/k
|
||||
}
|
||||
|
||||
func TestAccountNew(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "account", "new")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Repeat passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
|
||||
geth.ExpectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
|
||||
}
|
||||
|
||||
func TestAccountNewBadRepeat(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "account", "new")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "something"}}
|
||||
@ -92,11 +92,11 @@ Fatal: Passphrases do not match
|
||||
|
||||
func TestAccountUpdate(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
geth := runGeth(t, "account", "update",
|
||||
"--datadir", datadir, "--lightkdf",
|
||||
"account", "update", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
"f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
@ -107,9 +107,9 @@ Repeat passphrase: {{.InputLine "foobar2"}}
|
||||
}
|
||||
|
||||
func TestWalletImport(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "wallet", "import", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foo"}}
|
||||
Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
@ -122,9 +122,9 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
}
|
||||
|
||||
func TestWalletImportBadPassword(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "wallet", "import", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong"}}
|
||||
Fatal: could not decrypt key with given passphrase
|
||||
@ -137,19 +137,19 @@ func TestUnlockFlag(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -160,8 +160,8 @@ func TestUnlockFlagWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong1"}}
|
||||
@ -180,14 +180,14 @@ func TestUnlockFlagMultiIndex(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account 0 | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Unlocking account 2 | Attempt 1/3
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
@ -195,7 +195,7 @@ Passphrase: {{.InputLine "foobar"}}
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -207,7 +207,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--password", "testdata/passwords.txt", "--unlock", "0,2",
|
||||
"js", "testdata/empty.js")
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
@ -215,7 +215,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
"=0x289d485d9771714cce91d3393d764e1311907acc",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -226,8 +226,8 @@ func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--password", "testdata/wrong-passwords.txt", "--unlock", "0,2")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
Fatal: Failed to unlock account 0 (could not decrypt key with given passphrase)
|
||||
`)
|
||||
}
|
||||
@ -238,14 +238,14 @@ func TestUnlockFlagAmbiguous(t *testing.T) {
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
|
||||
"js", "testdata/empty.js")
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
|
||||
// Helper for the expect template, returns absolute keystore path.
|
||||
geth.setTemplateFunc("keypath", func(file string) string {
|
||||
geth.SetTemplateFunc("keypath", func(file string) string {
|
||||
abs, _ := filepath.Abs(filepath.Join(store, file))
|
||||
return abs
|
||||
})
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
@ -257,14 +257,14 @@ Your passphrase unlocked keystore://{{keypath "1"}}
|
||||
In order to avoid this warning, you need to remove the following duplicate key files:
|
||||
keystore://{{keypath "2"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
|
||||
wantMessages := []string{
|
||||
"Unlocked account",
|
||||
"=0xf466859ead1932d743d622cb74fc058882e8648a",
|
||||
}
|
||||
for _, m := range wantMessages {
|
||||
if !strings.Contains(geth.stderrText(), m) {
|
||||
if !strings.Contains(geth.StderrText(), m) {
|
||||
t.Errorf("stderr text does not contain %q", m)
|
||||
}
|
||||
}
|
||||
@ -275,14 +275,14 @@ func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||
geth := runGeth(t,
|
||||
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
defer geth.ExpectExit()
|
||||
|
||||
// Helper for the expect template, returns absolute keystore path.
|
||||
geth.setTemplateFunc("keypath", func(file string) string {
|
||||
geth.SetTemplateFunc("keypath", func(file string) string {
|
||||
abs, _ := filepath.Abs(filepath.Join(store, file))
|
||||
return abs
|
||||
})
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "wrong"}}
|
||||
@ -292,5 +292,5 @@ Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
|
||||
Testing your passphrase against all of them...
|
||||
Fatal: None of the listed files could be unlocked.
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
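The renames in the unlock tests above (expect → Expect, expectExit → ExpectExit, stderrText → StderrText, setTemplateFunc → SetTemplateFunc) follow the move of the test harness into the shared internal/cmdtest package later in this comparison. Below is a minimal sketch of how a geth CLI test reads against the exported helpers; the helper names and template syntax are taken from the hunks above, while the test scenario itself is only an illustration.

func TestUnlockSketch(t *testing.T) {
	// runGeth re-execs the test binary as "geth-test" (see cmd_test.go below)
	// and assigns a temporary --datadir when none is given.
	geth := runGeth(t,
		"--nat", "none", "--nodiscover", "--dev",
		"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
		"js", "testdata/empty.js")
	defer geth.ExpectExit()

	// Expect renders its argument as a text/template before matching stdout;
	// {{.InputLine "foobar"}} writes one line to the child's stdin and expands
	// to the empty string in the expected output.
	geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foobar"}}
`)

	// Log output goes to stderr and is inspected separately.
	if !strings.Contains(geth.StderrText(), "Unlocked account") {
		t.Errorf("stderr text does not contain %q", "Unlocked account")
	}
}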
@ -29,11 +29,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/cmd/internal/browser"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var bugCommand = cli.Command{
|
||||
Action: reportBug,
|
||||
Action: utils.MigrateFlags(reportBug),
|
||||
Name: "bug",
|
||||
Usage: "opens a window to report a bug on the geth repo",
|
||||
ArgsUsage: " ",
|
||||
|
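Throughout this comparison each command's Action is wrapped in utils.MigrateFlags(...). The wrapper itself is not part of the hunks shown here, so the following is only a plausible sketch of what such a helper does with urfave/cli v1: copy any flag set on the subcommand into the global context before calling the real action, so that command-scoped flags behave like their global counterparts.

// Illustrative sketch only; the real helper lives in cmd/utils and may differ.
func migrateFlags(action func(ctx *cli.Context) error) func(*cli.Context) error {
	return func(ctx *cli.Context) error {
		for _, name := range ctx.FlagNames() {
			if ctx.IsSet(name) {
				// Mirror the locally-set value into the global flag set.
				ctx.GlobalSet(name, ctx.String(name))
			}
		}
		return action(ctx)
	}
}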
@ -40,80 +40,98 @@ import (
|
||||
|
||||
var (
|
||||
initCommand = cli.Command{
|
||||
Action: initGenesis,
|
||||
Action: utils.MigrateFlags(initGenesis),
|
||||
Name: "init",
|
||||
Usage: "Bootstrap and initialize a new genesis block",
|
||||
ArgsUsage: "<genesisPath>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The init command initializes a new genesis block and definition for the network.
|
||||
This is a destructive action and changes the network in which you will be
|
||||
participating.
|
||||
`,
|
||||
|
||||
It expects the genesis file as argument.`,
|
||||
}
|
||||
importCommand = cli.Command{
|
||||
Action: importChain,
|
||||
Action: utils.MigrateFlags(importChain),
|
||||
Name: "import",
|
||||
Usage: "Import a blockchain file",
|
||||
ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The import command imports blocks from an RLP-encoded form. The form can be one file
|
||||
with several RLP-encoded blocks, or several files can be used.
|
||||
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.
|
||||
`,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
}
|
||||
exportCommand = cli.Command{
|
||||
Action: exportChain,
|
||||
Action: utils.MigrateFlags(exportChain),
|
||||
Name: "export",
|
||||
Usage: "Export blockchain into file",
|
||||
ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
Requires a first argument of the file to write to.
|
||||
Optional second and third arguments control the first and
|
||||
last block to write. In this mode, the file will be appended
|
||||
if already existing.
|
||||
`,
|
||||
if already existing.`,
|
||||
}
|
||||
removedbCommand = cli.Command{
|
||||
Action: removeDB,
|
||||
Action: utils.MigrateFlags(removeDB),
|
||||
Name: "removedb",
|
||||
Usage: "Remove blockchain and state databases",
|
||||
ArgsUsage: " ",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
TODO: Please write this
|
||||
`,
|
||||
Remove blockchain and state databases`,
|
||||
}
|
||||
dumpCommand = cli.Command{
|
||||
Action: dump,
|
||||
Action: utils.MigrateFlags(dump),
|
||||
Name: "dump",
|
||||
Usage: "Dump a specific block from storage",
|
||||
ArgsUsage: "[<blockHash> | <blockNum>]...",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The arguments are interpreted as block numbers or hashes.
|
||||
Use "ethereum dump 0" to dump the genesis block.
|
||||
`,
|
||||
Use "ethereum dump 0" to dump the genesis block.`,
|
||||
}
|
||||
)
|
||||
|
||||
// initGenesis will initialise the given JSON format genesis file and writes it as
|
||||
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
|
||||
func initGenesis(ctx *cli.Context) error {
|
||||
// Make sure we have a valid genesis JSON
|
||||
genesisPath := ctx.Args().First()
|
||||
if len(genesisPath) == 0 {
|
||||
utils.Fatalf("must supply path to genesis JSON file")
|
||||
utils.Fatalf("Must supply path to genesis JSON file")
|
||||
}
|
||||
|
||||
stack := makeFullNode(ctx)
|
||||
chaindb := utils.MakeChainDatabase(ctx, stack)
|
||||
|
||||
file, err := os.Open(genesisPath)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to read genesis file: %v", err)
|
||||
utils.Fatalf("Failed to read genesis file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
@ -121,12 +139,19 @@ func initGenesis(ctx *cli.Context) error {
|
||||
if err := json.NewDecoder(file).Decode(genesis); err != nil {
|
||||
utils.Fatalf("invalid genesis file: %v", err)
|
||||
}
|
||||
|
||||
// Open and initialise both full and light databases
|
||||
stack := makeFullNode(ctx)
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
chaindb, err := stack.OpenDatabase(name, 0, 0)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to open database: %v", err)
|
||||
}
|
||||
_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to write genesis block: %v", err)
|
||||
utils.Fatalf("Failed to write genesis block: %v", err)
|
||||
}
|
||||
log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
|
||||
}
|
||||
log.Info("Successfully wrote genesis state", "hash", hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -245,24 +270,29 @@ func exportChain(ctx *cli.Context) error {
|
||||
|
||||
func removeDB(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
dbdir := stack.ResolvePath(utils.ChainDbName(ctx))
|
||||
if !common.FileExist(dbdir) {
|
||||
fmt.Println(dbdir, "does not exist")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
// Ensure the database exists in the first place
|
||||
logger := log.New("database", name)
|
||||
|
||||
dbdir := stack.ResolvePath(name)
|
||||
if !common.FileExist(dbdir) {
|
||||
logger.Info("Database doesn't exist, skipping", "path", dbdir)
|
||||
continue
|
||||
}
|
||||
// Confirm removal and execute
|
||||
fmt.Println(dbdir)
|
||||
confirm, err := console.Stdin.PromptConfirm("Remove this database?")
|
||||
switch {
|
||||
case err != nil:
|
||||
utils.Fatalf("%v", err)
|
||||
case !confirm:
|
||||
fmt.Println("Operation aborted")
|
||||
logger.Warn("Database deletion aborted")
|
||||
default:
|
||||
fmt.Println("Removing...")
|
||||
start := time.Now()
|
||||
os.RemoveAll(dbdir)
|
||||
fmt.Printf("Removed in %v\n", time.Since(start))
|
||||
logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -38,10 +38,11 @@ import (
|
||||
|
||||
var (
|
||||
dumpConfigCommand = cli.Command{
|
||||
Action: dumpConfig,
|
||||
Action: utils.MigrateFlags(dumpConfig),
|
||||
Name: "dumpconfig",
|
||||
Usage: "Show configuration values",
|
||||
ArgsUsage: "",
|
||||
Flags: append(nodeFlags, rpcFlags...),
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `The dumpconfig command shows configuration values.`,
|
||||
}
|
||||
|
@ -29,41 +29,44 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag}
|
||||
|
||||
consoleCommand = cli.Command{
|
||||
Action: localConsole,
|
||||
Action: utils.MigrateFlags(localConsole),
|
||||
Name: "console",
|
||||
Usage: "Start an interactive JavaScript environment",
|
||||
ArgsUsage: "", // TODO: Write this!
|
||||
Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.`,
|
||||
}
|
||||
|
||||
attachCommand = cli.Command{
|
||||
Action: remoteConsole,
|
||||
Action: utils.MigrateFlags(remoteConsole),
|
||||
Name: "attach",
|
||||
Usage: "Start an interactive JavaScript environment (connect to node)",
|
||||
ArgsUsage: "", // TODO: Write this!
|
||||
ArgsUsage: "[endpoint]",
|
||||
Flags: append(consoleFlags, utils.DataDirFlag),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
||||
This command allows you to open a console on a running geth node.
|
||||
`,
|
||||
This command allows you to open a console on a running geth node.`,
|
||||
}
|
||||
|
||||
javascriptCommand = cli.Command{
|
||||
Action: ephemeralConsole,
|
||||
Action: utils.MigrateFlags(ephemeralConsole),
|
||||
Name: "js",
|
||||
Usage: "Execute the specified JavaScript files",
|
||||
ArgsUsage: "", // TODO: Write this!
|
||||
ArgsUsage: "<jsfile> [jsfile...]",
|
||||
Flags: append(nodeFlags, consoleFlags...),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console`,
|
||||
}
|
||||
)
|
||||
|
||||
@ -81,11 +84,12 @@ func localConsole(ctx *cli.Context) error {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DataDir: utils.MakeDataDir(ctx),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
@ -118,17 +122,18 @@ func remoteConsole(ctx *cli.Context) error {
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// If only a short execution was requested, evaluate and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
console.Evaluate(script)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise print the welcome screen and enter interactive mode
|
||||
console.Welcome()
|
||||
console.Interactive()
|
||||
@ -151,7 +156,7 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
|
||||
}
|
||||
|
||||
// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
|
||||
// console to it, and each of the files specified as arguments and tears the
|
||||
// console to it, executes each of the files specified as arguments and tears
|
||||
// everything down.
|
||||
func ephemeralConsole(ctx *cli.Context) error {
|
||||
// Create and start the node based on the CLI flags
|
||||
@ -165,11 +170,12 @@ func ephemeralConsole(ctx *cli.Context) error {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DataDir: utils.MakeDataDir(ctx),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
|
@ -47,15 +47,15 @@ func TestConsoleWelcome(t *testing.T) {
|
||||
"console")
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
geth.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.setTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.setTemplateFunc("gover", runtime.Version)
|
||||
geth.setTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.setTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
geth.SetTemplateFunc("gover", runtime.Version)
|
||||
geth.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
geth.expect(`
|
||||
geth.Expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}}
|
||||
@ -66,7 +66,7 @@ at block: 0 ({{niltime}})
|
||||
|
||||
> {{.InputLine "exit"}}
|
||||
`)
|
||||
geth.expectExit()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
// Tests that a console can be attached to a running node via various means.
|
||||
@ -90,8 +90,8 @@ func TestIPCAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestHTTPAttachWelcome(t *testing.T) {
|
||||
@ -104,8 +104,8 @@ func TestHTTPAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "http://localhost:"+port, httpAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestWSAttachWelcome(t *testing.T) {
|
||||
@ -119,29 +119,29 @@ func TestWSAttachWelcome(t *testing.T) {
|
||||
time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
|
||||
testAttachWelcome(t, geth, "ws://localhost:"+port, httpAPIs)
|
||||
|
||||
geth.interrupt()
|
||||
geth.expectExit()
|
||||
geth.Interrupt()
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
|
||||
// Attach to a running geth node and terminate immediately
|
||||
attach := runGeth(t, "attach", endpoint)
|
||||
defer attach.expectExit()
|
||||
attach.stdin.Close()
|
||||
defer attach.ExpectExit()
|
||||
attach.CloseStdin()
|
||||
|
||||
// Gather all the infos the welcome message needs to contain
|
||||
attach.setTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.setTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.setTemplateFunc("gover", runtime.Version)
|
||||
attach.setTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.setTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.setTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.setTemplateFunc("apis", func() string { return apis })
|
||||
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
|
||||
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
|
||||
attach.SetTemplateFunc("gover", runtime.Version)
|
||||
attach.SetTemplateFunc("gethver", func() string { return params.Version })
|
||||
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
|
||||
attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
|
||||
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
|
||||
attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })
|
||||
attach.SetTemplateFunc("apis", func() string { return apis })
|
||||
|
||||
// Verify the actual welcome message to the required template
|
||||
attach.expect(`
|
||||
attach.Expect(`
|
||||
Welcome to the Geth JavaScript console!
|
||||
|
||||
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}}
|
||||
@ -152,7 +152,7 @@ at block: 0 ({{niltime}}){{if ipc}}
|
||||
|
||||
> {{.InputLine "exit" }}
|
||||
`)
|
||||
attach.expectExit()
|
||||
attach.ExpectExit()
|
||||
}
|
||||
|
||||
// trulyRandInt generates a crypto random integer used by the console tests to
|
||||
|
@ -112,12 +112,12 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
|
||||
if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil {
|
||||
t.Fatalf("test %d: failed to write genesis file: %v", test, err)
|
||||
}
|
||||
runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
|
||||
runGeth(t, "--datadir", datadir, "init", json).WaitExit()
|
||||
} else {
|
||||
// Force chain initialization
|
||||
args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir}
|
||||
geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...)
|
||||
geth.cmd.Wait()
|
||||
geth.WaitExit()
|
||||
}
|
||||
// Retrieve the DAO config flag from the database
|
||||
path := filepath.Join(datadir, "geth", "chaindata")
|
||||
|
@ -97,14 +97,14 @@ func TestCustomGenesis(t *testing.T) {
|
||||
if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil {
|
||||
t.Fatalf("test %d: failed to write genesis file: %v", i, err)
|
||||
}
|
||||
runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
|
||||
runGeth(t, "--datadir", datadir, "init", json).WaitExit()
|
||||
|
||||
// Query the custom genesis block
|
||||
geth := runGeth(t,
|
||||
"--datadir", datadir, "--maxpeers", "0", "--port", "0",
|
||||
"--nodiscover", "--nat", "none", "--ipcdisable",
|
||||
"--exec", tt.query, "console")
|
||||
geth.expectRegexp(tt.result)
|
||||
geth.expectExit()
|
||||
geth.ExpectRegexp(tt.result)
|
||||
geth.ExpectExit()
|
||||
}
|
||||
}
|
||||
|
147
cmd/geth/main.go
@ -49,6 +49,82 @@ var (
|
||||
relOracle = common.HexToAddress("0xfa7b9770ca4cb04296cac84f37736d4041251cdf")
|
||||
// The app that holds all commands and flags.
|
||||
app = utils.NewApp(gitCommit, "the go-ethereum command line interface")
|
||||
// flags that configure the node
|
||||
nodeFlags = []cli.Flag{
|
||||
utils.IdentityFlag,
|
||||
utils.UnlockedAccountFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.BootnodesFlag,
|
||||
utils.BootnodesV4Flag,
|
||||
utils.BootnodesV5Flag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
utils.TxPoolGlobalSlotsFlag,
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.CacheFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.GasPriceFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningEnabledFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.NATFlag,
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.NoCompactionFlag,
|
||||
utils.GpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.ExtraDataFlag,
|
||||
configFileFlag,
|
||||
}
|
||||
|
||||
rpcFlags = []cli.Flag{
|
||||
utils.RPCEnabledFlag,
|
||||
utils.RPCListenAddrFlag,
|
||||
utils.RPCPortFlag,
|
||||
utils.RPCApiFlag,
|
||||
utils.WSEnabledFlag,
|
||||
utils.WSListenAddrFlag,
|
||||
utils.WSPortFlag,
|
||||
utils.WSApiFlag,
|
||||
utils.WSAllowedOriginsFlag,
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -81,70 +157,9 @@ func init() {
|
||||
dumpConfigCommand,
|
||||
}
|
||||
|
||||
app.Flags = []cli.Flag{
|
||||
utils.IdentityFlag,
|
||||
utils.UnlockedAccountFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.BootnodesFlag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.CacheFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.JSpathFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.GasPriceFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningEnabledFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.NATFlag,
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.RPCEnabledFlag,
|
||||
utils.RPCListenAddrFlag,
|
||||
utils.RPCPortFlag,
|
||||
utils.RPCApiFlag,
|
||||
utils.WSEnabledFlag,
|
||||
utils.WSListenAddrFlag,
|
||||
utils.WSPortFlag,
|
||||
utils.WSApiFlag,
|
||||
utils.WSAllowedOriginsFlag,
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreloadJSFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestNetFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.NoCompactionFlag,
|
||||
utils.GpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.ExtraDataFlag,
|
||||
configFileFlag,
|
||||
}
|
||||
app.Flags = append(app.Flags, nodeFlags...)
|
||||
app.Flags = append(app.Flags, rpcFlags...)
|
||||
app.Flags = append(app.Flags, consoleFlags...)
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
@ -237,10 +252,12 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
}()
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("ethereum service not running: %v", err)
|
||||
}
|
||||
// Use a reduced number of threads if requested
|
||||
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
|
||||
type threaded interface {
|
||||
SetThreads(threads int)
|
||||
@ -249,6 +266,8 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
th.SetThreads(threads)
|
||||
}
|
||||
}
|
||||
// Set the gas price to the limits from the CLI and start mining
|
||||
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
|
||||
if err := ethereum.StartMining(true); err != nil {
|
||||
utils.Fatalf("Failed to start mining: %v", err)
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ import (
|
||||
|
||||
var (
|
||||
makedagCommand = cli.Command{
|
||||
Action: makedag,
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash DAG (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
@ -47,7 +47,7 @@ Regular users do not need to execute it.
|
||||
`,
|
||||
}
|
||||
versionCommand = cli.Command{
|
||||
Action: version,
|
||||
Action: utils.MigrateFlags(version),
|
||||
Name: "version",
|
||||
Usage: "Print version numbers",
|
||||
ArgsUsage: " ",
|
||||
@ -57,7 +57,7 @@ The output of this command is supposed to be machine-readable.
|
||||
`,
|
||||
}
|
||||
licenseCommand = cli.Command{
|
||||
Action: license,
|
||||
Action: utils.MigrateFlags(license),
|
||||
Name: "license",
|
||||
Usage: "Display license information",
|
||||
ArgsUsage: " ",
|
||||
@ -103,7 +103,7 @@ func version(ctx *cli.Context) error {
|
||||
}
|
||||
fmt.Println("Architecture:", runtime.GOARCH)
|
||||
fmt.Println("Protocol Versions:", eth.ProtocolVersions)
|
||||
fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
|
||||
fmt.Println("Network Id:", eth.DefaultConfig.NetworkId)
|
||||
fmt.Println("Go Version:", runtime.Version())
|
||||
fmt.Println("Operating System:", runtime.GOOS)
|
||||
fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
|
||||
|
@ -49,7 +49,7 @@ var (
|
||||
Usage: "Refresh interval in seconds",
|
||||
}
|
||||
monitorCommand = cli.Command{
|
||||
Action: monitor,
|
||||
Action: utils.MigrateFlags(monitor), // keep track of migration progress
|
||||
Name: "monitor",
|
||||
Usage: "Monitor and visualize node metrics",
|
||||
ArgsUsage: " ",
|
||||
|
@ -17,18 +17,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"sync"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
)
|
||||
|
||||
func tmpdir(t *testing.T) string {
|
||||
@ -40,36 +35,37 @@ func tmpdir(t *testing.T) string {
|
||||
}
|
||||
|
||||
type testgeth struct {
|
||||
// For total convenience, all testing methods are available.
|
||||
*testing.T
|
||||
*cmdtest.TestCmd
|
||||
|
||||
// template variables for expect
|
||||
Datadir string
|
||||
Executable string
|
||||
Etherbase string
|
||||
Func template.FuncMap
|
||||
|
||||
removeDatadir bool
|
||||
cmd *exec.Cmd
|
||||
stdout *bufio.Reader
|
||||
stdin io.WriteCloser
|
||||
stderr *testlogger
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Run the app if we're the child process for runGeth.
|
||||
if os.Getenv("GETH_TEST_CHILD") != "" {
|
||||
// Run the app if we've been exec'd as "geth-test" in runGeth.
|
||||
reexec.Register("geth-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// spawns geth with the given command line args. If the args don't set --datadir, the
|
||||
// child g gets a temporary data directory.
|
||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
tt := &testgeth{T: t, Executable: os.Args[0]}
|
||||
tt := &testgeth{}
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, arg := range args {
|
||||
switch {
|
||||
case arg == "-datadir" || arg == "--datadir":
|
||||
@ -84,215 +80,19 @@ func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
}
|
||||
if tt.Datadir == "" {
|
||||
tt.Datadir = tmpdir(t)
|
||||
tt.removeDatadir = true
|
||||
tt.Cleanup = func() { os.RemoveAll(tt.Datadir) }
|
||||
args = append([]string{"-datadir", tt.Datadir}, args...)
|
||||
// Remove the temporary datadir if something fails below.
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
tt.Cleanup()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Boot "geth". This actually runs the test binary but the init function
|
||||
// will prevent any tests from running.
|
||||
tt.stderr = &testlogger{t: t}
|
||||
tt.cmd = exec.Command(os.Args[0], args...)
|
||||
tt.cmd.Env = append(os.Environ(), "GETH_TEST_CHILD=1")
|
||||
tt.cmd.Stderr = tt.stderr
|
||||
stdout, err := tt.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.stdout = bufio.NewReader(stdout)
|
||||
if tt.stdin, err = tt.cmd.StdinPipe(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tt.cmd.Start(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Boot "geth". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("geth-test", args...)
|
||||
|
||||
return tt
|
||||
}
|
||||
|
||||
// InputLine writes the given text to the child's stdin.
|
||||
// This method can also be called from an expect template, e.g.:
|
||||
//
|
||||
// geth.expect(`Passphrase: {{.InputLine "password"}}`)
|
||||
func (tt *testgeth) InputLine(s string) string {
|
||||
io.WriteString(tt.stdin, s+"\n")
|
||||
return ""
|
||||
}
|
||||
|
||||
func (tt *testgeth) setTemplateFunc(name string, fn interface{}) {
|
||||
if tt.Func == nil {
|
||||
tt.Func = make(map[string]interface{})
|
||||
}
|
||||
tt.Func[name] = fn
|
||||
}
|
||||
|
||||
// expect runs its argument as a template, then expects the
|
||||
// child process to output the result of the template within 5s.
|
||||
//
|
||||
// If the template starts with a newline, the newline is removed
|
||||
// before matching.
|
||||
func (tt *testgeth) expect(tplsource string) {
|
||||
// Generate the expected output by running the template.
|
||||
tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource))
|
||||
wantbuf := new(bytes.Buffer)
|
||||
if err := tpl.Execute(wantbuf, tt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Trim exactly one newline at the beginning. This makes tests look
|
||||
// much nicer because all expect strings are at column 0.
|
||||
want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n"))
|
||||
if err := tt.matchExactOutput(want); err != nil {
|
||||
tt.Fatal(err)
|
||||
}
|
||||
tt.Logf("Matched stdout text:\n%s", want)
|
||||
}
|
||||
|
||||
func (tt *testgeth) matchExactOutput(want []byte) error {
|
||||
buf := make([]byte, len(want))
|
||||
n := 0
|
||||
tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) })
|
||||
buf = buf[:n]
|
||||
if n < len(want) || !bytes.Equal(buf, want) {
|
||||
// Grab any additional buffered output in case of mismatch
|
||||
// because it might help with debugging.
|
||||
buf = append(buf, make([]byte, tt.stdout.Buffered())...)
|
||||
tt.stdout.Read(buf[n:])
|
||||
// Find the mismatch position.
|
||||
for i := 0; i < n; i++ {
|
||||
if want[i] != buf[i] {
|
||||
return fmt.Errorf("Output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
|
||||
buf[:i], buf[i:n], want)
|
||||
}
|
||||
}
|
||||
if n < len(want) {
|
||||
return fmt.Errorf("Not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
|
||||
buf, want[:n], want[n:])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// expectRegexp expects the child process to output text matching the
|
||||
// given regular expression within 5s.
|
||||
//
|
||||
// Note that an arbitrary amount of output may be consumed by the
|
||||
// regular expression. This usually means that expect cannot be used
|
||||
// after expectRegexp.
|
||||
func (tt *testgeth) expectRegexp(resource string) (*regexp.Regexp, []string) {
|
||||
var (
|
||||
re = regexp.MustCompile(resource)
|
||||
rtee = &runeTee{in: tt.stdout}
|
||||
matches []int
|
||||
)
|
||||
tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) })
|
||||
output := rtee.buf.Bytes()
|
||||
if matches == nil {
|
||||
tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s",
|
||||
output, resource)
|
||||
return re, nil
|
||||
}
|
||||
tt.Logf("Matched stdout text:\n%s", output)
|
||||
var submatch []string
|
||||
for i := 0; i < len(matches); i += 2 {
|
||||
submatch = append(submatch, string(output[i:i+1]))
|
||||
}
|
||||
return re, submatch
|
||||
}
|
||||
|
||||
// expectExit expects the child process to exit within 5s without
|
||||
// printing any additional text on stdout.
|
||||
func (tt *testgeth) expectExit() {
|
||||
var output []byte
|
||||
tt.withKillTimeout(func() {
|
||||
output, _ = ioutil.ReadAll(tt.stdout)
|
||||
})
|
||||
tt.cmd.Wait()
|
||||
if tt.removeDatadir {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
}
|
||||
if len(output) > 0 {
|
||||
tt.Errorf("Unmatched stdout text:\n%s", output)
|
||||
}
|
||||
}
|
||||
|
||||
func (tt *testgeth) interrupt() {
|
||||
tt.cmd.Process.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// stderrText returns any stderr output written so far.
|
||||
// The returned text holds all log lines after expectExit has
|
||||
// returned.
|
||||
func (tt *testgeth) stderrText() string {
|
||||
tt.stderr.mu.Lock()
|
||||
defer tt.stderr.mu.Unlock()
|
||||
return tt.stderr.buf.String()
|
||||
}
|
||||
|
||||
func (tt *testgeth) withKillTimeout(fn func()) {
|
||||
timeout := time.AfterFunc(5*time.Second, func() {
|
||||
tt.Log("killing the child process (timeout)")
|
||||
tt.cmd.Process.Kill()
|
||||
if tt.removeDatadir {
|
||||
os.RemoveAll(tt.Datadir)
|
||||
}
|
||||
})
|
||||
defer timeout.Stop()
|
||||
fn()
|
||||
}
|
||||
|
||||
// testlogger logs all written lines via t.Log and also
|
||||
// collects them for later inspection.
|
||||
type testlogger struct {
|
||||
t *testing.T
|
||||
mu sync.Mutex
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (tl *testlogger) Write(b []byte) (n int, err error) {
|
||||
lines := bytes.Split(b, []byte("\n"))
|
||||
for _, line := range lines {
|
||||
if len(line) > 0 {
|
||||
tl.t.Logf("(stderr) %s", line)
|
||||
}
|
||||
}
|
||||
tl.mu.Lock()
|
||||
tl.buf.Write(b)
|
||||
tl.mu.Unlock()
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
// runeTee collects text read through it into buf.
|
||||
type runeTee struct {
|
||||
in interface {
|
||||
io.Reader
|
||||
io.ByteReader
|
||||
io.RuneReader
|
||||
}
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
func (rtee *runeTee) Read(b []byte) (n int, err error) {
|
||||
n, err = rtee.in.Read(b)
|
||||
rtee.buf.Write(b[:n])
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (rtee *runeTee) ReadRune() (r rune, size int, err error) {
|
||||
r, size, err = rtee.in.ReadRune()
|
||||
if err == nil {
|
||||
rtee.buf.WriteRune(r)
|
||||
}
|
||||
return r, size, err
|
||||
}
|
||||
|
||||
func (rtee *runeTee) ReadByte() (b byte, err error) {
|
||||
b, err = rtee.in.ReadByte()
|
||||
if err == nil {
|
||||
rtee.buf.WriteByte(b)
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
@ -20,6 +20,7 @@ package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
@ -67,8 +68,10 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
configFileFlag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.TestNetFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
@ -89,6 +92,18 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
utils.TxPoolGlobalSlotsFlag,
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "PERFORMANCE TUNING",
|
||||
Flags: []cli.Flag{
|
||||
@ -127,6 +142,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Name: "NETWORKING",
|
||||
Flags: []cli.Flag{
|
||||
utils.BootnodesFlag,
|
||||
utils.BootnodesV4Flag,
|
||||
utils.BootnodesV5Flag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
@ -185,6 +202,39 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
},
|
||||
}
|
||||
|
||||
// byCategory sorts an array of flagGroup by Name in the order
|
||||
// defined in AppHelpFlagGroups.
|
||||
type byCategory []flagGroup
|
||||
|
||||
func (a byCategory) Len() int { return len(a) }
|
||||
func (a byCategory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byCategory) Less(i, j int) bool {
|
||||
iCat, jCat := a[i].Name, a[j].Name
|
||||
iIdx, jIdx := len(AppHelpFlagGroups), len(AppHelpFlagGroups) // ensure non categorized flags come last
|
||||
|
||||
for i, group := range AppHelpFlagGroups {
|
||||
if iCat == group.Name {
|
||||
iIdx = i
|
||||
}
|
||||
if jCat == group.Name {
|
||||
jIdx = i
|
||||
}
|
||||
}
|
||||
|
||||
return iIdx < jIdx
|
||||
}
|
||||
|
||||
func flagCategory(flag cli.Flag) string {
|
||||
for _, category := range AppHelpFlagGroups {
|
||||
for _, flg := range category.Flags {
|
||||
if flg.GetName() == flag.GetName() {
|
||||
return category.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return "MISC"
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Override the default app help template
|
||||
cli.AppHelpTemplate = AppHelpTemplate
|
||||
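byCategory orders flag groups by their position in AppHelpFlagGroups, and groups whose name does not appear there (for example the fallback "MISC" returned by flagCategory) sink to the end. A tiny illustration using two of the group names visible above; which concrete groups exist beyond those is not shown here.

// Assuming AppHelpFlagGroups lists "TRANSACTION POOL" before "NETWORKING"
// and has no "MISC" entry, as in the hunks above.
groups := []flagGroup{{Name: "MISC"}, {Name: "NETWORKING"}, {Name: "TRANSACTION POOL"}}
sort.Sort(byCategory(groups))
// groups is now ordered: TRANSACTION POOL, NETWORKING, MISC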
@ -194,6 +244,7 @@ func init() {
|
||||
App interface{}
|
||||
FlagGroups []flagGroup
|
||||
}
|
||||
|
||||
// Override the default app help printer, but only for the global app help
|
||||
originalHelpPrinter := cli.HelpPrinter
|
||||
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
|
||||
@ -223,6 +274,27 @@ func init() {
|
||||
}
|
||||
// Render out custom usage screen
|
||||
originalHelpPrinter(w, tmpl, helpData{data, AppHelpFlagGroups})
|
||||
} else if tmpl == utils.CommandHelpTemplate {
|
||||
// Iterate over all command specific flags and categorize them
|
||||
categorized := make(map[string][]cli.Flag)
|
||||
for _, flag := range data.(cli.Command).Flags {
|
||||
if _, ok := categorized[flag.String()]; !ok {
|
||||
categorized[flagCategory(flag)] = append(categorized[flagCategory(flag)], flag)
|
||||
}
|
||||
}
|
||||
|
||||
// sort to get a stable ordering
|
||||
sorted := make([]flagGroup, 0, len(categorized))
|
||||
for cat, flgs := range categorized {
|
||||
sorted = append(sorted, flagGroup{cat, flgs})
|
||||
}
|
||||
sort.Sort(byCategory(sorted))
|
||||
|
||||
// add sorted array to data and render with default printer
|
||||
originalHelpPrinter(w, tmpl, map[string]interface{}{
|
||||
"cmd": data,
|
||||
"categorizedFlags": sorted,
|
||||
})
|
||||
} else {
|
||||
originalHelpPrinter(w, tmpl, data)
|
||||
}
|
||||
|
@ -51,9 +51,10 @@ ADD account.pass /account.pass
|
||||
EXPOSE 8080
|
||||
|
||||
CMD [ \
|
||||
"/faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", \
|
||||
"--ethport", "{{.EthPort}}", "--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", \
|
||||
"/faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", "--ethport", "{{.EthPort}}", \
|
||||
"--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", "--faucet.tiers", "{{.FaucetTiers}}", \
|
||||
"--github.user", "{{.GitHubUser}}", "--github.token", "{{.GitHubToken}}", "--account.json", "/account.json", "--account.pass", "/account.pass" \
|
||||
{{if .CaptchaToken}}, "--captcha.token", "{{.CaptchaToken}}", "--captcha.secret", "{{.CaptchaSecret}}"{{end}} \
|
||||
]`
|
||||
|
||||
// faucetComposefile is the docker-compose.yml file required to deploy and maintain
|
||||
@ -74,8 +75,11 @@ services:
|
||||
- ETH_NAME={{.EthName}}
|
||||
- FAUCET_AMOUNT={{.FaucetAmount}}
|
||||
- FAUCET_MINUTES={{.FaucetMinutes}}
|
||||
- FAUCET_TIERS={{.FaucetTiers}}
|
||||
- GITHUB_USER={{.GitHubUser}}
|
||||
- GITHUB_TOKEN={{.GitHubToken}}{{if .VHost}}
|
||||
- GITHUB_TOKEN={{.GitHubToken}}
|
||||
- CAPTCHA_TOKEN={{.CaptchaToken}}
|
||||
- CAPTCHA_SECRET={{.CaptchaSecret}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=8080{{end}}
|
||||
restart: always
|
||||
@ -97,9 +101,12 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
"EthPort": config.node.portFull,
|
||||
"GitHubUser": config.githubUser,
|
||||
"GitHubToken": config.githubToken,
|
||||
"CaptchaToken": config.captchaToken,
|
||||
"CaptchaSecret": config.captchaSecret,
|
||||
"FaucetName": strings.Title(network),
|
||||
"FaucetAmount": config.amount,
|
||||
"FaucetMinutes": config.minutes,
|
||||
"FaucetTiers": config.tiers,
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
@ -113,8 +120,11 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
"EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")],
|
||||
"GitHubUser": config.githubUser,
|
||||
"GitHubToken": config.githubToken,
|
||||
"CaptchaToken": config.captchaToken,
|
||||
"CaptchaSecret": config.captchaSecret,
|
||||
"FaucetAmount": config.amount,
|
||||
"FaucetMinutes": config.minutes,
|
||||
"FaucetTiers": config.tiers,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -140,13 +150,16 @@ type faucetInfos struct {
|
||||
port int
|
||||
amount int
|
||||
minutes int
|
||||
tiers int
|
||||
githubUser string
|
||||
githubToken string
|
||||
captchaToken string
|
||||
captchaSecret string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *faucetInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, api=%d, eth=%d, amount=%d, minutes=%d, github=%s, ethstats=%s", info.host, info.port, info.node.portFull, info.amount, info.minutes, info.githubUser, info.node.ethstats)
|
||||
return fmt.Sprintf("host=%s, api=%d, eth=%d, amount=%d, minutes=%d, tiers=%d, github=%s, captcha=%v, ethstats=%s", info.host, info.port, info.node.portFull, info.amount, info.minutes, info.tiers, info.githubUser, info.captchaToken != "", info.node.ethstats)
|
||||
}
|
||||
|
||||
// checkFaucet does a health-check against a faucet server to verify whether
|
||||
@ -177,6 +190,7 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
|
||||
}
|
||||
amount, _ := strconv.Atoi(infos.envvars["FAUCET_AMOUNT"])
|
||||
minutes, _ := strconv.Atoi(infos.envvars["FAUCET_MINUTES"])
|
||||
tiers, _ := strconv.Atoi(infos.envvars["FAUCET_TIERS"])
|
||||
|
||||
// Retrieve the funding account informations
|
||||
var out []byte
|
||||
@ -204,7 +218,10 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
|
||||
port: port,
|
||||
amount: amount,
|
||||
minutes: minutes,
|
||||
tiers: tiers,
|
||||
githubUser: infos.envvars["GITHUB_USER"],
|
||||
githubToken: infos.envvars["GITHUB_TOKEN"],
|
||||
captchaToken: infos.envvars["CAPTCHA_TOKEN"],
|
||||
captchaSecret: infos.envvars["CAPTCHA_SECRET"],
|
||||
}, nil
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ ADD genesis.json /genesis.json
|
||||
RUN \
|
||||
echo '/geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
|
||||
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}}' >> geth.sh
|
||||
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "geth.sh"]
|
||||
`
|
||||
@ -66,17 +66,20 @@ services:
|
||||
- LIGHT_PEERS={{.LightPeers}}
|
||||
- STATS_NAME={{.Ethstats}}
|
||||
- MINER_NAME={{.Etherbase}}
|
||||
- GAS_TARGET={{.GasTarget}}
|
||||
- GAS_PRICE={{.GasPrice}}
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployNode deploys a new Ethereum node container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployNode(client *sshClient, network string, bootnodes []string, config *nodeInfos) ([]byte, error) {
|
||||
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos) ([]byte, error) {
|
||||
kind := "sealnode"
|
||||
if config.keyJSON == "" && config.etherbase == "" {
|
||||
kind = "bootnode"
|
||||
bootnodes = make([]string, 0)
|
||||
bootv4 = make([]string, 0)
|
||||
bootv5 = make([]string, 0)
|
||||
}
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
@ -92,9 +95,12 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"Port": config.portFull,
|
||||
"Peers": config.peersTotal,
|
||||
"LightFlag": lightFlag,
|
||||
"Bootnodes": strings.Join(bootnodes, ","),
|
||||
"BootV4": strings.Join(bootv4, ","),
|
||||
"BootV5": strings.Join(bootv5, ","),
|
||||
"Ethstats": config.ethstats,
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": uint64(1000000 * config.gasTarget),
|
||||
"GasPrice": uint64(1000000000 * config.gasPrice),
|
||||
"Unlock": config.keyJSON != "",
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
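The template data above converts the wizard's numbers with uint64(1000000 * config.gasTarget) and uint64(1000000000 * config.gasPrice), i.e. the wizard keeps the gas target in MGas and the gas price in GWei while the --targetgaslimit and --gasprice flags in the Dockerfile take plain gas and wei. A quick worked example (the input values are made up):

package main

import "fmt"

func main() {
	gasTarget := 4.5 // MGas, as entered in the wizard
	gasPrice := 18.0 // GWei, as entered in the wizard

	// Same conversions as in the hunk above: MGas -> gas, GWei -> wei.
	fmt.Println(uint64(1000000 * gasTarget))   // 4500000
	fmt.Println(uint64(1000000000 * gasPrice)) // 18000000000
}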
@ -111,6 +117,8 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"LightPeers": config.peersLight,
|
||||
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": config.gasTarget,
|
||||
"GasPrice": config.gasPrice,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -127,7 +135,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
}
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the bootnode service
|
||||
// Build and deploy the boot or seal node service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
}
|
||||
|
||||
@ -147,6 +155,8 @@ type nodeInfos struct {
|
||||
etherbase string
|
||||
keyJSON string
|
||||
keyPass string
|
||||
gasTarget float64
|
||||
gasPrice float64
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
@ -155,7 +165,8 @@ func (info *nodeInfos) String() string {
|
||||
if info.peersLight > 0 {
|
||||
discv5 = fmt.Sprintf(", portv5=%d", info.portLight)
|
||||
}
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s", info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats)
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s, gastarget=%0.3f MGas, gasprice=%0.3f GWei",
|
||||
info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats, info.gasTarget, info.gasPrice)
|
||||
}
|
||||
|
||||
// checkNode does a health-check against a boot or seal node server to verify
|
||||
@ -176,6 +187,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
// Resolve a few types from the environmental variables
|
||||
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
|
||||
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
|
||||
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
|
||||
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
|
||||
|
||||
// Container available, retrieve its node ID and its genesis json
|
||||
var out []byte
|
||||
@ -213,6 +226,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
etherbase: infos.envvars["MINER_NAME"],
|
||||
keyJSON: keyJSON,
|
||||
keyPass: keyPass,
|
||||
gasTarget: gasTarget,
|
||||
gasPrice: gasPrice,
|
||||
}
|
||||
stats.enodeFull = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.portFull)
|
||||
if stats.portLight != 0 {
|
||||
|
@ -17,6 +17,8 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@ -37,18 +39,26 @@ import (
|
||||
type sshClient struct {
|
||||
server string // Server name or IP without port number
|
||||
address string // IP address of the remote server
|
||||
pubkey []byte // RSA public key to authenticate the server
|
||||
client *ssh.Client
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// dial establishes an SSH connection to a remote node using the current user and
|
||||
// the user's configured private RSA key.
|
||||
func dial(server string) (*sshClient, error) {
|
||||
// the user's configured private RSA key. If that fails, password authentication
|
||||
// is used as a fallback. The caller may override the login user via user@server:port.
|
||||
func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
// Figure out a label for the server and a logger
|
||||
label := server
|
||||
if strings.Contains(label, ":") {
|
||||
label = label[:strings.Index(label, ":")]
|
||||
}
|
||||
login := ""
|
||||
if strings.Contains(server, "@") {
|
||||
login = label[:strings.Index(label, "@")]
|
||||
label = label[strings.Index(label, "@")+1:]
|
||||
server = server[strings.Index(server, "@")+1:]
|
||||
}
|
||||
logger := log.New("server", label)
|
||||
logger.Debug("Attempting to establish SSH connection")
|
||||
|
||||
@ -56,6 +66,9 @@ func dial(server string) (*sshClient, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if login == "" {
|
||||
login = user.Username
|
||||
}
|
||||
// Configure the supported authentication methods (private key and password)
|
||||
var auths []ssh.AuthMethod
|
||||
|
||||
@ -71,7 +84,7 @@ func dial(server string) (*sshClient, error) {
|
||||
}
|
||||
}
|
||||
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
|
||||
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", user.Username, server)
|
||||
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
|
||||
blob, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
|
||||
fmt.Println()
|
||||
@ -86,11 +99,36 @@ func dial(server string) (*sshClient, error) {
|
||||
return nil, errors.New("no IPs associated with domain")
|
||||
}
|
||||
// Try to dial in to the remote server
|
||||
logger.Trace("Dialing remote SSH server", "user", user.Username, "key", path)
|
||||
logger.Trace("Dialing remote SSH server", "user", login)
|
||||
if !strings.Contains(server, ":") {
|
||||
server += ":22"
|
||||
}
|
||||
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: user.Username, Auth: auths})
|
||||
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||
// If no public key is known for SSH, ask the user to confirm
|
||||
if pubkey == nil {
|
||||
fmt.Printf("The authenticity of host '%s (%s)' can't be established.\n", hostname, remote)
|
||||
fmt.Printf("SSH key fingerprint is %s [MD5]\n", ssh.FingerprintLegacyMD5(key))
|
||||
fmt.Printf("Are you sure you want to continue connecting (yes/no)? ")
|
||||
|
||||
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case strings.TrimSpace(text) == "yes":
|
||||
pubkey = key.Marshal()
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unknown auth choice: %v", text)
|
||||
}
|
||||
}
|
||||
// If a public key exists for this SSH server, check that it matches
|
||||
if bytes.Compare(pubkey, key.Marshal()) == 0 {
|
||||
return nil
|
||||
}
|
||||
// We have a mismatch, forbid connecting
|
||||
return errors.New("ssh key mismatch, readd the machine to update")
|
||||
}
|
||||
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -98,6 +136,7 @@ func dial(server string) (*sshClient, error) {
|
||||
c := &sshClient{
|
||||
server: label,
|
||||
address: addr[0],
|
||||
pubkey: pubkey,
|
||||
client: client,
|
||||
logger: logger,
|
||||
}
|
||||
|
@ -44,14 +44,24 @@ type config struct {
|
||||
bootLight []string // Bootnodes to always connect to by light nodes
|
||||
ethstats string // Ethstats settings to cache for node deploys
|
||||
|
||||
Servers []string `json:"servers,omitempty"`
|
||||
Servers map[string][]byte `json:"servers,omitempty"`
|
||||
}
|
||||
|
||||
// servers retrieves an alphabetically sorted list of servers.
|
||||
func (c config) servers() []string {
|
||||
servers := make([]string, 0, len(c.Servers))
|
||||
for server := range c.Servers {
|
||||
servers = append(servers, server)
|
||||
}
|
||||
sort.Strings(servers)
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
// flush dumps the contents of config to disk.
|
||||
func (c config) flush() {
|
||||
os.MkdirAll(filepath.Dir(c.path), 0755)
|
||||
|
||||
sort.Strings(c.Servers)
|
||||
out, _ := json.MarshalIndent(c, "", " ")
|
||||
if err := ioutil.WriteFile(c.path, out, 0644); err != nil {
|
||||
log.Warn("Failed to save puppeth configs", "file", c.path, "err", err)
|
||||
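Turning Servers into a map loses the stable ordering the old slice had, which is why the diff adds the servers() helper that sorts the keys before the wizard lists them. A small standalone illustration of that sorted-keys pattern (the names here are made up):

```go
package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns the keys of a string-keyed map in alphabetical order,
// the same trick config.servers() uses to give the wizard a stable menu.
func sortedKeys(m map[string][]byte) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	servers := map[string][]byte{"charlie": nil, "alpha": nil, "bravo": nil}
	fmt.Println(sortedKeys(servers)) // [alpha bravo charlie]
}
```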
@ -152,6 +162,48 @@ func (w *wizard) readDefaultInt(def int) int {
|
||||
}
|
||||
}
|
||||
|
||||
// readFloat reads a single line from stdin, trimming it of spaces, enforcing it
|
||||
// to parse into a float.
|
||||
func (w *wizard) readFloat() float64 {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
continue
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
||||
if err != nil {
|
||||
log.Error("Invalid input, expected float", "err", err)
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
// readDefaultFloat reads a single line from stdin, trimming it of spaces, enforcing
|
||||
// it to parse into a float. If an empty line is entered, the default value is returned.
|
||||
func (w *wizard) readDefaultFloat(def float64) float64 {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return def
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
||||
if err != nil {
|
||||
log.Error("Invalid input, expected float", "err", err)
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
// readPassword reads a single line from stdin, trimming it from the trailing new
|
||||
// line and returns it. The input will not be echoed.
|
||||
func (w *wizard) readPassword() string {
|
||||
|
@ -44,6 +44,7 @@ func (w *wizard) deployFaucet() {
|
||||
host: client.server,
|
||||
amount: 1,
|
||||
minutes: 1440,
|
||||
tiers: 3,
|
||||
}
|
||||
}
|
||||
infos.node.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
@ -68,10 +69,17 @@ func (w *wizard) deployFaucet() {
|
||||
fmt.Printf("How many minutes to enforce between requests? (default = %d)\n", infos.minutes)
|
||||
infos.minutes = w.readDefaultInt(infos.minutes)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("How many funding tiers to feature (x2.5 amounts, x3 timeout)? (default = %d)\n", infos.tiers)
|
||||
infos.tiers = w.readDefaultInt(infos.tiers)
|
||||
if infos.tiers == 0 {
|
||||
log.Error("At least one funding tier must be set")
|
||||
return
|
||||
}
|
||||
// Accessing GitHub gists requires API authorization, retrieve it
|
||||
if infos.githubUser != "" {
|
||||
fmt.Println()
|
||||
fmt.Printf("Reused previous (%s) GitHub API authorization (y/n)? (default = yes)\n", infos.githubUser)
|
||||
fmt.Printf("Reuse previous (%s) GitHub API authorization (y/n)? (default = yes)\n", infos.githubUser)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.githubUser, infos.githubToken = "", ""
|
||||
}
|
||||
@ -109,6 +117,29 @@ func (w *wizard) deployFaucet() {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Accessing the reCaptcha service requires API authorization, request it
|
||||
if infos.captchaToken != "" {
|
||||
fmt.Println()
|
||||
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.captchaToken, infos.captchaSecret = "", ""
|
||||
}
|
||||
}
|
||||
if infos.captchaToken == "" {
|
||||
// No previous authorization (or old one discarded)
|
||||
fmt.Println()
|
||||
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
|
||||
if w.readDefaultString("n") == "y" {
|
||||
// Captcha protection explicitly requested, read the site and secret keys
|
||||
fmt.Println()
|
||||
fmt.Printf("What is the reCaptcha site key to authenticate human users?\n")
|
||||
infos.captchaToken = w.readString()
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("What is the reCaptcha secret key to verify authentications? (won't be echoed)\n")
|
||||
infos.captchaSecret = w.readPassword()
|
||||
}
|
||||
}
|
||||
// Figure out where the user wants to store the persistent data
|
||||
fmt.Println()
|
||||
if infos.node.datadir == "" {
|
||||
|
@ -120,7 +120,7 @@ func (w *wizard) makeGenesis() {
|
||||
// Query the user for some custom extras
|
||||
fmt.Println()
|
||||
fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
|
||||
genesis.Config.ChainId = big.NewInt(int64(w.readDefaultInt(rand.Intn(65536))))
|
||||
genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Anything fun to embed into the genesis block? (max 32 bytes)")
|
||||
|
@ -32,6 +32,9 @@ import (
|
||||
func makeWizard(network string) *wizard {
|
||||
return &wizard{
|
||||
network: network,
|
||||
conf: config{
|
||||
Servers: make(map[string][]byte),
|
||||
},
|
||||
servers: make(map[string]*sshClient),
|
||||
services: make(map[string][]string),
|
||||
in: bufio.NewReader(os.Stdin),
|
||||
@ -77,9 +80,9 @@ func (w *wizard) run() {
|
||||
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
|
||||
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
|
||||
} else {
|
||||
for _, server := range w.conf.Servers {
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
log.Info("Dialing previously configured server", "server", server)
|
||||
client, err := dial(server)
|
||||
client, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
log.Error("Previous server unreachable", "server", server, "err", err)
|
||||
}
|
||||
|
@ -39,16 +39,16 @@ func (w *wizard) networkStats(tips bool) {
|
||||
// Iterate over all the specified hosts and check their status
|
||||
stats := tablewriter.NewWriter(os.Stdout)
|
||||
stats.SetHeader([]string{"Server", "IP", "Status", "Service", "Details"})
|
||||
stats.SetColWidth(128)
|
||||
stats.SetColWidth(100)
|
||||
|
||||
for _, server := range w.conf.Servers {
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
client := w.servers[server]
|
||||
logger := log.New("server", server)
|
||||
logger.Info("Starting remote server health-check")
|
||||
|
||||
// If the server is not connected, try to connect again
|
||||
if client == nil {
|
||||
conn, err := dial(server)
|
||||
conn, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
logger.Error("Failed to establish remote connection", "err", err)
|
||||
stats.Append([]string{server, "", err.Error(), "", ""})
|
||||
|
@ -28,7 +28,9 @@ import (
|
||||
func (w *wizard) manageServers() {
|
||||
// List all the servers we can disconnect, along with an entry to connect a new one
|
||||
fmt.Println()
|
||||
for i, server := range w.conf.Servers {
|
||||
|
||||
servers := w.conf.servers()
|
||||
for i, server := range servers {
|
||||
fmt.Printf(" %d. Disconnect %s\n", i+1, server)
|
||||
}
|
||||
fmt.Printf(" %d. Connect another server\n", len(w.conf.Servers)+1)
|
||||
@ -40,14 +42,14 @@ func (w *wizard) manageServers() {
|
||||
}
|
||||
// If the user selected an existing server, drop it
|
||||
if choice <= len(w.conf.Servers) {
|
||||
server := w.conf.Servers[choice-1]
|
||||
server := servers[choice-1]
|
||||
client := w.servers[server]
|
||||
|
||||
delete(w.servers, server)
|
||||
if client != nil {
|
||||
client.Close()
|
||||
}
|
||||
w.conf.Servers = append(w.conf.Servers[:choice-1], w.conf.Servers[choice:]...)
|
||||
delete(w.conf.Servers, server)
|
||||
w.conf.flush()
|
||||
|
||||
log.Info("Disconnected existing server", "server", server)
|
||||
@ -73,14 +75,14 @@ func (w *wizard) makeServer() string {
|
||||
// Read and dial the server to ensure docker is present
|
||||
input := w.readString()
|
||||
|
||||
client, err := dial(input)
|
||||
client, err := dial(input, nil)
|
||||
if err != nil {
|
||||
log.Error("Server not ready for puppeth", "err", err)
|
||||
return ""
|
||||
}
|
||||
// All checks passed, start tracking the server
|
||||
w.servers[input] = client
|
||||
w.conf.Servers = append(w.conf.Servers, input)
|
||||
w.conf.Servers[input] = client.pubkey
|
||||
w.conf.flush()
|
||||
|
||||
return input
|
||||
@ -93,7 +95,9 @@ func (w *wizard) selectServer() string {
|
||||
// List the available servers to the user and wait for a choice
|
||||
fmt.Println()
|
||||
fmt.Println("Which server do you want to interact with?")
|
||||
for i, server := range w.conf.Servers {
|
||||
|
||||
servers := w.conf.servers()
|
||||
for i, server := range servers {
|
||||
fmt.Printf(" %d. %s\n", i+1, server)
|
||||
}
|
||||
fmt.Printf(" %d. Connect another server\n", len(w.conf.Servers)+1)
|
||||
@ -105,7 +109,7 @@ func (w *wizard) selectServer() string {
|
||||
}
|
||||
// If the user requested connecting to a new server, go for it
|
||||
if choice <= len(w.conf.Servers) {
|
||||
return w.conf.Servers[choice-1]
|
||||
return servers[choice-1]
|
||||
}
|
||||
return w.makeServer()
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
if boot {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 512, peersLight: 256}
|
||||
} else {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0}
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
|
||||
}
|
||||
}
|
||||
infos.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
@ -109,8 +109,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
} else if w.conf.genesis.Config.Clique != nil {
|
||||
// If a previous signer was already set, offer to reuse it
|
||||
if infos.keyJSON != "" {
|
||||
var key keystore.Key
|
||||
if err := json.Unmarshal([]byte(infos.keyJSON), &key); err != nil {
|
||||
if key, err := keystore.DecryptKey([]byte(infos.keyJSON), infos.keyPass); err != nil {
|
||||
infos.keyJSON, infos.keyPass = "", ""
|
||||
} else {
|
||||
fmt.Println()
|
||||
@ -136,9 +135,17 @@ func (w *wizard) deployNode(boot bool) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Establish the gas dynamics to be enforced by the signer
|
||||
fmt.Println()
|
||||
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
|
||||
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
|
||||
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
|
||||
}
|
||||
// Try to deploy the full node on the host
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, infos); err != nil {
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos); err != nil {
|
||||
log.Error("Failed to deploy Ethereum node container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -67,6 +67,10 @@ var (
|
||||
Name: "bzzaccount",
|
||||
Usage: "Swarm account key file",
|
||||
}
|
||||
SwarmListenAddrFlag = cli.StringFlag{
|
||||
Name: "httpaddr",
|
||||
Usage: "Swarm HTTP API listening interface",
|
||||
}
|
||||
SwarmPortFlag = cli.StringFlag{
|
||||
Name: "bzzport",
|
||||
Usage: "Swarm local http api port",
|
||||
@ -249,6 +253,7 @@ Cleans database of corrupted entries.
|
||||
SwarmConfigPathFlag,
|
||||
SwarmSwapEnabledFlag,
|
||||
SwarmSyncEnabledFlag,
|
||||
SwarmListenAddrFlag,
|
||||
SwarmPortFlag,
|
||||
SwarmAccountFlag,
|
||||
SwarmNetworkIdFlag,
|
||||
@ -345,6 +350,9 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
if len(bzzport) > 0 {
|
||||
bzzconfig.Port = bzzport
|
||||
}
|
||||
if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
|
||||
bzzconfig.ListenAddr = bzzaddr
|
||||
}
|
||||
swapEnabled := ctx.GlobalBool(SwarmSwapEnabledFlag.Name)
|
||||
syncEnabled := ctx.GlobalBoolT(SwarmSyncEnabledFlag.Name)
|
||||
|
||||
|
255
cmd/swarm/run_test.go
Normal file
@ -0,0 +1,255 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Run the app if we've been exec'd as "swarm-test" in runSwarm.
|
||||
reexec.Register("swarm-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
|
||||
tt := cmdtest.NewTestCmd(t, nil)
|
||||
|
||||
// Boot "swarm". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("swarm-test", args...)
|
||||
|
||||
return tt
|
||||
}
|
||||
|
||||
type testCluster struct {
|
||||
Nodes []*testNode
|
||||
TmpDir string
|
||||
}
|
||||
|
||||
// newTestCluster starts a test swarm cluster of the given size.
|
||||
//
|
||||
// A temporary directory is created and each node gets a data directory inside
|
||||
// it.
|
||||
//
|
||||
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
|
||||
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
|
||||
// as flags).
|
||||
//
|
||||
// When starting more than one node, they are connected together using the
|
||||
// admin SetPeer RPC method.
|
||||
func newTestCluster(t *testing.T, size int) *testCluster {
|
||||
cluster := &testCluster{}
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
cluster.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "swarm-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cluster.TmpDir = tmpdir
|
||||
|
||||
// start the nodes
|
||||
cluster.Nodes = make([]*testNode, 0, size)
|
||||
for i := 0; i < size; i++ {
|
||||
dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
|
||||
if err := os.Mkdir(dir, 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := newTestNode(t, dir)
|
||||
node.Name = fmt.Sprintf("swarm%02d", i)
|
||||
|
||||
cluster.Nodes = append(cluster.Nodes, node)
|
||||
}
|
||||
|
||||
if size == 1 {
|
||||
return cluster
|
||||
}
|
||||
|
||||
// connect the nodes together
|
||||
for _, node := range cluster.Nodes {
|
||||
if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// wait until all nodes have the correct number of peers
|
||||
outer:
|
||||
for _, node := range cluster.Nodes {
|
||||
var peers []*p2p.PeerInfo
|
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
|
||||
if err := node.Client.Call(&peers, "admin_peers"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(peers) == len(cluster.Nodes)-1 {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
func (c *testCluster) Shutdown() {
|
||||
for _, node := range c.Nodes {
|
||||
node.Shutdown()
|
||||
}
|
||||
os.RemoveAll(c.TmpDir)
|
||||
}
|
||||
|
||||
type testNode struct {
|
||||
Name string
|
||||
Addr string
|
||||
URL string
|
||||
Enode string
|
||||
Dir string
|
||||
Client *rpc.Client
|
||||
Cmd *cmdtest.TestCmd
|
||||
}
|
||||
|
||||
const testPassphrase = "swarm-test-passphrase"
|
||||
|
||||
func newTestNode(t *testing.T, dir string) *testNode {
|
||||
// create key
|
||||
conf := &node.Config{
|
||||
DataDir: dir,
|
||||
IPCPath: "bzzd.ipc",
|
||||
}
|
||||
n, err := node.New(conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
account, err := n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
// use a unique IPCPath when running tests on Windows
|
||||
if runtime.GOOS == "windows" {
|
||||
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
|
||||
}
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p2pPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// start the node
|
||||
node.Cmd = runSwarm(t,
|
||||
"--port", p2pPort,
|
||||
"--nodiscover",
|
||||
"--datadir", dir,
|
||||
"--ipcpath", conf.IPCPath,
|
||||
"--ethapi", "",
|
||||
"--bzzaccount", account.Address.String(),
|
||||
"--bzznetworkid", "321",
|
||||
"--bzzport", httpPort,
|
||||
"--verbosity", "6",
|
||||
)
|
||||
node.Cmd.InputLine(testPassphrase)
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
|
||||
node.URL = "http://" + node.Addr
|
||||
|
||||
var nodeInfo p2p.NodeInfo
|
||||
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func (n *testNode) Shutdown() {
|
||||
if n.Cmd != nil {
|
||||
n.Cmd.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
func assignTCPPort() (string, error) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
l.Close()
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return port, nil
|
||||
}
|
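Both newTestCluster and newTestNode above wait for asynchronous state with a deadline loop of the form `for start := time.Now(); time.Since(start) < d; time.Sleep(step)`. A generic sketch of that polling shape; the waitFor helper below is hypothetical and not part of the file:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls cond every step until it reports true or the deadline passes,
// mirroring the peer-count and IPC wait loops in run_test.go.
func waitFor(deadline, step time.Duration, cond func() bool) error {
	for start := time.Now(); time.Since(start) < deadline; time.Sleep(step) {
		if cond() {
			return nil
		}
	}
	return errors.New("condition not met before deadline")
}

func main() {
	begin := time.Now()
	err := waitFor(time.Second, 50*time.Millisecond, func() bool {
		return time.Since(begin) > 200*time.Millisecond
	})
	fmt.Println(err) // <nil>
}
```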
@ -18,6 +18,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@ -87,24 +88,32 @@ func upload(ctx *cli.Context) {
|
||||
if err != nil {
|
||||
utils.Fatalf("Error opening file: %s", err)
|
||||
}
|
||||
var hash string
|
||||
|
||||
// define a function which either uploads a directory or a single file
|
||||
// based on the type of the file being uploaded
|
||||
var doUpload func() (hash string, err error)
|
||||
if stat.IsDir() {
|
||||
doUpload = func() (string, error) {
|
||||
if !recursive {
|
||||
utils.Fatalf("Argument is a directory and recursive upload is disabled")
|
||||
return "", errors.New("Argument is a directory and recursive upload is disabled")
|
||||
}
|
||||
return client.UploadDirectory(file, defaultPath, "")
|
||||
}
|
||||
hash, err = client.UploadDirectory(file, defaultPath, "")
|
||||
} else {
|
||||
doUpload = func() (string, error) {
|
||||
f, err := swarm.Open(file)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
if mimeType == "" {
|
||||
mimeType = detectMimeType(file)
|
||||
}
|
||||
f, err := swarm.Open(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error opening file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
f.ContentType = mimeType
|
||||
hash, err = client.Upload(f, "")
|
||||
return client.Upload(f, "")
|
||||
}
|
||||
}
|
||||
hash, err := doUpload()
|
||||
if err != nil {
|
||||
utils.Fatalf("Upload failed: %s", err)
|
||||
}
|
||||
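The rewrite above funnels both upload paths through a doUpload closure, so the branches only decide how to upload while error handling and the resulting hash are dealt with once at the call site. A stripped-down, hypothetical sketch of the same shape, with the swarm client calls replaced by placeholders:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// upload picks an upload strategy once, runs it, and reports errors in a
// single place, mirroring the doUpload closure in the diff above.
func upload(path string, recursive bool) (string, error) {
	stat, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	var doUpload func() (string, error)
	if stat.IsDir() {
		doUpload = func() (string, error) {
			if !recursive {
				return "", errors.New("argument is a directory and recursive upload is disabled")
			}
			return uploadDirectory(path) // placeholder for client.UploadDirectory
		}
	} else {
		doUpload = func() (string, error) {
			return uploadFile(path) // placeholder for client.Upload
		}
	}
	return doUpload()
}

// The helpers below are stand-ins for the real swarm client calls.
func uploadDirectory(path string) (string, error) { return "dir-hash", nil }
func uploadFile(path string) (string, error)      { return "file-hash", nil }

func main() {
	hash, err := upload(".", true)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(hash)
}
```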
|
78
cmd/swarm/upload_test.go
Normal file
@ -0,0 +1,78 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
|
||||
// available from all nodes via the HTTP API
|
||||
func TestCLISwarmUp(t *testing.T) {
|
||||
t.Skip("flaky test")
|
||||
|
||||
// start 3 node cluster
|
||||
t.Log("starting 3 node cluster")
|
||||
cluster := newTestCluster(t, 3)
|
||||
defer cluster.Shutdown()
|
||||
|
||||
// create a tmp file
|
||||
tmp, err := ioutil.TempFile("", "swarm-test")
|
||||
assertNil(t, err)
|
||||
defer tmp.Close()
|
||||
defer os.Remove(tmp.Name())
|
||||
_, err = io.WriteString(tmp, "data")
|
||||
assertNil(t, err)
|
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
t.Log("uploading file with 'swarm up'")
|
||||
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
|
||||
_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
|
||||
up.ExpectExit()
|
||||
hash := matches[0]
|
||||
t.Logf("file uploaded with hash %s", hash)
|
||||
|
||||
// get the file from the HTTP API of each node
|
||||
for _, node := range cluster.Nodes {
|
||||
t.Logf("getting file from %s", node.Name)
|
||||
res, err := http.Get(node.URL + "/bzz:/" + hash)
|
||||
assertNil(t, err)
|
||||
assertHTTPResponse(t, res, http.StatusOK, "data")
|
||||
}
|
||||
}
|
||||
|
||||
func assertNil(t *testing.T, err error) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != expectedStatus {
|
||||
t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
|
||||
}
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
assertNil(t, err)
|
||||
if string(data) != expectedBody {
|
||||
t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
|
||||
}
|
||||
}
|
@ -52,10 +52,23 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv2"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .cmd.Description}}{{.cmd.Description}}
|
||||
{{end}}{{if .cmd.Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .cmd.Subcommands}}{{.cmd.Name}}{{with .cmd.ShortName}}, {{.cmd}}{{end}}{{ "\t" }}{{.cmd.Usage}}
|
||||
{{end}}{{end}}{{if .categorizedFlags}}
|
||||
{{range $idx, $categorized := .categorizedFlags}}{{$categorized.Name}} OPTIONS:
|
||||
{{range $categorized.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}{{end}}`
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.AppHelpTemplate = `{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
|
||||
@ -70,16 +83,7 @@ GLOBAL OPTIONS:
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
cli.CommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .Description}}{{.Description}}
|
||||
{{end}}{{if .Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
`
|
||||
cli.CommandHelpTemplate = CommandHelpTemplate
|
||||
}
|
||||
|
||||
// NewApp creates an app with sane defaults.
|
||||
@ -115,44 +119,23 @@ var (
|
||||
Name: "keystore",
|
||||
Usage: "Directory for the keystore (default = inside the datadir)",
|
||||
}
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
|
||||
NoUSBFlag = cli.BoolFlag{
|
||||
Name: "nousb",
|
||||
Usage: "Disables monitoring for and managine USB hardware wallets",
|
||||
}
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
}
|
||||
NetworkIdFlag = cli.IntFlag{
|
||||
NetworkIdFlag = cli.Uint64Flag{
|
||||
Name: "networkid",
|
||||
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten)",
|
||||
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)",
|
||||
Value: eth.DefaultConfig.NetworkId,
|
||||
}
|
||||
TestNetFlag = cli.BoolFlag{
|
||||
TestnetFlag = cli.BoolFlag{
|
||||
Name: "testnet",
|
||||
Usage: "Ropsten network: pre-configured proof-of-work test network",
|
||||
}
|
||||
RinkebyFlag = cli.BoolFlag{
|
||||
Name: "rinkeby",
|
||||
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
|
||||
}
|
||||
DevModeFlag = cli.BoolFlag{
|
||||
Name: "dev",
|
||||
Usage: "Developer mode: pre-configured private network with several debugging flags",
|
||||
@ -195,6 +178,72 @@ var (
|
||||
Name: "lightkdf",
|
||||
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
||||
}
|
||||
// Ethash settings
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
|
||||
}
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolPriceLimitFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricelimit",
|
||||
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
|
||||
Value: eth.DefaultConfig.TxPool.PriceLimit,
|
||||
}
|
||||
TxPoolPriceBumpFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricebump",
|
||||
Usage: "Price bump percentage to replace an already existing transaction",
|
||||
Value: eth.DefaultConfig.TxPool.PriceBump,
|
||||
}
|
||||
TxPoolAccountSlotsFlag = cli.Uint64Flag{
|
||||
Name: "txpool.accountslots",
|
||||
Usage: "Minimum number of executable transaction slots guaranteed per account",
|
||||
Value: eth.DefaultConfig.TxPool.AccountSlots,
|
||||
}
|
||||
TxPoolGlobalSlotsFlag = cli.Uint64Flag{
|
||||
Name: "txpool.globalslots",
|
||||
Usage: "Maximum number of executable transaction slots for all accounts",
|
||||
Value: eth.DefaultConfig.TxPool.GlobalSlots,
|
||||
}
|
||||
TxPoolAccountQueueFlag = cli.Uint64Flag{
|
||||
Name: "txpool.accountqueue",
|
||||
Usage: "Maximum number of non-executable transaction slots permitted per account",
|
||||
Value: eth.DefaultConfig.TxPool.AccountQueue,
|
||||
}
|
||||
TxPoolGlobalQueueFlag = cli.Uint64Flag{
|
||||
Name: "txpool.globalqueue",
|
||||
Usage: "Maximum number of non-executable transaction slots for all accounts",
|
||||
Value: eth.DefaultConfig.TxPool.GlobalQueue,
|
||||
}
|
||||
TxPoolLifetimeFlag = cli.DurationFlag{
|
||||
Name: "txpool.lifetime",
|
||||
Usage: "Maximum amount of time non-executable transaction are queued",
|
||||
Value: eth.DefaultConfig.TxPool.Lifetime,
|
||||
}
|
||||
// Performance tuning settings
|
||||
CacheFlag = cli.IntFlag{
|
||||
Name: "cache",
|
||||
@ -229,7 +278,7 @@ var (
|
||||
GasPriceFlag = BigFlag{
|
||||
Name: "gasprice",
|
||||
Usage: "Minimal gas price to accept for mining a transactions",
|
||||
Value: big.NewInt(20 * params.Shannon),
|
||||
Value: eth.DefaultConfig.GasPrice,
|
||||
}
|
||||
ExtraDataFlag = cli.StringFlag{
|
||||
Name: "extradata",
|
||||
@ -327,7 +376,7 @@ var (
|
||||
}
|
||||
ExecFlag = cli.StringFlag{
|
||||
Name: "exec",
|
||||
Usage: "Execute JavaScript statement (only in combination with console/attach)",
|
||||
Usage: "Execute JavaScript statement",
|
||||
}
|
||||
PreloadJSFlag = cli.StringFlag{
|
||||
Name: "preload",
|
||||
@ -352,7 +401,17 @@ var (
|
||||
}
|
||||
BootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated enode URLs for P2P discovery bootstrap",
|
||||
Usage: "Comma separated enode URLs for P2P discovery bootstrap (set v4+v5 instead for light servers)",
|
||||
Value: "",
|
||||
}
|
||||
BootnodesV4Flag = cli.StringFlag{
|
||||
Name: "bootnodesv4",
|
||||
Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes)",
|
||||
Value: "",
|
||||
}
|
||||
BootnodesV5Flag = cli.StringFlag{
|
||||
Name: "bootnodesv5",
|
||||
Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes)",
|
||||
Value: "",
|
||||
}
|
||||
NodeKeyFileFlag = cli.StringFlag{
|
||||
@ -411,10 +470,12 @@ var (
|
||||
// a subdirectory of the specified datadir will be used.
|
||||
func MakeDataDir(ctx *cli.Context) string {
|
||||
if path := ctx.GlobalString(DataDirFlag.Name); path != "" {
|
||||
// TODO: choose a different location outside of the regular datadir.
|
||||
if ctx.GlobalBool(TestNetFlag.Name) {
|
||||
if ctx.GlobalBool(TestnetFlag.Name) {
|
||||
return filepath.Join(path, "testnet")
|
||||
}
|
||||
if ctx.GlobalBool(RinkebyFlag.Name) {
|
||||
return filepath.Join(path, "rinkeby")
|
||||
}
|
||||
return path
|
||||
}
|
||||
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
|
||||
@ -458,10 +519,17 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
|
||||
// flags, reverting to pre-configured ones if none have been specified.
|
||||
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.MainnetBootnodes
|
||||
if ctx.GlobalIsSet(BootnodesFlag.Name) {
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
|
||||
if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
|
||||
} else {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
} else if ctx.GlobalBool(TestNetFlag.Name) {
|
||||
}
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
urls = params.TestnetBootnodes
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyBootnodes
|
||||
}
|
||||
|
||||
cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))
|
||||
@ -479,9 +547,16 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
// flags, reverting to pre-configured ones if none have been specified.
|
||||
func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.DiscoveryV5Bootnodes
|
||||
if ctx.GlobalIsSet(BootnodesFlag.Name) {
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
|
||||
if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
|
||||
} else {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
} else if cfg.BootstrapNodesV5 == nil {
|
||||
}
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyV5Bootnodes
|
||||
case cfg.BootstrapNodesV5 != nil:
|
||||
return // already set, don't apply defaults.
|
||||
}
|
||||
|
||||
@ -643,7 +718,7 @@ func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// MakePasswordList reads password lines from the file specified by --password.
|
||||
// MakePasswordList reads password lines from the file specified by the global --password flag.
|
||||
func MakePasswordList(ctx *cli.Context) []string {
|
||||
path := ctx.GlobalString(PasswordFileFlag.Name)
|
||||
if path == "" {
|
||||
@ -701,6 +776,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
||||
// --dev mode can't use p2p networking.
|
||||
cfg.MaxPeers = 0
|
||||
cfg.ListenAddr = ":0"
|
||||
cfg.DiscoveryV5Addr = ":0"
|
||||
cfg.NoDiscovery = true
|
||||
cfg.DiscoveryV5 = false
|
||||
}
|
||||
@ -719,8 +795,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
||||
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
|
||||
case ctx.GlobalBool(DevModeFlag.Name):
|
||||
cfg.DataDir = filepath.Join(os.TempDir(), "ethereum_dev_mode")
|
||||
case ctx.GlobalBool(TestNetFlag.Name):
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
|
||||
@ -729,6 +807,9 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
||||
if ctx.GlobalIsSet(LightKDFFlag.Name) {
|
||||
cfg.UseLightweightKDF = ctx.GlobalBool(LightKDFFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(NoUSBFlag.Name) {
|
||||
cfg.NoUSB = ctx.GlobalBool(NoUSBFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||
@ -740,6 +821,30 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
|
||||
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) {
|
||||
cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) {
|
||||
cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) {
|
||||
cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) {
|
||||
cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) {
|
||||
cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) {
|
||||
cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func setEthash(ctx *cli.Context, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
|
||||
cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
|
||||
@ -776,12 +881,13 @@ func checkExclusive(ctx *cli.Context, flags ...cli.Flag) {
|
||||
// SetEthConfig applies eth-related command line flags to the config.
|
||||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
checkExclusive(ctx, DevModeFlag, TestNetFlag)
|
||||
checkExclusive(ctx, DevModeFlag, TestnetFlag, RinkebyFlag)
|
||||
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
setEtherbase(ctx, ks, cfg)
|
||||
setGPO(ctx, &cfg.GPO)
|
||||
setTxPool(ctx, &cfg.TxPool)
|
||||
setEthash(ctx, cfg)
|
||||
|
||||
switch {
|
||||
@ -799,7 +905,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.LightPeers = ctx.GlobalInt(LightPeersFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = ctx.GlobalInt(NetworkIdFlag.Name)
|
||||
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
|
||||
}
|
||||
|
||||
// Ethereum needs to know maxPeers to calculate the light server peer ratio.
|
||||
@ -828,13 +934,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)
|
||||
}
|
||||
|
||||
// Override any default configs for --dev and --testnet.
|
||||
// Override any default configs for hard coded networks.
|
||||
switch {
|
||||
case ctx.GlobalBool(TestNetFlag.Name):
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 3
|
||||
}
|
||||
cfg.Genesis = core.DefaultTestnetGenesisBlock()
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 4
|
||||
}
|
||||
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
|
||||
case ctx.GlobalBool(DevModeFlag.Name):
|
||||
cfg.Genesis = core.DevGenesisBlock()
|
||||
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
|
||||
@ -901,22 +1012,16 @@ func SetupNetwork(ctx *cli.Context) {
|
||||
params.TargetGasLimit = new(big.Int).SetUint64(ctx.GlobalUint64(TargetGasLimitFlag.Name))
|
||||
}
|
||||
|
||||
func ChainDbName(ctx *cli.Context) string {
|
||||
if ctx.GlobalBool(LightModeFlag.Name) {
|
||||
return "lightchaindata"
|
||||
} else {
|
||||
return "chaindata"
|
||||
}
|
||||
}
|
||||
|
||||
// MakeChainDatabase opens a LevelDB using the flags passed to the client and will hard crash if it fails.
|
||||
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
||||
var (
|
||||
cache = ctx.GlobalInt(CacheFlag.Name)
|
||||
handles = makeDatabaseHandles()
|
||||
name = ChainDbName(ctx)
|
||||
)
|
||||
|
||||
name := "chaindata"
|
||||
if ctx.GlobalBool(LightModeFlag.Name) {
|
||||
name = "lightchaindata"
|
||||
}
|
||||
chainDb, err := stack.OpenDatabase(name, cache, handles)
|
||||
if err != nil {
|
||||
Fatalf("Could not open database: %v", err)
|
||||
@ -927,8 +1032,10 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
||||
func MakeGenesis(ctx *cli.Context) *core.Genesis {
|
||||
var genesis *core.Genesis
|
||||
switch {
|
||||
case ctx.GlobalBool(TestNetFlag.Name):
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
genesis = core.DefaultTestnetGenesisBlock()
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
genesis = core.DefaultRinkebyGenesisBlock()
|
||||
case ctx.GlobalBool(DevModeFlag.Name):
|
||||
genesis = core.DevGenesisBlock()
|
||||
}
|
||||
@ -972,3 +1079,27 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
|
||||
}
|
||||
return preloads
|
||||
}
|
||||
|
||||
// MigrateFlags sets the global flag from a local flag when it's set.
|
||||
// This is a temporary function used for migrating old command/flags to the
|
||||
// new format.
|
||||
//
|
||||
// e.g. geth account new --keystore /tmp/mykeystore --lightkdf
|
||||
//
|
||||
// is equivalent after calling this method with:
|
||||
//
|
||||
// geth --keystore /tmp/mykeystore --lightkdf account new
|
||||
//
|
||||
// This allows the use of the existing configuration functionality.
|
||||
// When all flags are migrated this function can be removed and the existing
|
||||
// configuration functionality must be changed so that it uses local flags
|
||||
func MigrateFlags(action func(ctx *cli.Context) error) func(*cli.Context) error {
|
||||
return func(ctx *cli.Context) error {
|
||||
for _, name := range ctx.FlagNames() {
|
||||
if ctx.IsSet(name) {
|
||||
ctx.GlobalSet(name, ctx.String(name))
|
||||
}
|
||||
}
|
||||
return action(ctx)
|
||||
}
|
||||
}
|
||||
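MigrateFlags copies every locally set flag into the global scope before invoking the wrapped action, so command code can keep using the Global* accessors. A hedged sketch of wiring it into a urfave/cli v1 subcommand; the command, action and flag selection below are illustrative, not the exact geth wiring:

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"gopkg.in/urfave/cli.v1"
)

// Hypothetical subcommand: wrapping the action with MigrateFlags lets
// "geth new --keystore /tmp/ks" behave like "geth --keystore /tmp/ks new".
var accountNewCommand = cli.Command{
	Name:   "new",
	Usage:  "Create a new account",
	Action: utils.MigrateFlags(accountCreate),
	Flags: []cli.Flag{
		utils.KeyStoreDirFlag,
		utils.LightKDFFlag,
	},
}

func accountCreate(ctx *cli.Context) error {
	// After MigrateFlags the local value is visible through the global lookup.
	fmt.Println("keystore:", ctx.GlobalString(utils.KeyStoreDirFlag.Name))
	return nil
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{utils.KeyStoreDirFlag, utils.LightKDFFlag}
	app.Commands = []cli.Command{accountNewCommand}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```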
|
@ -65,7 +65,7 @@ var (
|
||||
pub *ecdsa.PublicKey
|
||||
asymKey *ecdsa.PrivateKey
|
||||
nodeid *ecdsa.PrivateKey
|
||||
topic []byte
|
||||
topic whisper.TopicType
|
||||
asymKeyID string
|
||||
filterID string
|
||||
symPass string
|
||||
@ -84,7 +84,7 @@ var (
|
||||
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics")
|
||||
echoMode = flag.Bool("echo", false, "echo mode: prints some arguments for diagnostics")
|
||||
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlWarn), "log verbosity level")
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
||||
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
||||
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
||||
argMaxSize = flag.Int("maxsize", whisper.DefaultMaxMessageLength, "max size of message")
|
||||
@ -129,7 +129,7 @@ func processArgs() {
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to parse the topic: %s", err)
|
||||
}
|
||||
topic = x
|
||||
topic = whisper.BytesToTopic(x)
|
||||
}
|
||||
|
||||
if *asymmetricMode && len(*argPub) > 0 {
|
||||
@ -183,7 +183,7 @@ func initialize() {
|
||||
|
||||
if *testMode {
|
||||
symPass = "wwww" // ascii code: 0x77777777
|
||||
msPassword = "mail server test password"
|
||||
msPassword = "wwww"
|
||||
}
|
||||
|
||||
if *bootstrapMode {
|
||||
@ -307,7 +307,11 @@ func configureNode() {
|
||||
if *asymmetricMode {
|
||||
if len(*argPub) == 0 {
|
||||
s := scanLine("Please enter the peer's public key: ")
|
||||
pub = crypto.ToECDSAPub(common.FromHex(s))
|
||||
b := common.FromHex(s)
|
||||
if b == nil {
|
||||
utils.Fatalf("Error: can not convert hexadecimal string")
|
||||
}
|
||||
pub = crypto.ToECDSAPub(b)
|
||||
if !isKeyValid(pub) {
|
||||
utils.Fatalf("Error: invalid public key")
|
||||
}
|
||||
@ -326,7 +330,7 @@ func configureNode() {
|
||||
|
||||
if !*asymmetricMode && !*forwarderMode {
|
||||
if len(symPass) == 0 {
|
||||
symPass, err = console.Stdin.PromptPassword("Please enter the password: ")
|
||||
symPass, err = console.Stdin.PromptPassword("Please enter the password for symmetric encryption: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase: %v", err)
|
||||
}
|
||||
@ -343,6 +347,8 @@ func configureNode() {
|
||||
if len(*argTopic) == 0 {
|
||||
generateTopic([]byte(symPass))
|
||||
}
|
||||
|
||||
fmt.Printf("Filter is configured for the topic: %x \n", topic)
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
@ -354,18 +360,17 @@ func configureNode() {
|
||||
filter := whisper.Filter{
|
||||
KeySym: symKey,
|
||||
KeyAsym: asymKey,
|
||||
Topics: [][]byte{topic},
|
||||
Topics: [][]byte{topic[:]},
|
||||
AllowP2P: p2pAccept,
|
||||
}
|
||||
filterID, err = shh.Subscribe(&filter)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to install filter: %s", err)
|
||||
}
|
||||
fmt.Printf("Filter is configured for the topic: %x \n", topic)
|
||||
}
|
||||
|
||||
func generateTopic(password []byte) {
|
||||
x := pbkdf2.Key(password, password, 8196, 128, sha512.New)
|
||||
x := pbkdf2.Key(password, password, 4096, 128, sha512.New)
|
||||
for i := 0; i < len(x); i++ {
|
||||
topic[i%whisper.TopicLength] ^= x[i]
|
||||
}
|
||||
@ -485,16 +490,15 @@ func sendMsg(payload []byte) common.Hash {
|
||||
Dst: pub,
|
||||
KeySym: symKey,
|
||||
Payload: payload,
|
||||
Topic: whisper.BytesToTopic(topic),
|
||||
Topic: topic,
|
||||
TTL: uint32(*argTTL),
|
||||
PoW: *argPoW,
|
||||
WorkTime: uint32(*argWorkTime),
|
||||
}
|
||||
|
||||
msg := whisper.NewSentMessage(&params)
|
||||
if msg == nil {
|
||||
fmt.Printf("failed to create new message (OS level error)")
|
||||
os.Exit(0)
|
||||
msg, err := whisper.NewSentMessage(&params)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to create new message: %s", err)
|
||||
}
|
||||
envelope, err := msg.Wrap(&params)
|
||||
if err != nil {
|
||||
@ -624,9 +628,9 @@ func requestExpiredMessagesLoop() {
|
||||
params.Src = nodeid
|
||||
params.WorkTime = 5
|
||||
|
||||
msg := whisper.NewSentMessage(&params)
|
||||
if msg == nil {
|
||||
utils.Fatalf("failed to create new message (OS level error)")
|
||||
msg, err := whisper.NewSentMessage(&params)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to create new message: %s", err)
|
||||
}
|
||||
env, err := msg.Wrap(&params)
|
||||
if err != nil {
|
||||
|
188
common/bitutil/bitutil.go
Normal file
@ -0,0 +1,188 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from: https://golang.org/src/crypto/cipher/xor.go
|
||||
|
||||
// Package bitutil implements fast bitwise operations.
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
|
||||
|
||||
// XORBytes xors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes xor'd.
|
||||
func XORBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastXORBytes(dst, a, b)
|
||||
}
|
||||
return safeXORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastXORBytes xors in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeXORBytes xors one byte at a time. It works on all architectures, regardless of
|
||||
// whether they support unaligned reads/writes.
|
||||
func safeXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
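XORBytes (and the AND, OR and Test variants below) dispatch to the word-at-a-time loop only when the architecture tolerates unaligned loads, falling back to the byte-wise loop otherwise. A tiny usage sketch of the exported helpers; the input bytes are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	a := []byte{0xff, 0x0f, 0x00}
	b := []byte{0x0f, 0x0f, 0x01}

	dst := make([]byte, len(a))
	n := bitutil.XORBytes(dst, a, b)           // word-sized chunks where the CPU allows it
	fmt.Printf("xor'd %d bytes: %x\n", n, dst) // xor'd 3 bytes: f00001

	fmt.Println(bitutil.TestBytes(dst)) // true: at least one bit is set
}
```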
|
||||
// ANDBytes ands the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes and'd.
|
||||
func ANDBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastANDBytes(dst, a, b)
|
||||
}
|
||||
return safeANDBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastANDBytes ands in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastANDBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] & bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeANDBytes ands one byte at a time. It works on all architectures, regardless of
|
||||
// whether they support unaligned reads/writes.
|
||||
func safeANDBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ORBytes ors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes or'd.
|
||||
func ORBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastORBytes(dst, a, b)
|
||||
}
|
||||
return safeORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastORBytes ors in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] | bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeORBytes ors one byte at a time. It works on all architectures, regardless of
|
||||
// whether they support unaligned reads/writes.
|
||||
func safeORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// TestBytes tests whether any bit is set in the input byte slice.
|
||||
func TestBytes(p []byte) bool {
|
||||
if supportsUnaligned {
|
||||
return fastTestBytes(p)
|
||||
}
|
||||
return safeTestBytes(p)
|
||||
}
|
||||
|
||||
// fastTestBytes tests for set bits in bulk. It only works on architectures that
|
||||
// support unaligned read/writes.
|
||||
func fastTestBytes(p []byte) bool {
|
||||
n := len(p)
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
pw := *(*[]uintptr)(unsafe.Pointer(&p))
|
||||
for i := 0; i < w; i++ {
|
||||
if pw[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// safeTestBytes tests for set bits one byte at a time. It works on all
|
||||
// architectures, regardless of whether they support unaligned reads/writes.
|
||||
func safeTestBytes(p []byte) bool {
|
||||
for i := 0; i < len(p); i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
215
common/bitutil/bitutil_test.go
Normal file
@ -0,0 +1,215 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from: https://golang.org/src/crypto/cipher/xor_test.go
|
||||
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests that bitwise XOR works for various alignments.
|
||||
func TestXOR(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
XORBytes(d1, p, q)
|
||||
safeXORBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal", d1, d2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bitwise AND works for various alignments.
|
||||
func TestAND(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
ANDBytes(d1, p, q)
|
||||
safeANDBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bitwise OR works for various alignments.
|
||||
func TestOR(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
ORBytes(d1, p, q)
|
||||
safeORBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bit testing works for various alignments.
|
||||
func TestTest(t *testing.T) {
|
||||
for align := 0; align < 2; align++ {
|
||||
// Test for bits set in the bulk part
|
||||
p := make([]byte, 1023)[align:]
|
||||
p[100] = 1
|
||||
|
||||
if TestBytes(p) != safeTestBytes(p) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
// Test for bits set in the tail part
|
||||
q := make([]byte, 1023)[align:]
|
||||
q[len(q)-1] = 1
|
||||
|
||||
if TestBytes(q) != safeTestBytes(q) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized XOR performance.
func BenchmarkFastXOR1KB(b *testing.B) { benchmarkFastXOR(b, 1024) }
func BenchmarkFastXOR2KB(b *testing.B) { benchmarkFastXOR(b, 2048) }
func BenchmarkFastXOR4KB(b *testing.B) { benchmarkFastXOR(b, 4096) }

func benchmarkFastXOR(b *testing.B, size int) {
	p, q := make([]byte, size), make([]byte, size)

	for i := 0; i < b.N; i++ {
		XORBytes(p, p, q)
	}
}

// Benchmarks the baseline XOR performance.
func BenchmarkBaseXOR1KB(b *testing.B) { benchmarkBaseXOR(b, 1024) }
func BenchmarkBaseXOR2KB(b *testing.B) { benchmarkBaseXOR(b, 2048) }
func BenchmarkBaseXOR4KB(b *testing.B) { benchmarkBaseXOR(b, 4096) }

func benchmarkBaseXOR(b *testing.B, size int) {
	p, q := make([]byte, size), make([]byte, size)

	for i := 0; i < b.N; i++ {
		safeXORBytes(p, p, q)
	}
}

// Benchmarks the potentially optimized AND performance.
func BenchmarkFastAND1KB(b *testing.B) { benchmarkFastAND(b, 1024) }
func BenchmarkFastAND2KB(b *testing.B) { benchmarkFastAND(b, 2048) }
func BenchmarkFastAND4KB(b *testing.B) { benchmarkFastAND(b, 4096) }

func benchmarkFastAND(b *testing.B, size int) {
	p, q := make([]byte, size), make([]byte, size)

	for i := 0; i < b.N; i++ {
		ANDBytes(p, p, q)
	}
}

// Benchmarks the baseline AND performance.
func BenchmarkBaseAND1KB(b *testing.B) { benchmarkBaseAND(b, 1024) }
func BenchmarkBaseAND2KB(b *testing.B) { benchmarkBaseAND(b, 2048) }
func BenchmarkBaseAND4KB(b *testing.B) { benchmarkBaseAND(b, 4096) }

func benchmarkBaseAND(b *testing.B, size int) {
	p, q := make([]byte, size), make([]byte, size)

	for i := 0; i < b.N; i++ {
		safeANDBytes(p, p, q)
	}
}

// Benchmarks the potentially optimized OR performance.
func BenchmarkFastOR1KB(b *testing.B) { benchmarkFastOR(b, 1024) }
func BenchmarkFastOR2KB(b *testing.B) { benchmarkFastOR(b, 2048) }
func BenchmarkFastOR4KB(b *testing.B) { benchmarkFastOR(b, 4096) }

func benchmarkFastOR(b *testing.B, size int) {
	p, q := make([]byte, size), make([]byte, size)

	for i := 0; i < b.N; i++ {
		ORBytes(p, p, q)
	}
}

// Benchmarks the baseline OR performance.
func BenchmarkBaseOR1KB(b *testing.B) { benchmarkBaseOR(b, 1024) }
func BenchmarkBaseOR2KB(b *testing.B) { benchmarkBaseOR(b, 2048) }
func BenchmarkBaseOR4KB(b *testing.B) { benchmarkBaseOR(b, 4096) }

func benchmarkBaseOR(b *testing.B, size int) {
	p, q := make([]byte, size), make([]byte, size)

	for i := 0; i < b.N; i++ {
		safeORBytes(p, p, q)
	}
}

// Benchmarks the potentially optimized bit testing performance.
func BenchmarkFastTest1KB(b *testing.B) { benchmarkFastTest(b, 1024) }
func BenchmarkFastTest2KB(b *testing.B) { benchmarkFastTest(b, 2048) }
func BenchmarkFastTest4KB(b *testing.B) { benchmarkFastTest(b, 4096) }

func benchmarkFastTest(b *testing.B, size int) {
	p := make([]byte, size)
	for i := 0; i < b.N; i++ {
		TestBytes(p)
	}
}

// Benchmarks the baseline bit testing performance.
func BenchmarkBaseTest1KB(b *testing.B) { benchmarkBaseTest(b, 1024) }
func BenchmarkBaseTest2KB(b *testing.B) { benchmarkBaseTest(b, 2048) }
func BenchmarkBaseTest4KB(b *testing.B) { benchmarkBaseTest(b, 4096) }

func benchmarkBaseTest(b *testing.B, size int) {
	p := make([]byte, size)
	for i := 0; i < b.N; i++ {
		safeTestBytes(p)
	}
}
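The Fast benchmarks time the potentially optimized implementations while the Base ones time the byte-wise loops. The usual optimization for routines like this is to process a machine word at a time; a minimal sketch of that idea using encoding/binary, which is only an illustration and not the package's actual fast path (it assumes all three slices have the same length):

package bitutil

import "encoding/binary"

// fastXOR8Sketch XORs eight bytes at a time, falling back to single bytes for
// the tail. Illustrative only; the real implementation may use unsafe word
// loads and per-architecture alignment checks instead.
func fastXOR8Sketch(dst, p, q []byte) {
	n := len(dst)
	i := 0
	for ; i+8 <= n; i += 8 {
		binary.LittleEndian.PutUint64(dst[i:],
			binary.LittleEndian.Uint64(p[i:])^binary.LittleEndian.Uint64(q[i:]))
	}
	for ; i < n; i++ {
		dst[i] = p[i] ^ q[i]
	}
}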
common/bitutil/compress.go (new file, 170 lines)
@ -0,0 +1,170 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bitutil

import "errors"

var (
	// errMissingData is returned from decompression if the byte referenced by
	// the bitset header overflows the input data.
	errMissingData = errors.New("missing bytes on input")

	// errUnreferencedData is returned from decompression if not all bytes were used
	// up from the input data after decompressing it.
	errUnreferencedData = errors.New("extra bytes on input")

	// errExceededTarget is returned from decompression if the bitset header has
	// more bits defined than the number of target buffer space available.
	errExceededTarget = errors.New("target data size exceeded")

	// errZeroContent is returned from decompression if a data byte referenced in
	// the bitset header is actually a zero byte.
	errZeroContent = errors.New("zero byte in input content")
)

// The compression algorithm implemented by CompressBytes and DecompressBytes is
// optimized for sparse input data which contains a lot of zero bytes. Decompression
// requires knowledge of the decompressed data length.
//
// Compression works as follows:
//
//   if data only contains zeroes,
//       CompressBytes(data) == nil
//   otherwise if len(data) <= 1,
//       CompressBytes(data) == data
//   otherwise:
//       CompressBytes(data) == append(CompressBytes(nonZeroBitset(data)), nonZeroBytes(data)...)
//       where
//         nonZeroBitset(data) is a bit vector with len(data) bits (MSB first):
//             nonZeroBitset(data)[i/8] && (1 << (7-i%8)) != 0  if data[i] != 0
//             len(nonZeroBitset(data)) == (len(data)+7)/8
//         nonZeroBytes(data) contains the non-zero bytes of data in the same order

// CompressBytes compresses the input byte slice according to the sparse bitset
// representation algorithm. If the result is bigger than the original input, no
// compression is done.
func CompressBytes(data []byte) []byte {
	if out := bitsetEncodeBytes(data); len(out) < len(data) {
		return out
	}
	cpy := make([]byte, len(data))
	copy(cpy, data)
	return cpy
}

// bitsetEncodeBytes compresses the input byte slice according to the sparse
// bitset representation algorithm.
func bitsetEncodeBytes(data []byte) []byte {
	// Empty slices get compressed to nil
	if len(data) == 0 {
		return nil
	}
	// One byte slices compress to nil or retain the single byte
	if len(data) == 1 {
		if data[0] == 0 {
			return nil
		}
		return data
	}
	// Calculate the bitset of set bytes, and gather the non-zero bytes
	nonZeroBitset := make([]byte, (len(data)+7)/8)
	nonZeroBytes := make([]byte, 0, len(data))

	for i, b := range data {
		if b != 0 {
			nonZeroBytes = append(nonZeroBytes, b)
			nonZeroBitset[i/8] |= 1 << byte(7-i%8)
		}
	}
	if len(nonZeroBytes) == 0 {
		return nil
	}
	return append(bitsetEncodeBytes(nonZeroBitset), nonZeroBytes...)
}

// DecompressBytes decompresses data with a known target size. If the input data
// matches the size of the target, it means no compression was done in the first
// place.
func DecompressBytes(data []byte, target int) ([]byte, error) {
	if len(data) > target {
		return nil, errExceededTarget
	}
	if len(data) == target {
		cpy := make([]byte, len(data))
		copy(cpy, data)
		return cpy, nil
	}
	return bitsetDecodeBytes(data, target)
}

// bitsetDecodeBytes decompresses data with a known target size.
func bitsetDecodeBytes(data []byte, target int) ([]byte, error) {
	out, size, err := bitsetDecodePartialBytes(data, target)
	if err != nil {
		return nil, err
	}
	if size != len(data) {
		return nil, errUnreferencedData
	}
	return out, nil
}

// bitsetDecodePartialBytes decompresses data with a known target size, but does
// not enforce consuming all the input bytes. In addition to the decompressed
// output, the function returns the length of compressed input data corresponding
// to the output as the input slice may be longer.
func bitsetDecodePartialBytes(data []byte, target int) ([]byte, int, error) {
	// Sanity check 0 targets to avoid infinite recursion
	if target == 0 {
		return nil, 0, nil
	}
	// Handle the zero and single byte corner cases
	decomp := make([]byte, target)
	if len(data) == 0 {
		return decomp, 0, nil
	}
	if target == 1 {
		decomp[0] = data[0] // copy to avoid referencing the input slice
		if data[0] != 0 {
			return decomp, 1, nil
		}
		return decomp, 0, nil
	}
	// Decompress the bitset of set bytes and distribute the non zero bytes
	nonZeroBitset, ptr, err := bitsetDecodePartialBytes(data, (target+7)/8)
	if err != nil {
		return nil, ptr, err
	}
	for i := 0; i < 8*len(nonZeroBitset); i++ {
		if nonZeroBitset[i/8]&(1<<byte(7-i%8)) != 0 {
			// Make sure we have enough data to push into the correct slot
			if ptr >= len(data) {
				return nil, 0, errMissingData
			}
			if i >= len(decomp) {
				return nil, 0, errExceededTarget
			}
			// Make sure the data is valid and push into the slot
			if data[ptr] == 0 {
				return nil, 0, errZeroContent
			}
			decomp[i] = data[ptr]
			ptr++
		}
	}
	return decomp, ptr, nil
}
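To make the recursive format above concrete: the sparse vector 0x4912385c0e7b64000000 (used again in the compress_test.go file later in this diff) encodes as 0x80fe4912385c0e7b64. A small round-trip sketch through the exported API:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	// 10 input bytes, of which only the first 7 are non-zero.
	data := []byte{0x49, 0x12, 0x38, 0x5c, 0x0e, 0x7b, 0x64, 0x00, 0x00, 0x00}

	// Level 1 bitset (MSB first, one bit per input byte): 11111110 00000000 = 0xfe 0x00.
	// Level 2 bitset over those two bytes: 10000000 = 0x80, with non-zero byte 0xfe.
	// Result: 0x80 0xfe followed by the seven non-zero payload bytes.
	comp := bitutil.CompressBytes(data)
	fmt.Printf("%x\n", comp) // 80fe4912385c0e7b64

	// Decompression needs the original length and must reproduce the input exactly.
	orig, err := bitutil.DecompressBytes(comp, len(data))
	fmt.Println(bytes.Equal(orig, data), err) // true <nil>
}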
common/bitutil/compress_fuzz.go (new file, 56 lines)
@ -0,0 +1,56 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build gofuzz

package bitutil

import "bytes"

// Fuzz implements a go-fuzz fuzzer method to test various encoding method
// invocations.
func Fuzz(data []byte) int {
	if len(data) == 0 {
		return -1
	}
	if data[0]%2 == 0 {
		return fuzzEncode(data[1:])
	}
	return fuzzDecode(data[1:])
}

// fuzzEncode implements a go-fuzz fuzzer method to test the bitset encoding and
// decoding algorithm.
func fuzzEncode(data []byte) int {
	proc, _ := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
	if !bytes.Equal(data, proc) {
		panic("content mismatch")
	}
	return 0
}

// fuzzDecode implements a go-fuzz fuzzer method to test the bit decoding and
// reencoding algorithm.
func fuzzDecode(data []byte) int {
	blob, err := bitsetDecodeBytes(data, 1024)
	if err != nil {
		return 0
	}
	if comp := bitsetEncodeBytes(blob); !bytes.Equal(comp, data) {
		panic("content mismatch")
	}
	return 0
}
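Since the fuzzer above only builds under the gofuzz tag and dispatches on the first input byte, a throwaway in-package smoke test (hypothetical, same build tag) can exercise both paths by hand:

// +build gofuzz

package bitutil

import "testing"

// TestFuzzSmoke is a hypothetical smoke test driving the fuzz entry point:
// an even selector byte exercises the encode/decode round trip, an odd one
// exercises decode/re-encode, and empty input is rejected with -1.
func TestFuzzSmoke(t *testing.T) {
	Fuzz([]byte{0x00, 0x01, 0x00, 0x02}) // encode path on {0x01, 0x00, 0x02}
	Fuzz([]byte{0x01, 0x80, 0xfe, 0x49}) // decode path on {0x80, 0xfe, 0x49}
	Fuzz([]byte{})                       // rejected input
}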
common/bitutil/compress_test.go (new file, 181 lines)
@ -0,0 +1,181 @@
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
// Tests that data bitset encoding and decoding works and is bijective.
|
||||
func TestEncodingCycle(t *testing.T) {
|
||||
tests := []string{
|
||||
// Tests generated by go-fuzz to maximize code coverage
|
||||
"0x000000000000000000",
|
||||
"0xef0400",
|
||||
"0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb",
|
||||
"0x7b64000000",
|
||||
"0x000034000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f0000000000000000000",
|
||||
"0x4912385c0e7b64000000",
|
||||
"0x000034000000000000000000000000000000",
|
||||
"0x00",
|
||||
"0x000003e834ff7f0000",
|
||||
"0x0000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000ff00",
|
||||
"0x895f0c6a020f850c6a020f85f88df88d",
|
||||
"0xdf7070533534333636313639343638373432313536346c1bc3315aac2f65fefb",
|
||||
"0x0000000000",
|
||||
"0xdf70706336346c65fefb",
|
||||
"0x00006d643634000000",
|
||||
"0xdf7070533534333636313639343638373532313536346c1bc333393438373130707063363430353639343638373532313536346c1bc333393438336336346c65fe",
|
||||
}
|
||||
for i, tt := range tests {
|
||||
data := hexutil.MustDecode(tt)
|
||||
|
||||
proc, err := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
if err != nil {
|
||||
t.Errorf("test %d: failed to decompress compressed data: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(data, proc) {
|
||||
t.Errorf("test %d: compress/decompress mismatch: have %x, want %x", i, proc, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that data bitset decoding and re-encoding works and is bijective.
|
||||
func TestDecodingCycle(t *testing.T) {
|
||||
tests := []struct {
|
||||
size int
|
||||
input string
|
||||
fail error
|
||||
}{
|
||||
{size: 0, input: "0x"},
|
||||
|
||||
// Crashers generated by go-fuzz
|
||||
{size: 0, input: "0x0020", fail: errUnreferencedData},
|
||||
{size: 0, input: "0x30", fail: errUnreferencedData},
|
||||
{size: 1, input: "0x00", fail: errUnreferencedData},
|
||||
{size: 2, input: "0x07", fail: errMissingData},
|
||||
{size: 1024, input: "0x8000", fail: errZeroContent},
|
||||
|
||||
// Tests generated by go-fuzz to maximize code coverage
|
||||
{size: 29490, input: "0x343137343733323134333839373334323073333930783e3078333930783e70706336346c65303e", fail: errMissingData},
|
||||
{size: 59395, input: "0x00", fail: errUnreferencedData},
|
||||
{size: 52574, input: "0x70706336346c65c0de", fail: errExceededTarget},
|
||||
{size: 42264, input: "0x07", fail: errMissingData},
|
||||
{size: 52, input: "0xa5045bad48f4", fail: errExceededTarget},
|
||||
{size: 52574, input: "0xc0de", fail: errMissingData},
|
||||
{size: 52574, input: "0x"},
|
||||
{size: 29490, input: "0x34313734373332313433383937333432307333393078073034333839373334323073333930783e3078333937333432307333393078073061333930783e70706336346c65303e", fail: errMissingData},
|
||||
{size: 29491, input: "0x3973333930783e30783e", fail: errMissingData},
|
||||
|
||||
{size: 1024, input: "0x808080608080"},
|
||||
{size: 1024, input: "0x808470705e3632383337363033313434303137393130306c6580ef46806380635a80"},
|
||||
{size: 1024, input: "0x8080808070"},
|
||||
{size: 1024, input: "0x808070705e36346c6580ef46806380635a80"},
|
||||
{size: 1024, input: "0x80808046802680"},
|
||||
{size: 1024, input: "0x4040404035"},
|
||||
{size: 1024, input: "0x4040bf3ba2b3f684402d353234373438373934409fe5b1e7ada94ebfd7d0505e27be4035"},
|
||||
{size: 1024, input: "0x404040bf3ba2b3f6844035"},
|
||||
{size: 1024, input: "0x40402d35323437343837393440bfd7d0505e27be4035"},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
data := hexutil.MustDecode(tt.input)
|
||||
|
||||
orig, err := bitsetDecodeBytes(data, tt.size)
|
||||
if err != tt.fail {
|
||||
t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.fail)
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if comp := bitsetEncodeBytes(orig); !bytes.Equal(comp, data) {
|
||||
t.Errorf("test %d: decompress/compress mismatch: have %x, want %x", i, comp, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompression tests that compression works by returning either the bitset
|
||||
// encoded input, or the actual input if the bitset version is longer.
|
||||
func TestCompression(t *testing.T) {
|
||||
// Check that the compression returns the bitset encoding when it is shorter
|
||||
in := hexutil.MustDecode("0x4912385c0e7b64000000")
|
||||
out := hexutil.MustDecode("0x80fe4912385c0e7b64")
|
||||
|
||||
if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
|
||||
t.Errorf("encoding mismatch for sparse data: have %x, want %x", data, out)
|
||||
}
|
||||
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
|
||||
t.Errorf("decoding mismatch for sparse data: have %x, want %x, error %v", data, in, err)
|
||||
}
|
||||
// Check that the compression returns the input if the bitset encoding is longer
|
||||
in = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
|
||||
out = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
|
||||
|
||||
if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
|
||||
t.Errorf("encoding mismatch for dense data: have %x, want %x", data, out)
|
||||
}
|
||||
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
|
||||
t.Errorf("decoding mismatch for dense data: have %x, want %x, error %v", data, in, err)
|
||||
}
|
||||
// Check that decompressing a longer input than the target fails
|
||||
if _, err := DecompressBytes([]byte{0xc0, 0x01, 0x01}, 2); err != errExceededTarget {
|
||||
t.Errorf("decoding error mismatch for long data: have %v, want %v", err, errExceededTarget)
|
||||
}
|
||||
}
|
||||
|
||||
// Crude benchmark for compressing random slices of bytes.
|
||||
func BenchmarkEncoding1KBVerySparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.0001) }
|
||||
func BenchmarkEncoding2KBVerySparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.0001) }
|
||||
func BenchmarkEncoding4KBVerySparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.0001) }
|
||||
|
||||
func BenchmarkEncoding1KBSparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.001) }
|
||||
func BenchmarkEncoding2KBSparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.001) }
|
||||
func BenchmarkEncoding4KBSparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.001) }
|
||||
|
||||
func BenchmarkEncoding1KBDense(b *testing.B) { benchmarkEncoding(b, 1024, 0.1) }
|
||||
func BenchmarkEncoding2KBDense(b *testing.B) { benchmarkEncoding(b, 2048, 0.1) }
|
||||
func BenchmarkEncoding4KBDense(b *testing.B) { benchmarkEncoding(b, 4096, 0.1) }
|
||||
|
||||
func BenchmarkEncoding1KBSaturated(b *testing.B) { benchmarkEncoding(b, 1024, 0.5) }
|
||||
func BenchmarkEncoding2KBSaturated(b *testing.B) { benchmarkEncoding(b, 2048, 0.5) }
|
||||
func BenchmarkEncoding4KBSaturated(b *testing.B) { benchmarkEncoding(b, 4096, 0.5) }
|
||||
|
||||
func benchmarkEncoding(b *testing.B, bytes int, fill float64) {
|
||||
// Generate a random slice of bytes to compress
|
||||
random := rand.NewSource(0) // reproducible and comparable
|
||||
|
||||
data := make([]byte, bytes)
|
||||
bits := int(float64(bytes) * 8 * fill)
|
||||
|
||||
for i := 0; i < bits; i++ {
|
||||
idx := random.Int63() % int64(len(data))
|
||||
bit := uint(random.Int63() % 8)
|
||||
data[idx] |= 1 << bit
|
||||
}
|
||||
// Reset the benchmark and measure encoding/decoding
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
}
|
||||
}
|
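The benchmark above only times the encode/decode pair. To eyeball the space savings at the same fill ratios, one could print the compressed sizes through the exported API (a rough sketch; sizes and fills mirror the benchmarks):

package main

import (
	"fmt"
	"math/rand"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	random := rand.New(rand.NewSource(0)) // reproducible, like the benchmark

	for _, fill := range []float64{0.0001, 0.001, 0.1, 0.5} {
		data := make([]byte, 4096)
		for i := 0; i < int(float64(len(data))*8*fill); i++ {
			data[random.Intn(len(data))] |= 1 << uint(random.Intn(8))
		}
		fmt.Printf("fill %.4f: %d -> %d bytes\n", fill, len(data), len(bitutil.CompressBytes(data)))
	}
}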
@ -89,18 +89,18 @@ func Hex2BytesFixed(str string, flen int) []byte {
|
||||
}
|
||||
|
||||
func RightPadBytes(slice []byte, l int) []byte {
|
||||
if l < len(slice) {
|
||||
if l <= len(slice) {
|
||||
return slice
|
||||
}
|
||||
|
||||
padded := make([]byte, l)
|
||||
copy(padded[0:len(slice)], slice)
|
||||
copy(padded, slice)
|
||||
|
||||
return padded
|
||||
}
|
||||
|
||||
func LeftPadBytes(slice []byte, l int) []byte {
|
||||
if l < len(slice) {
|
||||
if l <= len(slice) {
|
||||
return slice
|
||||
}
|
||||
|
||||
|
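With the change above, the padding helpers hand back the input unchanged whenever the requested length does not exceed the slice, and the copy no longer re-slices the destination. A short usage sketch of RightPadBytes, assuming it lives in the common package alongside Hex2BytesFixed (LeftPadBytes is truncated in this hunk and omitted):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	fmt.Println(common.RightPadBytes([]byte{1, 2, 3}, 5)) // [1 2 3 0 0]
	fmt.Println(common.RightPadBytes([]byte{1, 2, 3}, 3)) // [1 2 3] (l <= len, input returned as-is)
	fmt.Println(common.RightPadBytes([]byte{1, 2, 3}, 2)) // [1 2 3] (no truncation either)
}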
@ -27,6 +27,8 @@ var (
|
||||
tt256 = BigPow(2, 256)
|
||||
tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
|
||||
MaxBig256 = new(big.Int).Set(tt256m1)
|
||||
tt63 = BigPow(2, 63)
|
||||
MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
|
||||
)
|
||||
|
||||
const (
|
||||
@ -128,6 +130,34 @@ func PaddedBigBytes(bigint *big.Int, n int) []byte {
|
||||
return ret
|
||||
}
|
||||
|
||||
// bigEndianByteAt returns the byte at position n,
|
||||
// in Big-Endian encoding
|
||||
// So n==0 returns the least significant byte
|
||||
func bigEndianByteAt(bigint *big.Int, n int) byte {
|
||||
words := bigint.Bits()
|
||||
// Check word-bucket the byte will reside in
|
||||
i := n / wordBytes
|
||||
if i >= len(words) {
|
||||
return byte(0)
|
||||
}
|
||||
word := words[i]
|
||||
// Offset of the byte
|
||||
shift := 8 * uint(n%wordBytes)
|
||||
|
||||
return byte(word >> shift)
|
||||
}
|
||||
|
||||
// Byte returns the byte at position n,
|
||||
// with the supplied padlength in Little-Endian encoding.
|
||||
// n==0 returns the MSB
|
||||
// Example: bigint '5', padlength 32, n=31 => 5
|
||||
func Byte(bigint *big.Int, padlength, n int) byte {
|
||||
if n >= padlength {
|
||||
return byte(0)
|
||||
}
|
||||
return bigEndianByteAt(bigint, padlength-1-n)
|
||||
}
|
||||
|
||||
// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure
|
||||
// that buf has enough space. If buf is too short the result will be incomplete.
|
||||
func ReadBits(bigint *big.Int, buf []byte) {
|
||||
|
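The two accessors index from opposite ends: bigEndianByteAt counts n == 0 as the least significant byte, while the exported Byte counts n == 0 as the most significant byte of the padlength-sized encoding. A small in-package sketch restating the documented example (the function name is made up; assuming this sits in common/math where the helpers are defined):

package math

import (
	"fmt"
	"math/big"
)

// demoByteIndexing restates the "bigint '5', padlength 32, n=31 => 5" example.
func demoByteIndexing() {
	five := big.NewInt(5)

	// bigEndianByteAt: n == 0 is the least significant byte.
	fmt.Println(bigEndianByteAt(five, 0)) // 5
	fmt.Println(bigEndianByteAt(five, 1)) // 0

	// Byte: n == 0 is the most significant byte of the padded encoding,
	// so with padlength 32 the value 5 only shows up at n == 31.
	fmt.Println(Byte(five, 32, 31)) // 5
	fmt.Println(Byte(five, 32, 0))  // 0
}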
@ -21,6 +21,8 @@ import (
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestHexOrDecimal256(t *testing.T) {
|
||||
@ -133,8 +135,44 @@ func TestPaddedBigBytes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPaddedBigBytes(b *testing.B) {
|
||||
func BenchmarkPaddedBigBytesLargePadding(b *testing.B) {
|
||||
bigint := MustParseBig256("123456789123456789123456789123456789")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 200)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPaddedBigBytesSmallPadding(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 5)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPaddedBigBytesSmallOnePadding(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 32)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkByteAtBrandNew(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
bigEndianByteAt(bigint, 15)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkByteAt(b *testing.B) {
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
bigEndianByteAt(bigint, 15)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkByteAtOld(b *testing.B) {
|
||||
|
||||
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
|
||||
for i := 0; i < b.N; i++ {
|
||||
PaddedBigBytes(bigint, 32)
|
||||
}
|
||||
@ -174,6 +212,65 @@ func TestU256(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBigEndianByteAt(t *testing.T) {
|
||||
tests := []struct {
|
||||
x string
|
||||
y int
|
||||
exp byte
|
||||
}{
|
||||
{"00", 0, 0x00},
|
||||
{"01", 1, 0x00},
|
||||
{"00", 1, 0x00},
|
||||
{"01", 0, 0x01},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x30},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x20},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0xAB},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 500, 0x00},
|
||||
}
|
||||
for _, test := range tests {
|
||||
v := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
|
||||
actual := bigEndianByteAt(v, test.y)
|
||||
if actual != test.exp {
|
||||
t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
func TestLittleEndianByteAt(t *testing.T) {
|
||||
tests := []struct {
|
||||
x string
|
||||
y int
|
||||
exp byte
|
||||
}{
|
||||
{"00", 0, 0x00},
|
||||
{"01", 1, 0x00},
|
||||
{"00", 1, 0x00},
|
||||
{"01", 0, 0x00},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x00},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 0, 0xAB},
|
||||
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 1, 0xCD},
|
||||
{"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 0, 0x00},
|
||||
{"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 1, 0xCD},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 31, 0x30},
|
||||
{"0000000000000000000000000000000000000000000000000000000000102030", 30, 0x20},
|
||||
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 32, 0x0},
|
||||
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 31, 0xFF},
|
||||
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0xFFFF, 0x0},
|
||||
}
|
||||
for _, test := range tests {
|
||||
v := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
|
||||
actual := Byte(v, 32, test.y)
|
||||
if actual != test.exp {
|
||||
t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestS256(t *testing.T) {
|
||||
tests := []struct{ x, y *big.Int }{
|
||||
{x: big.NewInt(0), y: big.NewInt(0)},
|
||||
|
@ -33,102 +33,18 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
|
||||
res, err := Decompress([]byte{token, 0xfd})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, exp)
|
||||
// if bytes.Compare(res, exp) != 0 {
|
||||
// t.Error("empty sha3", res)
|
||||
// }
|
||||
|
||||
exp = []byte{0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x1, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21}
|
||||
res, err = Decompress([]byte{token, 0xfe})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, exp)
|
||||
// if bytes.Compare(res, exp) != 0 {
|
||||
// t.Error("0x80 sha3", res)
|
||||
// }
|
||||
|
||||
res, err = Decompress([]byte{token, 0xff})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, []byte{token})
|
||||
// if bytes.Compare(res, []byte{token}) != 0 {
|
||||
// t.Error("token", res)
|
||||
// }
|
||||
|
||||
res, err = Decompress([]byte{token, 12})
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(res, checker.DeepEquals, make([]byte, 10))
|
||||
// if bytes.Compare(res, make([]byte, 10)) != 0 {
|
||||
// t.Error("10 * zero", res)
|
||||
// }
|
||||
|
||||
}
|
||||
|
||||
// func TestDecompressMulti(t *testing.T) {
|
||||
// res, err := Decompress([]byte{token, 0xfd, token, 0xfe, token, 12})
|
||||
// if err != nil {
|
||||
// t.Error(err)
|
||||
// }
|
||||
|
||||
// var exp []byte
|
||||
// exp = append(exp, crypto.Keccak256([]byte(""))...)
|
||||
// exp = append(exp, crypto.Keccak256([]byte{0x80})...)
|
||||
// exp = append(exp, make([]byte, 10)...)
|
||||
|
||||
// if bytes.Compare(res, res) != 0 {
|
||||
// t.Error("Expected", exp, "result", res)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func TestCompressSimple(t *testing.T) {
|
||||
// res := Compress([]byte{0, 0, 0, 0, 0})
|
||||
// if bytes.Compare(res, []byte{token, 7}) != 0 {
|
||||
// t.Error("5 * zero", res)
|
||||
// }
|
||||
|
||||
// res = Compress(crypto.Keccak256([]byte("")))
|
||||
// if bytes.Compare(res, []byte{token, emptyShaToken}) != 0 {
|
||||
// t.Error("empty sha", res)
|
||||
// }
|
||||
|
||||
// res = Compress(crypto.Keccak256([]byte{0x80}))
|
||||
// if bytes.Compare(res, []byte{token, emptyListShaToken}) != 0 {
|
||||
// t.Error("empty list sha", res)
|
||||
// }
|
||||
|
||||
// res = Compress([]byte{token})
|
||||
// if bytes.Compare(res, []byte{token, tokenToken}) != 0 {
|
||||
// t.Error("token", res)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func TestCompressMulti(t *testing.T) {
|
||||
// in := []byte{0, 0, 0, 0, 0}
|
||||
// in = append(in, crypto.Keccak256([]byte(""))...)
|
||||
// in = append(in, crypto.Keccak256([]byte{0x80})...)
|
||||
// in = append(in, token)
|
||||
// res := Compress(in)
|
||||
|
||||
// exp := []byte{token, 7, token, emptyShaToken, token, emptyListShaToken, token, tokenToken}
|
||||
// if bytes.Compare(res, exp) != 0 {
|
||||
// t.Error("expected", exp, "got", res)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func TestCompressDecompress(t *testing.T) {
|
||||
// var in []byte
|
||||
|
||||
// for i := 0; i < 20; i++ {
|
||||
// in = append(in, []byte{0, 0, 0, 0, 0}...)
|
||||
// in = append(in, crypto.Keccak256([]byte(""))...)
|
||||
// in = append(in, crypto.Keccak256([]byte{0x80})...)
|
||||
// in = append(in, []byte{123, 2, 19, 89, 245, 254, 255, token, 98, 233}...)
|
||||
// in = append(in, token)
|
||||
// }
|
||||
|
||||
// c := Compress(in)
|
||||
// d, err := Decompress(c)
|
||||
// if err != nil {
|
||||
// t.Error(err)
|
||||
// }
|
||||
|
||||
// if bytes.Compare(d, in) != 0 {
|
||||
// t.Error("multi failed\n", d, "\n", in)
|
||||
// }
|
||||
// }
|
||||
|
@ -44,7 +44,7 @@ import (
|
||||
const (
|
||||
checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database
|
||||
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
|
||||
inmemorySignatures = 1024 // Number of recent blocks to keep in memory
|
||||
inmemorySignatures = 4096 // Number of recent block signatures to keep in memory
|
||||
|
||||
wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers
|
||||
)
|
||||
@ -76,7 +76,7 @@ var (
|
||||
errUnknownBlock = errors.New("unknown block")
|
||||
|
||||
// errInvalidCheckpointBeneficiary is returned if a checkpoint/epoch transition
|
||||
// block has a beneficiary set to non zeroes.
|
||||
// block has a beneficiary set to non-zeroes.
|
||||
errInvalidCheckpointBeneficiary = errors.New("beneficiary in checkpoint block non-zero")
|
||||
|
||||
// errInvalidVote is returned if a nonce value is something else that the two
|
||||
@ -84,7 +84,7 @@ var (
|
||||
errInvalidVote = errors.New("vote nonce not 0x00..0 or 0xff..f")
|
||||
|
||||
// errInvalidCheckpointVote is returned if a checkpoint/epoch transition block
|
||||
// has a vote nonce set to non zeroes.
|
||||
// has a vote nonce set to non-zeroes.
|
||||
errInvalidCheckpointVote = errors.New("vote nonce in checkpoint block non-zero")
|
||||
|
||||
// errMissingVanity is returned if a block's extra-data section is shorter than
|
||||
@ -99,12 +99,12 @@ var (
|
||||
// their extra-data fields.
|
||||
errExtraSigners = errors.New("non-checkpoint block contains extra signer list")
|
||||
|
||||
// drrInvalidCheckpointSigners is returned if a checkpoint block contains an
|
||||
// errInvalidCheckpointSigners is returned if a checkpoint block contains an
|
||||
// invalid list of signers (i.e. non divisible by 20 bytes, or not the correct
|
||||
// ones).
|
||||
drrInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block")
|
||||
errInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block")
|
||||
|
||||
// errInvalidMixDigest is returned if a block's mix digest is non zero.
|
||||
// errInvalidMixDigest is returned if a block's mix digest is non-zero.
|
||||
errInvalidMixDigest = errors.New("non-zero mix digest")
|
||||
|
||||
// errInvalidUncleHash is returned if a block contains an non-empty uncle list.
|
||||
@ -122,7 +122,7 @@ var (
|
||||
// be modified via out-of-range or non-contiguous headers.
|
||||
errInvalidVotingChain = errors.New("invalid voting chain")
|
||||
|
||||
// errUnauthorized is returned if a header is signed by a non authorized entity.
|
||||
// errUnauthorized is returned if a header is signed by a non-authorized entity.
|
||||
errUnauthorized = errors.New("unauthorized")
|
||||
)
|
||||
|
||||
@ -162,7 +162,12 @@ func sigHash(header *types.Header) (hash common.Hash) {
|
||||
}
|
||||
|
||||
// ecrecover extracts the Ethereum account address from a signed header.
|
||||
func ecrecover(header *types.Header) (common.Address, error) {
|
||||
func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
|
||||
// If the signature's already cached, return that
|
||||
hash := header.Hash()
|
||||
if address, known := sigcache.Get(hash); known {
|
||||
return address.(common.Address), nil
|
||||
}
|
||||
// Retrieve the signature from the header extra-data
|
||||
if len(header.Extra) < extraSeal {
|
||||
return common.Address{}, errMissingSignature
|
||||
@ -177,6 +182,7 @@ func ecrecover(header *types.Header) (common.Address, error) {
|
||||
var signer common.Address
|
||||
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
|
||||
|
||||
sigcache.Add(hash, signer)
|
||||
return signer, nil
|
||||
}
|
||||
|
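ecrecover now memoizes recovered signers in an ARC cache keyed by the header hash; the cache itself is created elsewhere in the engine and is not part of this hunk. A minimal sketch of the lookup pattern, assuming the hashicorp/golang-lru API and the inmemorySignatures size used above:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Same shape as the clique signature cache: bounded, recency and frequency aware.
	sigcache, _ := lru.NewARC(4096) // inmemorySignatures in the diff above

	lookup := func(hash string) string {
		if v, ok := sigcache.Get(hash); ok {
			return v.(string) // cache hit, skip the expensive recovery
		}
		signer := "signer-recovered-for-" + hash // stand-in for the ECDSA recovery step
		sigcache.Add(hash, signer)
		return signer
	}

	fmt.Println(lookup("0xheader1"))
	fmt.Println(lookup("0xheader1")) // second call served from the cache
}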
||||
@ -223,7 +229,7 @@ func New(config *params.CliqueConfig, db ethdb.Database) *Clique {
|
||||
// Author implements consensus.Engine, returning the Ethereum address recovered
|
||||
// from the signature in the header's extra-data section.
|
||||
func (c *Clique) Author(header *types.Header) (common.Address, error) {
|
||||
return ecrecover(header)
|
||||
return ecrecover(header, c.signatures)
|
||||
}
|
||||
|
||||
// VerifyHeader checks whether a header conforms to the consensus rules.
|
||||
@ -291,7 +297,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
|
||||
return errExtraSigners
|
||||
}
|
||||
if checkpoint && signersBytes%common.AddressLength != 0 {
|
||||
return drrInvalidCheckpointSigners
|
||||
return errInvalidCheckpointSigners
|
||||
}
|
||||
// Ensure that the mix digest is zero as we don't have fork protection currently
|
||||
if header.MixDigest != (common.Hash{}) {
|
||||
@ -347,7 +353,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
|
||||
}
|
||||
extraSuffix := len(header.Extra) - extraSeal
|
||||
if !bytes.Equal(header.Extra[extraVanity:extraSuffix], signers) {
|
||||
return drrInvalidCheckpointSigners
|
||||
return errInvalidCheckpointSigners
|
||||
}
|
||||
}
|
||||
// All basic checks passed, verify the seal and return
|
||||
@ -369,7 +375,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
|
||||
}
|
||||
// If an on-disk checkpoint snapshot can be found, use that
|
||||
if number%checkpointInterval == 0 {
|
||||
if s, err := loadSnapshot(c.config, c.db, hash); err == nil {
|
||||
if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
|
||||
log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash)
|
||||
snap = s
|
||||
break
|
||||
@ -385,7 +391,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
|
||||
for i := 0; i < len(signers); i++ {
|
||||
copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
|
||||
}
|
||||
snap = newSnapshot(c.config, 0, genesis.Hash(), signers)
|
||||
snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
|
||||
if err := snap.store(c.db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -461,10 +467,9 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.recents.Add(snap.Hash, snap)
|
||||
|
||||
// Resolve the authorization key and check against signers
|
||||
signer, err := ecrecover(header)
|
||||
signer, err := ecrecover(header, c.signatures)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -473,13 +478,13 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
}
|
||||
for seen, recent := range snap.Recents {
|
||||
if recent == signer {
|
||||
// Signer is among recents, only fail if the current block doens't shift it out
|
||||
// Signer is among recents, only fail if the current block doesn't shift it out
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit {
|
||||
return errUnauthorized
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ensure that the difficulty corresponts to the turn-ness of the signer
|
||||
// Ensure that the difficulty corresponds to the turn-ness of the signer
|
||||
inturn := snap.inturn(header.Number.Uint64(), signer)
|
||||
if inturn && header.Difficulty.Cmp(diffInTurn) != 0 {
|
||||
return errInvalidDifficulty
|
||||
@ -493,18 +498,29 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
// Prepare implements consensus.Engine, preparing all the consensus fields of the
|
||||
// header for running the transactions on top.
|
||||
func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
|
||||
// If the block isn't a checkpoint, cast a random vote (good enough fror now)
|
||||
// If the block isn't a checkpoint, cast a random vote (good enough for now)
|
||||
header.Coinbase = common.Address{}
|
||||
header.Nonce = types.BlockNonce{}
|
||||
|
||||
number := header.Number.Uint64()
|
||||
|
||||
// Assemble the voting snapshot to check which votes make sense
|
||||
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if number%c.config.Epoch != 0 {
|
||||
c.lock.RLock()
|
||||
if len(c.proposals) > 0 {
|
||||
|
||||
// Gather all the proposals that make sense voting on
|
||||
addresses := make([]common.Address, 0, len(c.proposals))
|
||||
for address := range c.proposals {
|
||||
for address, authorize := range c.proposals {
|
||||
if snap.validVote(address, authorize) {
|
||||
addresses = append(addresses, address)
|
||||
}
|
||||
}
|
||||
// If there's pending proposals, cast a vote on them
|
||||
if len(addresses) > 0 {
|
||||
header.Coinbase = addresses[rand.Intn(len(addresses))]
|
||||
if c.proposals[header.Coinbase] {
|
||||
copy(header.Nonce[:], nonceAuthVote)
|
||||
@ -514,11 +530,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
|
||||
}
|
||||
c.lock.RUnlock()
|
||||
}
|
||||
// Assemble the voting snapshot and set the correct difficulty
|
||||
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Set the correct difficulty
|
||||
header.Difficulty = diffNoTurn
|
||||
if snap.inturn(header.Number.Uint64(), c.signer) {
|
||||
header.Difficulty = diffInTurn
|
||||
@ -595,11 +607,11 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
|
||||
if _, authorized := snap.Signers[signer]; !authorized {
|
||||
return nil, errUnauthorized
|
||||
}
|
||||
// If we're amongs the recent signers, wait for the next block
|
||||
// If we're amongst the recent signers, wait for the next block
|
||||
for seen, recent := range snap.Recents {
|
||||
if recent == signer {
|
||||
// Signer is among recents, only wait if the current block doens't shift it out
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit {
|
||||
// Signer is among recents, only wait if the current block doesn't shift it out
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
|
||||
log.Info("Signed recently, must wait for others")
|
||||
<-stop
|
||||
return nil, nil
|
||||
|
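The added number < limit test matters because the arithmetic is unsigned: for blocks earlier than limit, number-limit wraps around and the recency check silently passes. A tiny illustration of the wrap-around the guard avoids (values are made up):

package main

import "fmt"

func main() {
	var number, limit, seen uint64 = 2, 3, 1

	// Without the guard, number-limit underflows to 2^64-1 and the original
	// check `seen > number-limit` is effectively never true.
	fmt.Println(number - limit)                        // 18446744073709551615
	fmt.Println(seen > number-limit)                   // false: signer would not wait
	fmt.Println(number < limit || seen > number-limit) // true: guarded form waits as intended
}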
@ -24,6 +24,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
)
|
||||
|
||||
// Vote represents a single vote that an authorized signer made to modify the
|
||||
@ -38,13 +39,14 @@ type Vote struct {
|
||||
// Tally is a simple vote tally to keep the current score of votes. Votes that
|
||||
// go against the proposal aren't counted since it's equivalent to not voting.
|
||||
type Tally struct {
|
||||
Authorize bool `json:"authorize"` // Whether the vote it about authorizing or kicking someone
|
||||
Authorize bool `json:"authorize"` // Whether the vote is about authorizing or kicking someone
|
||||
Votes int `json:"votes"` // Number of votes until now wanting to pass the proposal
|
||||
}
|
||||
|
||||
// Snapshot is the state of the authorization voting at a given point in time.
|
||||
type Snapshot struct {
|
||||
config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
|
||||
sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
|
||||
|
||||
Number uint64 `json:"number"` // Block number where the snapshot was created
|
||||
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
|
||||
@ -54,12 +56,13 @@ type Snapshot struct {
|
||||
Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating
|
||||
}
|
||||
|
||||
// newSnapshot create a new snapshot with the specified startup parameters. This
|
||||
// newSnapshot creates a new snapshot with the specified startup parameters. This
|
||||
// method does not initialize the set of recent signers, so only ever use it for
|
||||
// the genesis block.
|
||||
func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
|
||||
func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
|
||||
snap := &Snapshot{
|
||||
config: config,
|
||||
sigcache: sigcache,
|
||||
Number: number,
|
||||
Hash: hash,
|
||||
Signers: make(map[common.Address]struct{}),
|
||||
@ -73,7 +76,7 @@ func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, s
|
||||
}
|
||||
|
||||
// loadSnapshot loads an existing snapshot from the database.
|
||||
func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
|
||||
func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
|
||||
blob, err := db.Get(append([]byte("clique-"), hash[:]...))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -83,6 +86,7 @@ func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Ha
|
||||
return nil, err
|
||||
}
|
||||
snap.config = config
|
||||
snap.sigcache = sigcache
|
||||
|
||||
return snap, nil
|
||||
}
|
||||
@ -100,6 +104,7 @@ func (s *Snapshot) store(db ethdb.Database) error {
|
||||
func (s *Snapshot) copy() *Snapshot {
|
||||
cpy := &Snapshot{
|
||||
config: s.config,
|
||||
sigcache: s.sigcache,
|
||||
Number: s.Number,
|
||||
Hash: s.Hash,
|
||||
Signers: make(map[common.Address]struct{}),
|
||||
@ -121,11 +126,17 @@ func (s *Snapshot) copy() *Snapshot {
|
||||
return cpy
|
||||
}
|
||||
|
||||
// validVote returns whether it makes sense to cast the specified vote in the
|
||||
// given snapshot context (e.g. don't try to add an already authorized signer).
|
||||
func (s *Snapshot) validVote(address common.Address, authorize bool) bool {
|
||||
_, signer := s.Signers[address]
|
||||
return (signer && !authorize) || (!signer && authorize)
|
||||
}
|
||||
|
||||
// cast adds a new vote into the tally.
|
||||
func (s *Snapshot) cast(address common.Address, authorize bool) bool {
|
||||
// Ensure the vote is meaningful
|
||||
_, signer := s.Signers[address]
|
||||
if (signer && authorize) || (!signer && !authorize) {
|
||||
if !s.validVote(address, authorize) {
|
||||
return false
|
||||
}
|
||||
// Cast the vote into an existing or new tally
|
||||
@ -190,7 +201,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
|
||||
delete(snap.Recents, number-limit)
|
||||
}
|
||||
// Resolve the authorization key and check against signers
|
||||
signer, err := ecrecover(header)
|
||||
signer, err := ecrecover(header, s.sigcache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -243,7 +243,7 @@ func TestVoting(t *testing.T) {
|
||||
},
|
||||
results: []string{"A", "B"},
|
||||
}, {
|
||||
// Cascading changes are not allowed, only the the account being voted on may change
|
||||
// Cascading changes are not allowed, only the account being voted on may change
|
||||
signers: []string{"A", "B", "C", "D"},
|
||||
votes: []testerVote{
|
||||
{signer: "A", voted: "C", auth: false},
|
||||
@ -293,7 +293,7 @@ func TestVoting(t *testing.T) {
|
||||
results: []string{"A", "B", "C"},
|
||||
}, {
|
||||
// Ensure that pending votes don't survive authorization status changes. This
|
||||
// corner case can only appear if a signer is quickly added, remove and then
|
||||
// corner case can only appear if a signer is quickly added, removed and then
|
||||
// readded (or the inverse), while one of the original voters dropped. If a
|
||||
// past vote is left cached in the system somewhere, this will interfere with
|
||||
// the final signer outcome.
|
||||
|
@ -79,8 +79,7 @@ type Engine interface {
|
||||
|
||||
// Finalize runs any post-transaction state modifications (e.g. block rewards)
|
||||
// and assembles the final block.
|
||||
//
|
||||
// Note, the block header and state database might be updated to reflect any
|
||||
// Note: The block header and state database might be updated to reflect any
|
||||
// consensus rules that happen at finalization (e.g. block rewards).
|
||||
Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
|
||||
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -52,7 +53,6 @@ type hasher func(dest []byte, data []byte)
|
||||
|
||||
// makeHasher creates a repetitive hasher, allowing the same hash data structures
|
||||
// to be reused between hash runs instead of requiring new ones to be created.
|
||||
//
|
||||
// The returned function is not thread safe!
|
||||
func makeHasher(h hash.Hash) hasher {
|
||||
return func(dest []byte, data []byte) {
|
||||
@ -81,7 +81,6 @@ func seedHash(block uint64) []byte {
|
||||
// memory, then performing two passes of Sergio Demian Lerner's RandMemoHash
|
||||
// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
|
||||
// set of 524288 64-byte values.
|
||||
//
|
||||
// This method places the result into dest in machine byte order.
|
||||
func generateCache(dest []uint32, epoch uint64, seed []byte) {
|
||||
// Print some debug logs to allow analysis on low end devices
|
||||
@ -142,7 +141,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
|
||||
dstOff = j * hashBytes
|
||||
xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes
|
||||
)
|
||||
xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
|
||||
bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
|
||||
keccak512(cache[dstOff:], temp)
|
||||
|
||||
atomic.AddUint32(&progress, 1)
|
||||
@ -219,7 +218,6 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte
|
||||
}
|
||||
|
||||
// generateDataset generates the entire ethash dataset for mining.
|
||||
//
|
||||
// This method places the result into dest in machine byte order.
|
||||
func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
|
||||
// Print some debug logs to allow analysis on low end devices
|
||||
|
@ -20,7 +20,7 @@ package ethash
|
||||
|
||||
import "testing"
|
||||
|
||||
// Tests whether the dataset size calculator work correctly by cross checking the
|
||||
// Tests whether the dataset size calculator works correctly by cross checking the
|
||||
// hard coded lookup table with the value generated by it.
|
||||
func TestSizeCalculations(t *testing.T) {
|
||||
var tests []uint64
|
||||
|
@ -218,7 +218,6 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
||||
|
||||
// verifyHeader checks whether a header conforms to the consensus rules of the
|
||||
// stock Ethereum ethash engine.
|
||||
//
|
||||
// See YP section 4.3.4. "Block Header Validity"
|
||||
func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
|
||||
// Ensure that the header's extra-data section is of a reasonable size
|
||||
@ -239,10 +238,19 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
return errZeroBlockTime
|
||||
}
|
||||
// Verify the block's difficulty based in it's timestamp and parent's difficulty
|
||||
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
|
||||
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
||||
if expected.Cmp(header.Difficulty) != 0 {
|
||||
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
||||
}
|
||||
// Verify that the gas limit is <= 2^63-1
|
||||
if header.GasLimit.Cmp(math.MaxBig63) > 0 {
|
||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, math.MaxBig63)
|
||||
}
|
||||
// Verify that the gasUsed is <= gasLimit
|
||||
if header.GasUsed.Cmp(header.GasLimit) > 0 {
|
||||
return fmt.Errorf("invalid gasUsed: have %v, gasLimit %v", header.GasUsed, header.GasLimit)
|
||||
}
|
||||
|
||||
// Verify that the gas limit remains within allowed bounds
|
||||
diff := new(big.Int).Set(parent.GasLimit)
|
||||
diff = diff.Sub(diff, header.GasLimit)
|
||||
@ -274,16 +282,18 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
return nil
|
||||
}
|
||||
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
|
||||
// that a new block should have when created at time given the parent block's time
|
||||
// and difficulty.
|
||||
//
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time
|
||||
// given the parent block's time and difficulty.
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
|
||||
if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
|
||||
return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
|
||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||
next := new(big.Int).Add(parent.Number, common.Big1)
|
||||
switch {
|
||||
case config.IsHomestead(next):
|
||||
return calcDifficultyHomestead(time, parent)
|
||||
default:
|
||||
return calcDifficultyFrontier(time, parent)
|
||||
}
|
||||
return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
|
||||
}
|
||||
|
||||
// Some weird constants to avoid constant memory allocs for them.
|
||||
@ -296,7 +306,7 @@ var (
|
||||
// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time given the
|
||||
// parent block's time and difficulty. The calculation uses the Homestead rules.
|
||||
func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
|
||||
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
|
||||
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
|
||||
// algorithm:
|
||||
// diff = (parent_diff +
|
||||
@ -304,7 +314,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
// ) + 2^(periodCount - 2)
|
||||
|
||||
bigTime := new(big.Int).SetUint64(time)
|
||||
bigParentTime := new(big.Int).SetUint64(parentTime)
|
||||
bigParentTime := new(big.Int).Set(parent.Time)
|
||||
|
||||
// holds intermediate values to make the algo easier to read & audit
|
||||
x := new(big.Int)
|
||||
@ -320,16 +330,16 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
x.Set(bigMinus99)
|
||||
}
|
||||
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
|
||||
y.Div(parentDiff, params.DifficultyBoundDivisor)
|
||||
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
||||
x.Mul(y, x)
|
||||
x.Add(parentDiff, x)
|
||||
x.Add(parent.Difficulty, x)
|
||||
|
||||
// minimum difficulty can ever be (before exponential factor)
|
||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||
x.Set(params.MinimumDifficulty)
|
||||
}
|
||||
// for the exponential factor
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
periodCount := new(big.Int).Add(parent.Number, common.Big1)
|
||||
periodCount.Div(periodCount, expDiffPeriod)
|
||||
|
||||
// the exponential factor, commonly referred to as "the bomb"
|
||||
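To make the EIP-2 formula above concrete, here is the adjustment term for a few timestamp gaps, ignoring the exponential bomb and the minimum-difficulty clamp (a toy integer illustration, assuming the 131072 minimum difficulty; the real code works on big.Int):

package main

import "fmt"

func main() {
	parentDiff := int64(131072) // params.MinimumDifficulty

	for _, gap := range []int64{5, 15, 25} {
		factor := 1 - gap/10 // max(1 - (block_timestamp - parent_timestamp) // 10, -99)
		if factor < -99 {
			factor = -99
		}
		diff := parentDiff + parentDiff/2048*factor
		fmt.Printf("gap %2ds -> factor %2d -> difficulty %d\n", gap, factor, diff)
	}
	// gap  5s -> factor  1 -> difficulty 131136
	// gap 15s -> factor  0 -> difficulty 131072
	// gap 25s -> factor -1 -> difficulty 131008
}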
@ -345,25 +355,25 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
|
||||
// difficulty that a new block should have when created at time given the parent
|
||||
// block's time and difficulty. The calculation uses the Frontier rules.
|
||||
func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
|
||||
func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
|
||||
diff := new(big.Int)
|
||||
adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
|
||||
adjust := new(big.Int).Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
||||
bigTime := new(big.Int)
|
||||
bigParentTime := new(big.Int)
|
||||
|
||||
bigTime.SetUint64(time)
|
||||
bigParentTime.SetUint64(parentTime)
|
||||
bigParentTime.Set(parent.Time)
|
||||
|
||||
if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
|
||||
diff.Add(parentDiff, adjust)
|
||||
diff.Add(parent.Difficulty, adjust)
|
||||
} else {
|
||||
diff.Sub(parentDiff, adjust)
|
||||
diff.Sub(parent.Difficulty, adjust)
|
||||
}
|
||||
if diff.Cmp(params.MinimumDifficulty) < 0 {
|
||||
diff.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
periodCount := new(big.Int).Add(parent.Number, common.Big1)
|
||||
periodCount.Div(periodCount, expDiffPeriod)
|
||||
if periodCount.Cmp(common.Big1) > 0 {
|
||||
// diff = diff + 2^(periodCount - 2)
|
||||
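Note for readers of the two hunks above: the EIP-2 rule quoted in the Homestead comments reduces to a few big-integer operations. Below is a minimal, self-contained sketch of just the pre-bomb adjustment, assuming the conventional constants (2048 as the bound divisor, -99 as the clamp); the helper name homesteadAdjust and the sample values are illustrative only, and the real calcDifficultyHomestead above additionally applies the exponential "bomb" term and the minimum-difficulty floor.

package main

import (
	"fmt"
	"math/big"
)

// homesteadAdjust sketches only the EIP-2 adjustment quoted in the hunk above;
// it omits the exponential "bomb" term and the minimum-difficulty floor that
// the real calcDifficultyHomestead applies. Assumes blockTime >= parentTime.
func homesteadAdjust(parentDiff *big.Int, parentTime, blockTime uint64) *big.Int {
	// x = max(1 - (block_timestamp - parent_timestamp) // 10, -99)
	x := big.NewInt(1 - int64((blockTime-parentTime)/10))
	if x.Cmp(big.NewInt(-99)) < 0 {
		x.SetInt64(-99)
	}
	// parent_diff + parent_diff // 2048 * x
	y := new(big.Int).Div(parentDiff, big.NewInt(2048))
	return new(big.Int).Add(parentDiff, y.Mul(y, x))
}

func main() {
	fmt.Println(homesteadAdjust(big.NewInt(131072), 100, 113)) // 131072: a 13s block keeps difficulty flat
	fmt.Println(homesteadAdjust(big.NewInt(131072), 100, 130)) // 130944: a 30s block lowers it by two steps
}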
@ -425,8 +435,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
|
||||
if parent == nil {
|
||||
return consensus.ErrUnknownAncestor
|
||||
}
|
||||
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(),
|
||||
parent.Time.Uint64(), parent.Number, parent.Difficulty)
|
||||
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -451,7 +460,6 @@ var (
|
||||
// AccumulateRewards credits the coinbase of the given block with the mining
|
||||
// reward. The total reward consists of the static block reward and rewards for
|
||||
// included uncles. The coinbase of each uncle block is also rewarded.
|
||||
//
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func AccumulateRewards(state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
||||
reward := new(big.Int).Set(blockReward)
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
@ -71,7 +72,11 @@ func TestCalcDifficulty(t *testing.T) {
|
||||
config := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
|
||||
for name, test := range tests {
|
||||
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
|
||||
diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
|
||||
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
|
||||
Number: number,
|
||||
Time: new(big.Int).SetUint64(test.ParentTimestamp),
|
||||
Difficulty: test.ParentDifficulty,
|
||||
})
|
||||
if diff.Cmp(test.CurrentDifficulty) != 0 {
|
||||
t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
|
||||
}
|
||||
|
@ -308,14 +308,14 @@ func (d *dataset) release() {
|
||||
|
||||
// MakeCache generates a new ethash cache and optionally stores it to disk.
|
||||
func MakeCache(block uint64, dir string) {
|
||||
c := cache{epoch: block/epochLength + 1}
|
||||
c := cache{epoch: block / epochLength}
|
||||
c.generate(dir, math.MaxInt32, false)
|
||||
c.release()
|
||||
}
|
||||
|
||||
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
|
||||
func MakeDataset(block uint64, dir string) {
|
||||
d := dataset{epoch: block/epochLength + 1}
|
||||
d := dataset{epoch: block / epochLength}
|
||||
d.generate(dir, math.MaxInt32, false)
|
||||
d.release()
|
||||
}
|
||||
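For context on why the "+ 1" is dropped above: ethash epochs are zero-based windows of epochLength blocks, so the epoch that covers a block is simply block / epochLength. A tiny illustration follows; the value 30000 for epochLength is assumed here purely for the example, the real constant is defined elsewhere in this package.

package main

import "fmt"

const epochLength = 30000 // assumed value of the package-level constant

func epochOf(block uint64) uint64 { return block / epochLength }

func main() {
	fmt.Println(epochOf(0))     // 0
	fmt.Println(epochOf(29999)) // 0: last block of the first epoch
	fmt.Println(epochOf(30000)) // 1: the removed "+ 1" would have made this 2
}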
@ -355,7 +355,7 @@ type Ethash struct {
|
||||
// New creates a full sized ethash PoW scheme.
|
||||
func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
|
||||
if cachesinmem <= 0 {
|
||||
log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
|
||||
log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
|
||||
cachesinmem = 1
|
||||
}
|
||||
if cachedir != "" && cachesondisk > 0 {
|
||||
@ -412,7 +412,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
|
||||
return &Ethash{fakeMode: true, fakeDelay: delay}
|
||||
}
|
||||
|
||||
// NewFullFaker creates a ethash consensus engine with a full fake scheme that
|
||||
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
|
||||
// accepts all blocks as valid, without checking any consensus rules whatsoever.
|
||||
func NewFullFaker() *Ethash {
|
||||
return &Ethash{fakeMode: true, fakeFull: true}
|
||||
@ -467,8 +467,9 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
|
||||
future = &cache{epoch: epoch + 1}
|
||||
ethash.fcache = future
|
||||
}
|
||||
}
|
||||
// New current cache, set its initial timestamp
|
||||
current.used = time.Now()
|
||||
}
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
@ -529,8 +530,9 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
|
||||
future = &dataset{epoch: epoch + 1}
|
||||
ethash.fdataset = future
|
||||
}
|
||||
}
|
||||
// New current dataset, set its initial timestamp
|
||||
current.used = time.Now()
|
||||
}
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
|
@ -1,85 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Source: https://golang.org/src/crypto/cipher/xor.go
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
|
||||
|
||||
// fastXORBytes xors in bulk. It only works on architectures that
|
||||
// support unaligned read/writes.
|
||||
func fastXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func safeXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes xor'd.
|
||||
func xorBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastXORBytes(dst, a, b)
|
||||
}
|
||||
// TODO(hanwen): if (dst, a, b) have common alignment
|
||||
// we could still try fastXORBytes. It is not clear
|
||||
// how often this happens, and it's only worth it if
|
||||
// the block encryption itself is hardware
|
||||
// accelerated.
|
||||
return safeXORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
|
||||
// The arguments are assumed to be of equal length.
|
||||
func fastXORWords(dst, a, b []byte) {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
n := len(b) / wordSize
|
||||
for i := 0; i < n; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
|
||||
func xorWords(dst, a, b []byte) {
|
||||
if supportsUnaligned {
|
||||
fastXORWords(dst, a, b)
|
||||
} else {
|
||||
safeXORBytes(dst, a, b)
|
||||
}
|
||||
}
|
@ -54,7 +54,7 @@ func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header)
|
||||
if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
|
||||
return nil
|
||||
}
|
||||
// Depending whether we support or oppose the fork, validate the extra-data contents
|
||||
// Depending on whether we support or oppose the fork, validate the extra-data contents
|
||||
if config.DAOForkSupport {
|
||||
if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
|
||||
return ErrBadProDAOExtra
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -240,17 +241,19 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
var (
|
||||
rawReq = []byte(reqVal.String())
|
||||
rawReq = reqVal.String()
|
||||
dec = json.NewDecoder(strings.NewReader(rawReq))
|
||||
reqs []jsonrpcCall
|
||||
batch bool
|
||||
)
|
||||
dec.UseNumber() // avoid float64s
|
||||
if rawReq[0] == '[' {
|
||||
batch = true
|
||||
json.Unmarshal(rawReq, &reqs)
|
||||
dec.Decode(&reqs)
|
||||
} else {
|
||||
batch = false
|
||||
reqs = make([]jsonrpcCall, 1)
|
||||
json.Unmarshal(rawReq, &reqs[0])
|
||||
dec.Decode(&reqs[0])
|
||||
}
|
||||
|
||||
// Execute the requests.
|
||||
|
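The point of moving from json.Unmarshal to a json.Decoder in the console bridge hunk above is the UseNumber call: plain unmarshalling into interface{} turns every JSON number into a float64, which silently rounds large integers such as request IDs or wei amounts. A small standalone sketch of the difference (not the console's own types; the sample value is arbitrary):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	raw := `{"id": 9007199254740993}` // too large to represent exactly as a float64

	var viaUnmarshal map[string]interface{}
	json.Unmarshal([]byte(raw), &viaUnmarshal)
	fmt.Printf("%T %v\n", viaUnmarshal["id"], viaUnmarshal["id"]) // float64, rounded

	dec := json.NewDecoder(strings.NewReader(raw))
	dec.UseNumber() // keep numbers as json.Number strings
	var viaDecoder map[string]interface{}
	dec.Decode(&viaDecoder)
	fmt.Printf("%T %v\n", viaDecoder["id"], viaDecoder["id"]) // json.Number, exact
}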
@ -2,7 +2,7 @@ FROM alpine:3.5
|
||||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.5 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.6 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apk del go git make gcc musl-dev linux-headers && \
|
||||
|
containers/vagrant/.gitignore (vendored, new file, 1 line)
@ -0,0 +1 @@
|
||||
.vagrant
|
containers/vagrant/Vagrantfile (vendored, 49 lines changed)
@ -1,29 +1,38 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
Vagrant.configure(2) do |config|
|
||||
config.vm.box = "ubuntu/trusty64"
|
||||
require 'yaml'
|
||||
|
||||
VAGRANTFILE_API_VERSION = 2
|
||||
VM_RAM = 2048
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
|
||||
config.vm.define "ubuntu", :primary => true do |ubuntu|
|
||||
ubuntu.vm.box = "ubuntu/trusty64"
|
||||
ubuntu.vm.provision "shell", :path => "provisioners/shell/ubuntu.sh"
|
||||
end
|
||||
|
||||
config.vm.define "debian", :primary => true do |debian|
|
||||
debian.vm.box = "debian/jessie64"
|
||||
debian.vm.provision "shell", :path => "provisioners/shell/debian.sh"
|
||||
end
|
||||
|
||||
config.vm.define "centos", :autostart => false do |centos|
|
||||
centos.vm.box = "centos/7"
|
||||
centos.vm.provision "shell", :path => "provisioners/shell/centos.sh"
|
||||
end
|
||||
|
||||
config.vm.provider "virtualbox" do |vb|
|
||||
vb.memory = "2048"
|
||||
vb.memory = VM_RAM
|
||||
end
|
||||
|
||||
config.vm.provider "libvirt" do |lv|
|
||||
lv.memory = VM_RAM
|
||||
|
||||
config.vm.synced_folder ".", "/home/vagrant/sync", :disabled => true
|
||||
end
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant", :disabled => true
|
||||
config.vm.synced_folder "../../", "/home/vagrant/go/src/github.com/ethereum/go-ethereum"
|
||||
config.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
sudo apt-get install software-properties-common
|
||||
sudo add-apt-repository -y ppa:ethereum/ethereum
|
||||
sudo add-apt-repository -y ppa:ethereum/ethereum-dev
|
||||
sudo apt-get update
|
||||
|
||||
sudo apt-get install -y build-essential golang git-all
|
||||
|
||||
GOPATH=/home/vagrant/go go get github.com/tools/godep
|
||||
|
||||
sudo chown -R vagrant:vagrant ~vagrant/go
|
||||
|
||||
echo "export GOPATH=/home/vagrant/go" >> ~vagrant/.bashrc
|
||||
echo "export PATH=\\\$PATH:\\\$GOPATH/bin:/usr/local/go/bin" >> ~vagrant/.bashrc
|
||||
SHELL
|
||||
end
|
||||
|
containers/vagrant/provisioners/shell/centos.sh (new executable file, 11 lines)
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo yum install -y git wget
|
||||
sudo yum update -y
|
||||
|
||||
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
|
||||
|
||||
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
|
||||
|
||||
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc
|
containers/vagrant/provisioners/shell/debian.sh (new executable file, 11 lines)
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo apt-get install -y build-essential git-all wget
|
||||
sudo apt-get update
|
||||
|
||||
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
|
||||
|
||||
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
|
||||
|
||||
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc
|
containers/vagrant/provisioners/shell/ubuntu.sh (new executable file, 11 lines)
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo apt-get install -y build-essential git-all wget
|
||||
sudo apt-get update
|
||||
|
||||
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
|
||||
|
||||
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
|
||||
|
||||
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc
|
@ -49,7 +49,7 @@ import (
|
||||
// TODO(zelig): watch peer solvency and notify of bouncing cheques
|
||||
// TODO(zelig): enable paying with cheque by signing off
|
||||
|
||||
// Some functionality require interacting with the blockchain:
|
||||
// Some functionality requires interacting with the blockchain:
|
||||
// * setting current balance on peer's chequebook
|
||||
// * sending the transaction to cash the cheque
|
||||
// * depositing ether to the chequebook
|
||||
@ -100,13 +100,13 @@ type Chequebook struct {
|
||||
// persisted fields
|
||||
balance *big.Int // not synced with blockchain
|
||||
contractAddr common.Address // contract address
|
||||
sent map[common.Address]*big.Int //tallies for beneficiarys
|
||||
sent map[common.Address]*big.Int //tallies for beneficiaries
|
||||
|
||||
txhash string // tx hash of last deposit tx
|
||||
threshold *big.Int // threshold that triggers autodeposit if not nil
|
||||
buffer *big.Int // buffer to keep on top of balance for fork protection
|
||||
|
||||
log log.Logger // contextual logger with the contrac address embedded
|
||||
log log.Logger // contextual logger with the contract address embedded
|
||||
}
|
||||
|
||||
func (self *Chequebook) String() string {
|
||||
@ -442,7 +442,7 @@ type Inbox struct {
|
||||
maxUncashed *big.Int // threshold that triggers autocashing
|
||||
cashed *big.Int // cumulative amount cashed
|
||||
cheque *Cheque // last cheque, nil if none yet received
|
||||
log log.Logger // contextual logger with the contrac address embedded
|
||||
log log.Logger // contextual logger with the contract address embedded
|
||||
}
|
||||
|
||||
// NewInbox creates an Inbox. An Inboxes is not persisted, the cumulative sum is updated
|
||||
@ -509,9 +509,8 @@ func (self *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) {
|
||||
self.autoCash(cashInterval)
|
||||
}
|
||||
|
||||
// autoCash starts a loop that periodically clears the last check
|
||||
// autoCash starts a loop that periodically clears the last cheque
|
||||
// if the peer is trusted. Clearing period could be 24h or a week.
|
||||
//
|
||||
// The caller must hold self.lock.
|
||||
func (self *Inbox) autoCash(cashInterval time.Duration) {
|
||||
if self.quit != nil {
|
||||
@ -557,7 +556,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
|
||||
|
||||
var sum *big.Int
|
||||
if self.cheque == nil {
|
||||
// the sum is checked against the blockchain once a check is received
|
||||
// the sum is checked against the blockchain once a cheque is received
|
||||
tally, err := self.session.Sent(self.beneficiary)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err)
|
||||
|
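As a usage sketch for the Inbox.AutoCash API documented above (illustrative only: the helper name and the interval/threshold values are made up for the example, constructing the Inbox itself is not shown in this diff, and the test below exercises the same call with different values):

package chequebook

import (
	"math/big"
	"time"
)

// enableAutoCashExample switches an existing Inbox into automatic cashing mode:
// cash at most once per cashInterval, or immediately once more than maxUncashed
// is outstanding. The 24h interval and 1-ether threshold are arbitrary examples.
func enableAutoCashExample(inbox *Inbox) {
	inbox.AutoCash(24*time.Hour, big.NewInt(1e18))
}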
@ -414,21 +414,10 @@ func TestCash(t *testing.T) {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
backend.Commit()
|
||||
// expBalance := big.NewInt(2)
|
||||
// gotBalance := backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
// after 3x interval time and 2 cheques received, exactly one cashing tx is sent
|
||||
time.Sleep(4 * interval)
|
||||
backend.Commit()
|
||||
|
||||
// expBalance = big.NewInt(4)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
// after stopping autocash no more tx are sent
|
||||
ch2, err := chbook.Issue(addr1, amount)
|
||||
if err != nil {
|
||||
@ -441,11 +430,6 @@ func TestCash(t *testing.T) {
|
||||
}
|
||||
time.Sleep(2 * interval)
|
||||
backend.Commit()
|
||||
// expBalance = big.NewInt(4)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
// autocash below 1
|
||||
chbook.balance = big.NewInt(2)
|
||||
@ -456,11 +440,6 @@ func TestCash(t *testing.T) {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
backend.Commit()
|
||||
// expBalance = big.NewInt(4)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
ch4, err := chbook.Issue(addr1, amount)
|
||||
if err != nil {
|
||||
@ -479,13 +458,6 @@ func TestCash(t *testing.T) {
|
||||
}
|
||||
backend.Commit()
|
||||
|
||||
// 2 checks of amount 1 received, exactly 1 tx is sent
|
||||
// expBalance = big.NewInt(6)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
// autochash on receipt when maxUncashed is 0
|
||||
chbook.balance = new(big.Int).Set(common.Big2)
|
||||
chbox.AutoCash(0, common.Big0)
|
||||
@ -495,11 +467,6 @@ func TestCash(t *testing.T) {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
backend.Commit()
|
||||
// expBalance = big.NewInt(5)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
ch6, err := chbook.Issue(addr1, amount)
|
||||
if err != nil {
|
||||
@ -511,21 +478,11 @@ func TestCash(t *testing.T) {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
backend.Commit()
|
||||
// expBalance = big.NewInt(4)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
_, err = chbox.Receive(ch6)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
backend.Commit()
|
||||
// expBalance = big.NewInt(6)
|
||||
// gotBalance = backend.BalanceAt(addr1)
|
||||
// if gotBalance.Cmp(expBalance) != 0 {
|
||||
// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
|
||||
// }
|
||||
|
||||
}
|
||||
|
@ -3,12 +3,12 @@
|
||||
## Usage
|
||||
|
||||
Full documentation for the Ethereum Name Service [can be found as EIP 137](https://github.com/ethereum/EIPs/issues/137).
|
||||
This package offers a simple binding that streamlines the registration arbitrary utf8 domain names to swarm content hashes.
|
||||
This package offers a simple binding that streamlines the registration of arbitrary UTF8 domain names to swarm content hashes.
|
||||
|
||||
## Development
|
||||
|
||||
The SOL file in contract subdirectory implements the ENS root registry, a simple
|
||||
first-in-first-served registrar for the root namespace, and a simple resolver contract;
|
||||
first-in, first-served registrar for the root namespace, and a simple resolver contract;
|
||||
they're used in tests, and can be used to deploy these contracts for your own purposes.
|
||||
|
||||
The solidity source code can be found at [github.com/arachnid/ens/](https://github.com/arachnid/ens/).
|
||||
|
@ -52,7 +52,7 @@ func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contra
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeployENS deploys an instance of the ENS nameservice, with a 'first in first served' root registrar.
|
||||
// DeployENS deploys an instance of the ENS nameservice, with a 'first-in, first-served' root registrar.
|
||||
func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (*ENS, error) {
|
||||
// Deploy the ENS registry
|
||||
ensAddr, _, _, err := contract.DeployENS(transactOpts, contractBackend, transactOpts.From)
|
||||
|
@ -79,7 +79,7 @@ func TestSignerPromotion(t *testing.T) {
|
||||
// Gradually promote the keys, until all are authorized
|
||||
keys = append([]*ecdsa.PrivateKey{key}, keys...)
|
||||
for i := 1; i < len(keys); i++ {
|
||||
// Check that no votes are accepted from the not yet authed user
|
||||
// Check that no votes are accepted from the not yet authorized user
|
||||
if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil {
|
||||
t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err)
|
||||
}
|
||||
@ -216,7 +216,7 @@ func TestVersionRelease(t *testing.T) {
|
||||
// Gradually push releases, always requiring more signers than previously
|
||||
keys = append([]*ecdsa.PrivateKey{key}, keys...)
|
||||
for i := 1; i < len(keys); i++ {
|
||||
// Check that no votes are accepted from the not yet authed user
|
||||
// Check that no votes are accepted from the not yet authorized user
|
||||
if _, err := oracle.Release(bind.NewKeyedTransactor(keys[i]), 0, 0, 0, [20]byte{0}); err != nil {
|
||||
t.Fatalf("Iter #%d: failed invalid release attempt: %v", i, err)
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -18,7 +18,7 @@ package core
|
||||
|
||||
import "github.com/ethereum/go-ethereum/common"
|
||||
|
||||
// Set of manually tracked bad hashes (usually hard forks)
|
||||
// BadHashes represent a set of manually tracked bad hashes (usually hard forks)
|
||||
var BadHashes = map[common.Hash]bool{
|
||||
common.HexToHash("05bef30ef572270f654746da22639a7a0c97dd97a7050b9e252391996aaeb689"): true,
|
||||
common.HexToHash("7d05d08cbc596a2e5e4f13b80a743e53e09221b5323c3a61946b20873e58583f"): true,
|
||||
|
@ -84,7 +84,7 @@ func (b *BlockGen) AddTx(tx *types.Transaction) {
|
||||
if b.gasPool == nil {
|
||||
b.SetCoinbase(common.Address{})
|
||||
}
|
||||
b.statedb.StartRecord(tx.Hash(), common.Hash{}, len(b.txs))
|
||||
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
|
||||
receipt, _, err := ApplyTransaction(b.config, nil, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, b.header.GasUsed, vm.Config{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -98,10 +98,10 @@ func (b *BlockGen) Number() *big.Int {
|
||||
return new(big.Int).Set(b.header.Number)
|
||||
}
|
||||
|
||||
// AddUncheckedReceipts forcefully adds a receipts to the block without a
|
||||
// AddUncheckedReceipt forcefully adds a receipts to the block without a
|
||||
// backing transaction.
|
||||
//
|
||||
// AddUncheckedReceipts will cause consensus failures when used during real
|
||||
// AddUncheckedReceipt will cause consensus failures when used during real
|
||||
// chain processing. This is best used in conjunction with raw block insertion.
|
||||
func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {
|
||||
b.receipts = append(b.receipts, receipt)
|
||||
@ -142,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
|
||||
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
|
||||
panic("block time out of range")
|
||||
}
|
||||
b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
|
||||
b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Header())
|
||||
}
|
||||
|
||||
// GenerateChain creates a chain of n blocks. The first block's
|
||||
@ -209,11 +209,16 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
|
||||
} else {
|
||||
time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
|
||||
}
|
||||
|
||||
return &types.Header{
|
||||
Root: state.IntermediateRoot(config.IsEIP158(parent.Number())),
|
||||
ParentHash: parent.Hash(),
|
||||
Coinbase: parent.Coinbase(),
|
||||
Difficulty: ethash.CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
|
||||
Difficulty: ethash.CalcDifficulty(config, time.Uint64(), &types.Header{
|
||||
Number: parent.Number(),
|
||||
Time: new(big.Int).Sub(time, big.NewInt(10)),
|
||||
Difficulty: parent.Difficulty(),
|
||||
}),
|
||||
GasLimit: CalcGasLimit(parent),
|
||||
GasUsed: new(big.Int),
|
||||
Number: new(big.Int).Add(parent.Number(), common.Big1),
|
||||
|
@ -64,7 +64,7 @@ var (
|
||||
oldBlockReceiptsPrefix = []byte("receipts-block-")
|
||||
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
|
||||
|
||||
ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error
|
||||
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
|
||||
|
||||
mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
|
||||
|
||||
@ -546,7 +546,7 @@ func mipmapKey(num, level uint64) []byte {
|
||||
return append(mipmapPre, append(lkey, key.Bytes()...)...)
|
||||
}
|
||||
|
||||
// WriteMapmapBloom writes each address included in the receipts' logs to the
|
||||
// WriteMipmapBloom writes each address included in the receipts' logs to the
|
||||
// MIP bloom bin.
|
||||
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
|
||||
mipmapBloomMu.Lock()
|
||||
@ -638,7 +638,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
|
||||
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) {
|
||||
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
|
||||
if len(jsonChainConfig) == 0 {
|
||||
return nil, ChainConfigNotFoundErr
|
||||
return nil, ErrChainConfigNotFound
|
||||
}
|
||||
|
||||
var config params.ChainConfig
|
||||
|
@ -17,8 +17,6 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
@ -43,15 +41,9 @@ type NewMinedBlockEvent struct{ Block *types.Block }
|
||||
// RemovedTransactionEvent is posted when a reorg happens
|
||||
type RemovedTransactionEvent struct{ Txs types.Transactions }
|
||||
|
||||
// RemovedLogEvent is posted when a reorg happens
|
||||
// RemovedLogsEvent is posted when a reorg happens
|
||||
type RemovedLogsEvent struct{ Logs []*types.Log }
|
||||
|
||||
// ChainSplit is posted when a new head is detected
|
||||
type ChainSplitEvent struct {
|
||||
Block *types.Block
|
||||
Logs []*types.Log
|
||||
}
|
||||
|
||||
type ChainEvent struct {
|
||||
Block *types.Block
|
||||
Hash common.Hash
|
||||
@ -73,8 +65,6 @@ type ChainUncleEvent struct {
|
||||
|
||||
type ChainHeadEvent struct{ Block *types.Block }
|
||||
|
||||
type GasPriceChanged struct{ Price *big.Int }
|
||||
|
||||
// Mining operation events
|
||||
type StartMining struct{}
|
||||
type TopMining struct{}
|
||||
|
@ -20,4 +20,4 @@ import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var BlockReward *big.Int = big.NewInt(5e+18)
|
||||
var BlockReward = big.NewInt(5e+18)
|
||||
|
@ -86,7 +86,7 @@ type GenesisMismatchError struct {
|
||||
}
|
||||
|
||||
func (e *GenesisMismatchError) Error() string {
|
||||
return fmt.Sprintf("wrong genesis block in database (have %x, new %x)", e.Stored[:8], e.New[:8])
|
||||
return fmt.Sprintf("database already contains an incompatible genesis block (have %x, new %x)", e.Stored[:8], e.New[:8])
|
||||
}
|
||||
|
||||
// SetupGenesisBlock writes or updates the genesis block in db.
|
||||
@ -133,7 +133,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
|
||||
newcfg := genesis.configOrDefault(stored)
|
||||
storedcfg, err := GetChainConfig(db, stored)
|
||||
if err != nil {
|
||||
if err == ChainConfigNotFoundErr {
|
||||
if err == ErrChainConfigNotFound {
|
||||
// This case happens if a genesis write was interrupted.
|
||||
log.Warn("Found genesis block without chain config")
|
||||
err = WriteChainConfig(db, stored, newcfg)
|
||||
@ -278,6 +278,18 @@ func DefaultTestnetGenesisBlock() *Genesis {
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultRinkebyGenesisBlock returns the Rinkeby network genesis block.
|
||||
func DefaultRinkebyGenesisBlock() *Genesis {
|
||||
return &Genesis{
|
||||
Config: params.RinkebyChainConfig,
|
||||
Timestamp: 1492009146,
|
||||
ExtraData: hexutil.MustDecode("0x52657370656374206d7920617574686f7269746168207e452e436172746d616e42eb768f2244c8811c63729a21a3569731535f067ffc57839b00206d1ad20c69a1981b489f772031b279182d99e65703f0076e4812653aab85fca0f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
GasLimit: 4700000,
|
||||
Difficulty: big.NewInt(1),
|
||||
Alloc: decodePrealloc(rinkebyAllocData),
|
||||
}
|
||||
}
|
||||
|
||||
// DevGenesisBlock returns the 'geth --dev' genesis block.
|
||||
func DevGenesisBlock() *Genesis {
|
||||
return &Genesis{
|
||||
|
File diff suppressed because one or more lines are too long
@ -201,15 +201,6 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
||||
// header writes should be protected by the parent chain mutex individually.
|
||||
type WhCallback func(*types.Header) error
|
||||
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
//
|
||||
// The verify parameter can be used to fine tune whether nonce verification
|
||||
// should be done or not. The reason behind the optional check is because some
|
||||
// of the header retrieval mechanisms already need to verfy nonces, as well as
|
||||
// because nonces can be verified sparsely, not needing to check each.
|
||||
|
||||
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
||||
// Do a sanity check that the provided chain is actually ordered and linked
|
||||
for i := 1; i < len(chain); i++ {
|
||||
@ -257,6 +248,14 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
//
|
||||
// The verify parameter can be used to fine tune whether nonce verification
|
||||
// should be done or not. The reason behind the optional check is because some
|
||||
// of the header retrieval mechanisms already need to verfy nonces, as well as
|
||||
// because nonces can be verified sparsely, not needing to check each.
|
||||
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCallback, start time.Time) (int, error) {
|
||||
// Collect some import statistics to report on
|
||||
stats := struct{ processed, ignored int }{}
|
||||
|
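With the comment block relocated as shown above, header import is an explicit two-step affair: ValidateHeaderChain checks the chain up front (verifying nonces only sparsely, per checkFreq), and InsertHeaderChain writes it. A hedged sketch of a caller following that split; the helper name, the checkFreq value of 100, and the whFunc/start parameters are placeholders chosen by the caller.

package core

import (
	"time"

	"github.com/ethereum/go-ethereum/core/types"
)

// importHeaders is an illustrative sketch of the validate-then-insert split:
// verify the chain first (nonces checked only every checkFreq headers), then
// hand the validated headers to InsertHeaderChain.
func importHeaders(hc *HeaderChain, chain []*types.Header, whFunc WhCallback, start time.Time) (int, error) {
	if n, err := hc.ValidateHeaderChain(chain, 100); err != nil {
		return n, err
	}
	return hc.InsertHeaderChain(chain, whFunc, start)
}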
@ -21,8 +21,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
// "github.com/ethereum/go-ethereum/crypto"
|
||||
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
@ -38,24 +36,24 @@ type TestManager struct {
|
||||
Blocks []*types.Block
|
||||
}
|
||||
|
||||
func (s *TestManager) IsListening() bool {
|
||||
func (tm *TestManager) IsListening() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *TestManager) IsMining() bool {
|
||||
func (tm *TestManager) IsMining() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *TestManager) PeerCount() int {
|
||||
func (tm *TestManager) PeerCount() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *TestManager) Peers() *list.List {
|
||||
func (tm *TestManager) Peers() *list.List {
|
||||
return list.New()
|
||||
}
|
||||
|
||||
func (s *TestManager) BlockChain() *BlockChain {
|
||||
return s.blockChain
|
||||
func (tm *TestManager) BlockChain() *BlockChain {
|
||||
return tm.blockChain
|
||||
}
|
||||
|
||||
func (tm *TestManager) TxPool() *TxPool {
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
type DumpAccount struct {
|
||||
@ -44,7 +45,7 @@ func (self *StateDB) RawDump() Dump {
|
||||
Accounts: make(map[string]DumpAccount),
|
||||
}
|
||||
|
||||
it := self.trie.Iterator()
|
||||
it := trie.NewIterator(self.trie.NodeIterator(nil))
|
||||
for it.Next() {
|
||||
addr := self.trie.GetKey(it.Key)
|
||||
var data Account
|
||||
@ -61,7 +62,7 @@ func (self *StateDB) RawDump() Dump {
|
||||
Code: common.Bytes2Hex(obj.Code(self.db)),
|
||||
Storage: make(map[string]string),
|
||||
}
|
||||
storageIt := obj.getTrie(self.db).Iterator()
|
||||
storageIt := trie.NewIterator(obj.getTrie(self.db).NodeIterator(nil))
|
||||
for storageIt.Next() {
|
||||
account.Storage[common.Bytes2Hex(self.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(storageIt.Value)
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ func (it *NodeIterator) step() error {
|
||||
}
|
||||
// Initialize the iterator if we've just started
|
||||
if it.stateIt == nil {
|
||||
it.stateIt = it.state.trie.NodeIterator()
|
||||
it.stateIt = it.state.trie.NodeIterator(nil)
|
||||
}
|
||||
// If we had data nodes previously, we surely have at least state nodes
|
||||
if it.dataIt != nil {
|
||||
@ -118,7 +118,7 @@ func (it *NodeIterator) step() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
it.dataIt = trie.NewNodeIterator(dataTrie)
|
||||
it.dataIt = dataTrie.NodeIterator(nil)
|
||||
if !it.dataIt.Next(true) {
|
||||
it.dataIt = nil
|
||||
}
|
||||
|
@ -91,6 +91,11 @@ func (ch suicideChange) undo(s *StateDB) {
|
||||
if obj != nil {
|
||||
obj.suicided = ch.prev
|
||||
obj.setBalance(ch.prevbalance)
|
||||
// if the object wasn't suicided before, remove
|
||||
// it from the list of destructed objects as well.
|
||||
if !obj.suicided {
|
||||
delete(s.stateObjectsDestructed, *ch.account)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -201,7 +201,7 @@ func (self *stateObject) setState(key, value common.Hash) {
|
||||
}
|
||||
|
||||
// updateTrie writes cached storage modifications into the object's storage trie.
|
||||
func (self *stateObject) updateTrie(db trie.Database) {
|
||||
func (self *stateObject) updateTrie(db trie.Database) *trie.SecureTrie {
|
||||
tr := self.getTrie(db)
|
||||
for key, value := range self.dirtyStorage {
|
||||
delete(self.dirtyStorage, key)
|
||||
@ -213,6 +213,7 @@ func (self *stateObject) updateTrie(db trie.Database) {
|
||||
v, _ := rlp.EncodeToBytes(bytes.TrimLeft(value[:], "\x00"))
|
||||
tr.Update(key[:], v)
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
// UpdateRoot sets the trie root to the current root hash of
|
||||
@ -280,7 +281,11 @@ func (c *stateObject) ReturnGas(gas *big.Int) {}
|
||||
|
||||
func (self *stateObject) deepCopy(db *StateDB, onDirty func(addr common.Address)) *stateObject {
|
||||
stateObject := newObject(db, self.address, self.data, onDirty)
|
||||
stateObject.trie = self.trie
|
||||
if self.trie != nil {
|
||||
// A shallow copy makes the two tries independent.
|
||||
cpy := *self.trie
|
||||
stateObject.trie = &cpy
|
||||
}
|
||||
stateObject.code = self.code
|
||||
stateObject.dirtyStorage = self.dirtyStorage.Copy()
|
||||
stateObject.cachedStorage = self.dirtyStorage.Copy()
|
||||
|
@ -64,6 +64,7 @@ type StateDB struct {
|
||||
// This map holds 'live' objects, which will get modified while processing a state transition.
|
||||
stateObjects map[common.Address]*stateObject
|
||||
stateObjectsDirty map[common.Address]struct{}
|
||||
stateObjectsDestructed map[common.Address]struct{}
|
||||
|
||||
// The refund counter, also used by state transitioning.
|
||||
refund *big.Int
|
||||
@ -97,6 +98,7 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
|
||||
codeSizeCache: csc,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
@ -119,6 +121,7 @@ func (self *StateDB) New(root common.Hash) (*StateDB, error) {
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
@ -138,6 +141,7 @@ func (self *StateDB) Reset(root common.Hash) error {
|
||||
self.trie = tr
|
||||
self.stateObjects = make(map[common.Address]*stateObject)
|
||||
self.stateObjectsDirty = make(map[common.Address]struct{})
|
||||
self.stateObjectsDestructed = make(map[common.Address]struct{})
|
||||
self.thash = common.Hash{}
|
||||
self.bhash = common.Hash{}
|
||||
self.txIndex = 0
|
||||
@ -173,12 +177,6 @@ func (self *StateDB) pushTrie(t *trie.SecureTrie) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
|
||||
self.thash = thash
|
||||
self.bhash = bhash
|
||||
self.txIndex = ti
|
||||
}
|
||||
|
||||
func (self *StateDB) AddLog(log *types.Log) {
|
||||
self.journal = append(self.journal, addLogChange{txhash: self.thash})
|
||||
|
||||
@ -296,6 +294,17 @@ func (self *StateDB) GetState(a common.Address, b common.Hash) common.Hash {
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
// StorageTrie returns the storage trie of an account.
|
||||
// The return value is a copy and is nil for non-existent accounts.
|
||||
func (self *StateDB) StorageTrie(a common.Address) *trie.SecureTrie {
|
||||
stateObject := self.getStateObject(a)
|
||||
if stateObject == nil {
|
||||
return nil
|
||||
}
|
||||
cpy := stateObject.deepCopy(self, nil)
|
||||
return cpy.updateTrie(self.db)
|
||||
}
|
||||
|
||||
func (self *StateDB) HasSuicided(addr common.Address) bool {
|
||||
stateObject := self.getStateObject(addr)
|
||||
if stateObject != nil {
|
||||
@ -369,6 +378,8 @@ func (self *StateDB) Suicide(addr common.Address) bool {
|
||||
})
|
||||
stateObject.markSuicided()
|
||||
stateObject.data.Balance = new(big.Int)
|
||||
self.stateObjectsDestructed[addr] = struct{}{}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@ -481,7 +492,7 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
|
||||
cb(h, value)
|
||||
}
|
||||
|
||||
it := so.getTrie(db.db).Iterator()
|
||||
it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil))
|
||||
for it.Next() {
|
||||
// ignore cached values
|
||||
key := common.BytesToHash(db.trie.GetKey(it.Key))
|
||||
@ -505,6 +516,7 @@ func (self *StateDB) Copy() *StateDB {
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}, len(self.stateObjectsDestructed)),
|
||||
refund: new(big.Int).Set(self.refund),
|
||||
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
|
||||
logSize: self.logSize,
|
||||
@ -514,6 +526,9 @@ func (self *StateDB) Copy() *StateDB {
|
||||
for addr := range self.stateObjectsDirty {
|
||||
state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state, state.MarkStateObjectDirty)
|
||||
state.stateObjectsDirty[addr] = struct{}{}
|
||||
if self.stateObjects[addr].suicided {
|
||||
state.stateObjectsDestructed[addr] = struct{}{}
|
||||
}
|
||||
}
|
||||
for hash, logs := range self.logs {
|
||||
state.logs[hash] = make([]*types.Log, len(logs))
|
||||
@ -579,6 +594,27 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
|
||||
return s.trie.Hash()
|
||||
}
|
||||
|
||||
// Prepare sets the current transaction hash and index and block hash which is
|
||||
// used when the EVM emits new state logs.
|
||||
func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
|
||||
self.thash = thash
|
||||
self.bhash = bhash
|
||||
self.txIndex = ti
|
||||
}
|
||||
|
||||
// Finalise finalises the state by removing the self destructed objects
|
||||
// in the current stateObjectsDestructed buffer and clears the journal
|
||||
// as well as the refunds.
|
||||
//
|
||||
// Please note that Finalise is used by EIP#98 and is used instead of
|
||||
// IntermediateRoot.
|
||||
func (s *StateDB) Finalise() {
|
||||
for addr := range s.stateObjectsDestructed {
|
||||
s.deleteStateObject(s.stateObjects[addr])
|
||||
}
|
||||
s.clearJournalAndRefund()
|
||||
}
|
||||
|
||||
// DeleteSuicides flags the suicided objects for deletion so that it
|
||||
// won't be referenced again when called / queried up on.
|
||||
//
|
||||
|
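The new StorageTrie accessor above returns a copy of an account's storage trie, or nil for a non-existent account, and pairs naturally with the trie.NewIterator(...NodeIterator(nil)) idiom used elsewhere in this diff. A hedged sketch follows; the helper name printStorage is made up for the example.

package state

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// printStorage is illustrative only: fetch the copied storage trie of an
// account via the new StorageTrie accessor and walk its leaves with the
// iterator idiom introduced in this diff.
func printStorage(db *StateDB, addr common.Address) {
	t := db.StorageTrie(addr)
	if t == nil {
		fmt.Println("no such account")
		return
	}
	it := trie.NewIterator(t.NodeIterator(nil))
	for it.Next() {
		fmt.Printf("%x => %x\n", it.Key, it.Value)
	}
}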
@ -59,10 +59,16 @@ func (s *StateSync) Missing(max int) []common.Hash {
|
||||
}
|
||||
|
||||
// Process injects a batch of retrieved trie nodes data, returning if something
|
||||
// was committed to the database and also the index of an entry if processing of
|
||||
// was committed to the memcache and also the index of an entry if processing of
|
||||
// it failed.
|
||||
func (s *StateSync) Process(list []trie.SyncResult, dbw trie.DatabaseWriter) (bool, int, error) {
|
||||
return (*trie.TrieSync)(s).Process(list, dbw)
|
||||
func (s *StateSync) Process(list []trie.SyncResult) (bool, int, error) {
|
||||
return (*trie.TrieSync)(s).Process(list)
|
||||
}
|
||||
|
||||
// Commit flushes the data stored in the internal memcache out to persistent
|
||||
// storage, returning th enumber of items written and any occurred error.
|
||||
func (s *StateSync) Commit(dbw trie.DatabaseWriter) (int, error) {
|
||||
return (*trie.TrieSync)(s).Commit(dbw)
|
||||
}
|
||||
|
||||
// Pending returns the number of state entries currently pending for download.
|
||||
|
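This hunk splits what used to be a single Process(list, dbw) into Process, which fills the scheduler's internal memcache, and an explicit Commit to persistent storage; the tests below adopt the same two-step pattern. A hedged sketch of a download loop under the new contract; the helper name drain, the batch size of 256, and the fetch callback are placeholders for the caller's own retrieval logic.

package state

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// drain illustrates the new contract: Process feeds fetched nodes into the
// scheduler's memcache, Commit flushes them out to the given writer.
func drain(sched *StateSync, dbw trie.DatabaseWriter, fetch func([]common.Hash) []trie.SyncResult) error {
	for pending := sched.Missing(256); len(pending) > 0; pending = sched.Missing(256) {
		if _, _, err := sched.Process(fetch(pending)); err != nil {
			return err
		}
		if _, err := sched.Commit(dbw); err != nil {
			return err
		}
	}
	return nil
}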
@ -138,9 +138,12 @@ func testIterativeStateSync(t *testing.T, batch int) {
|
||||
}
|
||||
results[i] = trie.SyncResult{Hash: hash, Data: data}
|
||||
}
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
queue = append(queue[:0], sched.Missing(batch)...)
|
||||
}
|
||||
// Cross check that the two states are in sync
|
||||
@ -168,9 +171,12 @@ func TestIterativeDelayedStateSync(t *testing.T) {
|
||||
}
|
||||
results[i] = trie.SyncResult{Hash: hash, Data: data}
|
||||
}
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
queue = append(queue[len(results):], sched.Missing(0)...)
|
||||
}
|
||||
// Cross check that the two states are in sync
|
||||
@ -206,9 +212,12 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
|
||||
results = append(results, trie.SyncResult{Hash: hash, Data: data})
|
||||
}
|
||||
// Feed the retrieved results back and queue new tasks
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
queue = make(map[common.Hash]struct{})
|
||||
for _, hash := range sched.Missing(batch) {
|
||||
queue[hash] = struct{}{}
|
||||
@ -249,9 +258,12 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Feed the retrieved results back and queue new tasks
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
for _, hash := range sched.Missing(0) {
|
||||
queue[hash] = struct{}{}
|
||||
}
|
||||
@ -283,9 +295,12 @@ func TestIncompleteStateSync(t *testing.T) {
|
||||
results[i] = trie.SyncResult{Hash: hash, Data: data}
|
||||
}
|
||||
// Process each of the state nodes
|
||||
if _, index, err := sched.Process(results, dstDb); err != nil {
|
||||
if _, index, err := sched.Process(results); err != nil {
|
||||
t.Fatalf("failed to process result #%d: %v", index, err)
|
||||
}
|
||||
if index, err := sched.Commit(dstDb); err != nil {
|
||||
t.Fatalf("failed to commit data #%d: %v", index, err)
|
||||
}
|
||||
for _, result := range results {
|
||||
added = append(added, result.Hash)
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
||||
}
|
||||
// Iterate over and process the individual transactions
|
||||
for i, tx := range block.Transactions() {
|
||||
statedb.StartRecord(tx.Hash(), block.Hash(), i)
|
||||
statedb.Prepare(tx.Hash(), block.Hash(), i)
|
||||
receipt, _, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, totalUsedGas, cfg)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@ -107,7 +107,8 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
|
||||
usedGas.Add(usedGas, gas)
|
||||
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
|
||||
// based on the eip phase, we're passing wether the root touch-delete accounts.
|
||||
receipt := types.NewReceipt(statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes(), usedGas)
|
||||
root := statedb.IntermediateRoot(config.IsEIP158(header.Number))
|
||||
receipt := types.NewReceipt(root.Bytes(), usedGas)
|
||||
receipt.TxHash = tx.Hash()
|
||||
receipt.GasUsed = new(big.Int).Set(gas)
|
||||
// if the transaction created a contract, store the creation address in the receipt.
|
||||
|
Some files were not shown because too many files have changed in this diff.