Compare commits

64 commits:

421df866cd
dfc63c49c7
e44b2dc881
fed692f67e
99a0c76435
5ca5ccf90c
c4ed34f008
0ab7e90cbb
bdbfe572f1
c4e4baf668
86493f9103
6c672a55c0
48709d5340
65da8f601f
2c6214e846
0398075ced
d1696dbf07
626604e86d
9eb2873a9c
08a7cd74da
35d479b6d3
5f55d95aea
c2eea6306e
1d6b65cd84
1b2941cd56
b8c0883770
14bad7e212
8c20fe17bd
a0cc73a27a
682c4531af
5c3051e6fa
3dd46bc884
e44d50fb52
5d9ea439b3
d0668838b9
da776556d0
f2e8759d10
98095efe88
b7e3dfc5a2
3e1dbc3ca7
adb065a328
c793cb3385
3eef19598e
f4aebd4c8d
98be7cd833
eaf706b73c
b170a80cdc
aefffc9ed8
f31a3a251a
a9c94cbf48
667a386d87
d2089e46f8
be29e41334
47965930a1
bc6c4a337c
f7fdfa4eac
0405f728c6
63c5a46b82
c89fa789b7
39f1d909d1
71b577f839
a93d63d576
7fb72dbcbf
688fbab5d5
.gitignore (vendored): 8 changes

@@ -23,17 +23,11 @@ Godeps/_workspace/bin
 .project
 .settings
 
-deploy/osx/Mist.app
-deploy/osx/Mist\ Installer.dmg
-cmd/mist/assets/ext/ethereum.js/
-
 # used by the Makefile
 /build/_workspace/
 /build/bin/
+/geth*.zip
 
 # travis
 profile.tmp
 profile.cov
-
-# vagrant
-.vagrant
.gitmodules (vendored): 3 changes

@@ -1,3 +0,0 @@
-[submodule "cmd/mist/assets/ext/ethereum.js"]
-  path = cmd/mist/assets/ext/ethereum.js
-  url = https://github.com/ethereum/web3.js
.travis.yml: 60 changes

@@ -1,31 +1,45 @@
 language: go
-go:
-  - 1.4.2
-  - 1.5.4
-  - 1.6.2
-install:
-  # - go get code.google.com/p/go.tools/cmd/goimports
-  # - go get github.com/golang/lint/golint
-  # - go get golang.org/x/tools/cmd/vet
-  - go get golang.org/x/tools/cmd/cover
-before_script:
-  # - gofmt -l -w .
-  # - goimports -l -w .
-  # - golint .
-  # - go vet ./...
-  # - go test -race ./...
-script:
-  - make travis-test-with-coverage
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
-env:
-  global:
-    - secure: "U2U1AmkU4NJBgKR/uUAebQY87cNL0+1JHjnLOmmXwxYYyj5ralWb1aSuSH3qSXiT93qLBmtaUkuv9fberHVqrbAeVlztVdUsKAq7JMQH+M99iFkC9UiRMqHmtjWJ0ok4COD1sRYixxi21wb/JrMe3M1iL4QJVS61iltjHhVdM64="
+go_import_path: github.com/ethereum/go-ethereum
 sudo: false
+matrix:
+  include:
+    - os: linux
+      dist: trusty
+      go: 1.4.2
+    - os: linux
+      dist: trusty
+      go: 1.5.4
+    - os: linux
+      dist: trusty
+      go: 1.6.2
+    - os: osx
+      go: 1.6.2
+
+    # This builder does the PPA upload (and nothing else).
+    - os: linux
+      dist: trusty
+      go: 1.6.2
+      env: PPA
+      addons:
+        apt:
+          packages:
+            - devscripts
+            - debhelper
+            - dput
+      script:
+        - go run build/ci.go travis-debsrc
+
+install:
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - go run build/ci.go install
+  - go run build/ci.go test -coverage -vet
+after_success:
+  # - go run build/ci.go archive -type tar
+
 notifications:
   webhooks:
     urls:
      - https://webhooks.gitter.im/e/e09ccdce1048c5e03445
    on_success: change
    on_failure: always
-   on_start: false
Godeps/Godeps.json (generated): 21 changes

@@ -1,6 +1,7 @@
 {
   "ImportPath": "github.com/ethereum/go-ethereum",
   "GoVersion": "go1.5.2",
+  "GodepVersion": "v74",
   "Packages": [
     "./..."
   ],
@@ -13,19 +14,14 @@
     "ImportPath": "github.com/cespare/cp",
     "Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c"
   },
-  {
-    "ImportPath": "github.com/codegangsta/cli",
-    "Comment": "1.2.0-215-g0ab42fd",
-    "Rev": "0ab42fd482c27cf2c95e7794ad3bb2082c2ab2d7"
-  },
   {
     "ImportPath": "github.com/davecgh/go-spew/spew",
     "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
   },
   {
     "ImportPath": "github.com/ethereum/ethash",
-    "Comment": "v23.1-245-g25b32de",
-    "Rev": "25b32de0c0271065c28c3719c2bfe86959d72f0c"
+    "Comment": "v23.1-247-g2e80de5",
+    "Rev": "2e80de5022370cfe632195b1720db52d07ff8a77"
   },
   {
     "ImportPath": "github.com/fatih/color",
@@ -121,7 +117,7 @@
   },
   {
     "ImportPath": "github.com/rjeczalik/notify",
-    "Rev": "5dd6205716539662f8f14ab513552b41eab69d5d"
+    "Rev": "f627deca7a510d96f0ef9388f2d0e8b16d21f87f"
   },
   {
     "ImportPath": "github.com/robertkrimen/otto",
@@ -155,6 +151,10 @@
     "ImportPath": "github.com/rs/cors",
     "Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379"
   },
+  {
+    "ImportPath": "github.com/rs/xhandler",
+    "Rev": "d9d9599b6aaf6a058cb7b1f48291ded2cbd13390"
+  },
   {
     "ImportPath": "github.com/syndtr/goleveldb/leveldb",
     "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
@@ -319,6 +319,11 @@
   {
     "ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque",
     "Rev": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57"
+  },
+  {
+    "ImportPath": "gopkg.in/urfave/cli.v1",
+    "Comment": "v1.17.0",
+    "Rev": "01857ac33766ce0c93856370626f9799281c14f4"
   }
 ]
 }
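The dependency changes above drop the vendored github.com/codegangsta/cli entry and pin gopkg.in/urfave/cli.v1 at v1.17.0 instead. A minimal sketch of what that swap looks like from calling code, assuming only an import-path change plus the v1.15+ style `Action` signature; the app name and printed message are illustrative and not taken from this diff:

``` go
package main

import (
	"fmt"
	"os"

	// previously: "github.com/codegangsta/cli"
	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Name = "example" // illustrative name
	// v1.15+ prefers func(*cli.Context) error over the legacy func(*cli.Context).
	app.Action = func(c *cli.Context) error {
		fmt.Println("hello from", app.Name)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```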
Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete (generated, vendored): 14 changes

@@ -1,14 +0,0 @@
-#! /bin/bash
-
-: ${PROG:=$(basename ${BASH_SOURCE})}
-
-_cli_bash_autocomplete() {
-  local cur opts base
-  COMPREPLY=()
-  cur="${COMP_WORDS[COMP_CWORD]}"
-  opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
-  COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
-  return 0
-}
-
-complete -F _cli_bash_autocomplete $PROG
Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete (generated, vendored): 5 changes

@@ -1,5 +0,0 @@
-autoload -U compinit && compinit
-autoload -U bashcompinit && bashcompinit
-
-script_dir=$(dirname $0)
-source ${script_dir}/bash_autocomplete
Godeps/_workspace/src/github.com/ethereum/ethash/setup.py (generated, vendored): 0 changes, mode changed from normal file to executable file
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h (generated, vendored): 11 changes

@@ -59,21 +59,20 @@
 
 #define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_)
 #define fix_endian32_same(val_) val_ = ethash_swap_u32(val_)
-#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_
+#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_)
 #define fix_endian64_same(val_) val_ = ethash_swap_u64(val_)
 #define fix_endian_arr32(arr_, size_) \
   do { \
-    for (unsigned i_ = 0; i_ < (size_), ++i_) { \
+    for (unsigned i_ = 0; i_ < (size_); ++i_) { \
       arr_[i_] = ethash_swap_u32(arr_[i_]); \
     } \
-  while (0)
+  } while (0)
 #define fix_endian_arr64(arr_, size_) \
   do { \
-    for (unsigned i_ = 0; i_ < (size_), ++i_) { \
+    for (unsigned i_ = 0; i_ < (size_); ++i_) { \
       arr_[i_] = ethash_swap_u64(arr_[i_]); \
     } \
-  while (0) \
+  } while (0)
 
 #else
 # error "endian not supported"
 #endif // BYTE_ORDER
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c (generated, vendored): 2 changes

@@ -257,7 +257,7 @@ static bool ethash_hash(
 void ethash_quick_hash(
   ethash_h256_t* return_hash,
   ethash_h256_t const* header_hash,
-  uint64_t const nonce,
+  uint64_t nonce,
   ethash_h256_t const* mix_hash
 )
 {
Godeps/_workspace/src/github.com/rjeczalik/notify/.travis.yml (generated, vendored): 3 changes

@@ -21,10 +21,9 @@ env:
   - PATH=$HOME/bin:$PATH
 
 install:
-  - go get golang.org/x/tools/cmd/vet
   - go get -t -v ./...
 
 script:
-  - go tool vet -all .
+  - "(go version | grep -q 1.4) || go tool vet -all ."
   - go install $GOFLAGS ./...
   - go test -v -race $GOFLAGS ./...
Godeps/_workspace/src/github.com/rjeczalik/notify/appveyor.yml (generated, vendored): 1 change

@@ -11,7 +11,6 @@ environment:
 
 install:
   - go version
-  - go get golang.org/x/tools/cmd/vet
   - go get -v -t ./...
 
 build_script:
Godeps/_workspace/src/github.com/rjeczalik/notify/watcher_fsevents.go (generated, vendored): 2 changes

@@ -133,7 +133,7 @@ func (w *watch) Dispatch(ev []FSEvent) {
       ev[i].Flags, ev[i].Path, i, ev[i].ID, len(ev))
     if ev[i].Flags&failure != 0 {
       // TODO(rjeczalik): missing error handling
-      panic("unhandled error: " + Event(ev[i].Flags).String())
+      continue
     }
     if !strings.HasPrefix(ev[i].Path, w.path) {
       continue
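The notify change above replaces a panic on failure-flagged FSEvents with a plain skip, so one bad event no longer takes the whole watcher down. A rough sketch of the resulting dispatch behaviour, using hypothetical stand-in types rather than the library's real internals:

``` go
package main

import (
	"fmt"
	"strings"
)

// event and failureFlag are stand-ins for notify's internal FSEvent handling,
// used only to illustrate the skip-instead-of-panic control flow.
type event struct {
	flags uint32
	path  string
}

const failureFlag = uint32(1) << 31

func dispatch(events []event, watchPath string) {
	for _, ev := range events {
		if ev.flags&failureFlag != 0 {
			// Previously the equivalent code panicked here; now the broken event is skipped.
			continue
		}
		if !strings.HasPrefix(ev.path, watchPath) {
			continue
		}
		fmt.Println("dispatching", ev.path)
	}
}

func main() {
	dispatch([]event{
		{flags: failureFlag, path: "/tmp/watched/broken"},
		{flags: 0, path: "/tmp/watched/ok"},
	}, "/tmp/watched")
}
```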
Godeps/_workspace/src/github.com/rs/xhandler/.travis.yml (generated, vendored): 7 changes

@@ -1,7 +0,0 @@
-language: go
-go:
-  - 1.5
-  - tip
-matrix:
-  allow_failures:
-    - go: tip
Godeps/_workspace/src/github.com/rs/xhandler/LICENSE (generated, vendored): 19 changes

@@ -1,19 +0,0 @@
-Copyright (c) 2015 Olivier Poitrey <rs@dailymotion.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is furnished
-to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
Godeps/_workspace/src/github.com/rs/xhandler/README.md (generated, vendored): 134 changes

@@ -1,134 +0,0 @@
-# XHandler
-
-[](https://godoc.org/github.com/rs/xhandler) [](https://raw.githubusercontent.com/rs/xhandler/master/LICENSE) [](https://travis-ci.org/rs/xhandler) [](http://gocover.io/github.com/rs/xhandler)
-
-XHandler is a bridge between [net/context](https://godoc.org/golang.org/x/net/context) and `http.Handler`.
-
-It lets you enforce `net/context` in your handlers without sacrificing compatibility with existing `http.Handlers` nor imposing a specific router.
-
-Thanks to `net/context` deadline management, `xhandler` is able to enforce a per request deadline and will cancel the context when the client closes the connection unexpectedly.
-
-You may create your own `net/context` aware handler pretty much the same way as you would do with http.Handler.
-
-Read more about xhandler on [Dailymotion engineering blog](http://engineering.dailymotion.com/our-way-to-go/).
-
-## Installing
-
-    go get -u github.com/rs/xhandler
-
-## Usage
-
-```go
-package main
-
-import (
-	"log"
-	"net/http"
-	"time"
-
-	"github.com/rs/cors"
-	"github.com/rs/xhandler"
-	"golang.org/x/net/context"
-)
-
-type myMiddleware struct {
-	next xhandler.HandlerC
-}
-
-func (h myMiddleware) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-	ctx = context.WithValue(ctx, "test", "World")
-	h.next.ServeHTTPC(ctx, w, r)
-}
-
-func main() {
-	c := xhandler.Chain{}
-
-	// Add close notifier handler so context is cancelled when the client closes
-	// the connection
-	c.UseC(xhandler.CloseHandler)
-
-	// Add timeout handler
-	c.UseC(xhandler.TimeoutHandler(2 * time.Second))
-
-	// Middleware putting something in the context
-	c.UseC(func(next xhandler.HandlerC) xhandler.HandlerC {
-		return myMiddleware{next: next}
-	})
-
-	// Mix it with a non-context-aware middleware handler
-	c.Use(cors.Default().Handler)
-
-	// Final handler (using handlerFuncC), reading from the context
-	xh := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-		value := ctx.Value("test").(string)
-		w.Write([]byte("Hello " + value))
-	})
-
-	// Bridge context aware handlers with http.Handler using xhandler.Handle()
-	http.Handle("/test", c.Handler(xh))
-
-	if err := http.ListenAndServe(":8080", nil); err != nil {
-		log.Fatal(err)
-	}
-}
-```
-
-### Using xmux
-
-Xhandler comes with an optional context aware [muxer](https://github.com/rs/xmux) forked from [httprouter](https://github.com/julienschmidt/httprouter):
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-	"time"
-
-	"github.com/rs/xhandler"
-	"github.com/rs/xmux"
-	"golang.org/x/net/context"
-)
-
-func main() {
-	c := xhandler.Chain{}
-
-	// Append a context-aware middleware handler
-	c.UseC(xhandler.CloseHandler)
-
-	// Another context-aware middleware handler
-	c.UseC(xhandler.TimeoutHandler(2 * time.Second))
-
-	mux := xmux.New()
-
-	// Use c.Handler to terminate the chain with your final handler
-	mux.GET("/welcome/:name", xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
-		fmt.Fprintf(w, "Welcome %s!", xmux.Params(ctx).Get("name"))
-	}))
-
-	if err := http.ListenAndServe(":8080", c.Handler(mux)); err != nil {
-		log.Fatal(err)
-	}
-}
-```
-
-See [xmux](https://github.com/rs/xmux) for more examples.
-
-## Context Aware Middleware
-
-Here is a list of `net/context` aware middleware handlers implementing `xhandler.HandlerC` interface.
-
-Feel free to put up a PR linking your middleware if you have built one:
-
-| Middleware | Author | Description |
-| ---------- | ------ | ----------- |
-| [xmux](https://github.com/rs/xmux) | [Olivier Poitrey](https://github.com/rs) | HTTP request muxer |
-| [xlog](https://github.com/rs/xlog) | [Olivier Poitrey](https://github.com/rs) | HTTP handler logger |
-| [xstats](https://github.com/rs/xstats) | [Olivier Poitrey](https://github.com/rs) | A generic client for service instrumentation |
-| [xaccess](https://github.com/rs/xaccess) | [Olivier Poitrey](https://github.com/rs) | HTTP handler access logger with [xlog](https://github.com/rs/xlog) and [xstats](https://github.com/rs/xstats) |
-| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support |
-
-## Licenses
-
-All source code is licensed under the [MIT License](https://raw.github.com/rs/xhandler/master/LICENSE).
Godeps/_workspace/src/github.com/rs/xhandler/chain.go (generated, vendored): 93 changes

@@ -1,93 +0,0 @@
-package xhandler
-
-import (
-	"net/http"
-
-	"golang.org/x/net/context"
-)
-
-// Chain is an helper to chain middleware handlers together for an easier
-// management.
-type Chain []func(next HandlerC) HandlerC
-
-// UseC appends a context-aware handler to the middleware chain.
-func (c *Chain) UseC(f func(next HandlerC) HandlerC) {
-	*c = append(*c, f)
-}
-
-// Use appends a standard http.Handler to the middleware chain without
-// lossing track of the context when inserted between two context aware handlers.
-//
-// Caveat: the f function will be called on each request so you are better to put
-// any initialization sequence outside of this function.
-func (c *Chain) Use(f func(next http.Handler) http.Handler) {
-	xf := func(next HandlerC) HandlerC {
-		return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-			n := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				next.ServeHTTPC(ctx, w, r)
-			})
-			f(n).ServeHTTP(w, r)
-		})
-	}
-	*c = append(*c, xf)
-}
-
-// Handler wraps the provided final handler with all the middleware appended to
-// the chain and return a new standard http.Handler instance.
-// The context.Background() context is injected automatically.
-func (c Chain) Handler(xh HandlerC) http.Handler {
-	ctx := context.Background()
-	return c.HandlerCtx(ctx, xh)
-}
-
-// HandlerFC is an helper to provide a function (HandlerFuncC) to Handler().
-//
-// HandlerFC is equivalent to:
-//  c.Handler(xhandler.HandlerFuncC(xhc))
-func (c Chain) HandlerFC(xhf HandlerFuncC) http.Handler {
-	ctx := context.Background()
-	return c.HandlerCtx(ctx, HandlerFuncC(xhf))
-}
-
-// HandlerH is an helper to provide a standard http handler (http.HandlerFunc)
-// to Handler(). Your final handler won't have access the context though.
-func (c Chain) HandlerH(h http.Handler) http.Handler {
-	ctx := context.Background()
-	return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-		h.ServeHTTP(w, r)
-	}))
-}
-
-// HandlerF is an helper to provide a standard http handler function
-// (http.HandlerFunc) to Handler(). Your final handler won't have access
-// the context though.
-func (c Chain) HandlerF(hf http.HandlerFunc) http.Handler {
-	ctx := context.Background()
-	return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-		hf(w, r)
-	}))
-}
-
-// HandlerCtx wraps the provided final handler with all the middleware appended to
-// the chain and return a new standard http.Handler instance.
-func (c Chain) HandlerCtx(ctx context.Context, xh HandlerC) http.Handler {
-	return New(ctx, c.HandlerC(xh))
-}
-
-// HandlerC wraps the provided final handler with all the middleware appended to
-// the chain and returns a HandlerC instance.
-func (c Chain) HandlerC(xh HandlerC) HandlerC {
-	for i := len(c) - 1; i >= 0; i-- {
-		xh = c[i](xh)
-	}
-	return xh
-}
-
-// HandlerCF wraps the provided final handler func with all the middleware appended to
-// the chain and returns a HandlerC instance.
-//
-// HandlerCF is equivalent to:
-//  c.HandlerC(xhandler.HandlerFuncC(xhc))
-func (c Chain) HandlerCF(xhc HandlerFuncC) HandlerC {
-	return c.HandlerC(HandlerFuncC(xhc))
-}
Godeps/_workspace/src/github.com/rs/xhandler/middleware.go (generated, vendored): 59 changes

@@ -1,59 +0,0 @@
-package xhandler
-
-import (
-	"net/http"
-	"time"
-
-	"golang.org/x/net/context"
-)
-
-// CloseHandler returns a Handler cancelling the context when the client
-// connection close unexpectedly.
-func CloseHandler(next HandlerC) HandlerC {
-	return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-		// Cancel the context if the client closes the connection
-		if wcn, ok := w.(http.CloseNotifier); ok {
-			var cancel context.CancelFunc
-			ctx, cancel = context.WithCancel(ctx)
-			defer cancel()
-
-			notify := wcn.CloseNotify()
-			go func() {
-				select {
-				case <-notify:
-					cancel()
-				case <-ctx.Done():
-				}
-			}()
-		}
-
-		next.ServeHTTPC(ctx, w, r)
-	})
-}
-
-// TimeoutHandler returns a Handler which adds a timeout to the context.
-//
-// Child handlers have the responsability to obey the context deadline and to return
-// an appropriate error (or not) response in case of timeout.
-func TimeoutHandler(timeout time.Duration) func(next HandlerC) HandlerC {
-	return func(next HandlerC) HandlerC {
-		return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-			ctx, _ = context.WithTimeout(ctx, timeout)
-			next.ServeHTTPC(ctx, w, r)
-		})
-	}
-}
-
-// If is a special handler that will skip insert the condNext handler only if a condition
-// applies at runtime.
-func If(cond func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool, condNext func(next HandlerC) HandlerC) func(next HandlerC) HandlerC {
-	return func(next HandlerC) HandlerC {
-		return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-			if cond(ctx, w, r) {
-				condNext(next).ServeHTTPC(ctx, w, r)
-			} else {
-				next.ServeHTTPC(ctx, w, r)
-			}
-		})
-	}
-}
Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go (generated, vendored): 42 changes

@@ -1,42 +0,0 @@
-// Package xhandler provides a bridge between http.Handler and net/context.
-//
-// xhandler enforces net/context in your handlers without sacrificing
-// compatibility with existing http.Handlers nor imposing a specific router.
-//
-// Thanks to net/context deadline management, xhandler is able to enforce
-// a per request deadline and will cancel the context in when the client close
-// the connection unexpectedly.
-//
-// You may create net/context aware middlewares pretty much the same way as
-// you would do with http.Handler.
-package xhandler
-
-import (
-	"net/http"
-
-	"golang.org/x/net/context"
-)
-
-// HandlerC is a net/context aware http.Handler
-type HandlerC interface {
-	ServeHTTPC(context.Context, http.ResponseWriter, *http.Request)
-}
-
-// HandlerFuncC type is an adapter to allow the use of ordinary functions
-// as a xhandler.Handler. If f is a function with the appropriate signature,
-// xhandler.HandlerFuncC(f) is a xhandler.Handler object that calls f.
-type HandlerFuncC func(context.Context, http.ResponseWriter, *http.Request)
-
-// ServeHTTPC calls f(ctx, w, r).
-func (f HandlerFuncC) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-	f(ctx, w, r)
-}
-
-// New creates a conventional http.Handler injecting the provided root
-// context to sub handlers. This handler is used as a bridge between conventional
-// http.Handler and context aware handlers.
-func New(ctx context.Context, h HandlerC) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		h.ServeHTTPC(ctx, w, r)
-	})
-}
@@ -2,18 +2,22 @@ language: go
 sudo: false
 
 go:
-- 1.0.3
 - 1.1.2
 - 1.2.2
 - 1.3.3
-- 1.4.2
-- 1.5.1
+- 1.4
+- 1.5.4
+- 1.6.2
 - tip
 
 matrix:
   allow_failures:
     - go: tip
 
+before_script:
+- go get github.com/meatballhat/gfmxr/...
+
 script:
 - go vet ./...
 - go test -v ./...
+- gfmxr -c $(grep -c 'package main' README.md) -s README.md
Godeps/_workspace/src/gopkg.in/urfave/cli.v1/CHANGELOG.md (generated, vendored, new file): 310 changes

@@ -0,0 +1,310 @@
+# Change Log
+
+**ATTN**: This project uses [semantic versioning](http://semver.org/).
+
+## [Unreleased]
+
+## [1.17.0] - 2016-05-09
+### Added
+- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc`
+- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool`
+- Support for hiding commands by setting `Hidden: true` -- this will hide the
+  commands in help output
+
+### Changed
+- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer
+  quoted in help text output.
+- All flag types now include `(default: {value})` strings following usage when a
+  default value can be (reasonably) detected.
+- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent
+  with non-slice flag types
+- Apps now exit with a code of 3 if an unknown subcommand is specified
+  (previously they printed "No help topic for...", but still exited 0. This
+  makes it easier to script around apps built using `cli` since they can trust
+  that a 0 exit code indicated a successful execution.
+- cleanups based on [Go Report Card
+  feedback](https://goreportcard.com/report/github.com/codegangsta/cli)
+
+## [1.16.0] - 2016-05-02
+### Added
+- `Hidden` field on all flag struct types to omit from generated help text
+
+### Changed
+- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from
+  generated help text via the `Hidden` field
+
+### Fixed
+- handling of error values in `HandleAction` and `HandleExitCoder`
+
+## [1.15.0] - 2016-04-30
+### Added
+- This file!
+- Support for placeholders in flag usage strings
+- `App.Metadata` map for arbitrary data/state management
+- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after
+  parsing.
+- Support for nested lookup of dot-delimited keys in structures loaded from
+  YAML.
+
+### Changed
+- The `App.Action` and `Command.Action` now prefer a return signature of
+  `func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil
+  `error` is returned, there may be two outcomes:
+  - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called
+    automatically
+  - Else the error is bubbled up and returned from `App.Run`
+- Specifying an `Action` with the legacy return signature of
+  `func(*cli.Context)` will produce a deprecation message to stderr
+- Specifying an `Action` that is not a `func` type will produce a non-zero exit
+  from `App.Run`
+- Specifying an `Action` func that has an invalid (input) signature will
+  produce a non-zero exit from `App.Run`
+
+### Deprecated
+- <a name="deprecated-cli-app-runandexitonerror"></a>
+  `cli.App.RunAndExitOnError`, which should now be done by returning an error
+  that fulfills `cli.ExitCoder` to `cli.App.Run`.
+- <a name="deprecated-cli-app-action-signature"></a> the legacy signature for
+  `cli.App.Action` of `func(*cli.Context)`, which should now have a return
+  signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`.
+
+### Fixed
+- Added missing `*cli.Context.GlobalFloat64` method
+
+## [1.14.0] - 2016-04-03 (backfilled 2016-04-25)
+### Added
+- Codebeat badge
+- Support for categorization via `CategorizedHelp` and `Categories` on app.
+
+### Changed
+- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`.
+
+### Fixed
+- Ensure version is not shown in help text when `HideVersion` set.
+
+## [1.13.0] - 2016-03-06 (backfilled 2016-04-25)
+### Added
+- YAML file input support.
+- `NArg` method on context.
+
+## [1.12.0] - 2016-02-17 (backfilled 2016-04-25)
+### Added
+- Custom usage error handling.
+- Custom text support in `USAGE` section of help output.
+- Improved help messages for empty strings.
+- AppVeyor CI configuration.
+
+### Changed
+- Removed `panic` from default help printer func.
+- De-duping and optimizations.
+
+### Fixed
+- Correctly handle `Before`/`After` at command level when no subcommands.
+- Case of literal `-` argument causing flag reordering.
+- Environment variable hints on Windows.
+- Docs updates.
+
+## [1.11.1] - 2015-12-21 (backfilled 2016-04-25)
+### Changed
+- Use `path.Base` in `Name` and `HelpName`
+- Export `GetName` on flag types.
+
+### Fixed
+- Flag parsing when skipping is enabled.
+- Test output cleanup.
+- Move completion check to account for empty input case.
+
+## [1.11.0] - 2015-11-15 (backfilled 2016-04-25)
+### Added
+- Destination scan support for flags.
+- Testing against `tip` in Travis CI config.
+
+### Changed
+- Go version in Travis CI config.
+
+### Fixed
+- Removed redundant tests.
+- Use correct example naming in tests.
+
+## [1.10.2] - 2015-10-29 (backfilled 2016-04-25)
+### Fixed
+- Remove unused var in bash completion.
+
+## [1.10.1] - 2015-10-21 (backfilled 2016-04-25)
+### Added
+- Coverage and reference logos in README.
+
+### Fixed
+- Use specified values in help and version parsing.
+- Only display app version and help message once.
+
+## [1.10.0] - 2015-10-06 (backfilled 2016-04-25)
+### Added
+- More tests for existing functionality.
+- `ArgsUsage` at app and command level for help text flexibility.
+
+### Fixed
+- Honor `HideHelp` and `HideVersion` in `App.Run`.
+- Remove juvenile word from README.
+
+## [1.9.0] - 2015-09-08 (backfilled 2016-04-25)
+### Added
+- `FullName` on command with accompanying help output update.
+- Set default `$PROG` in bash completion.
+
+### Changed
+- Docs formatting.
+
+### Fixed
+- Removed self-referential imports in tests.
+
+## [1.8.0] - 2015-06-30 (backfilled 2016-04-25)
+### Added
+- Support for `Copyright` at app level.
+- `Parent` func at context level to walk up context lineage.
+
+### Fixed
+- Global flag processing at top level.
+
+## [1.7.1] - 2015-06-11 (backfilled 2016-04-25)
+### Added
+- Aggregate errors from `Before`/`After` funcs.
+- Doc comments on flag structs.
+- Include non-global flags when checking version and help.
+- Travis CI config updates.
+
+### Fixed
+- Ensure slice type flags have non-nil values.
+- Collect global flags from the full command hierarchy.
+- Docs prose.
+
+## [1.7.0] - 2015-05-03 (backfilled 2016-04-25)
+### Changed
+- `HelpPrinter` signature includes output writer.
+
+### Fixed
+- Specify go 1.1+ in docs.
+- Set `Writer` when running command as app.
+
+## [1.6.0] - 2015-03-23 (backfilled 2016-04-25)
+### Added
+- Multiple author support.
+- `NumFlags` at context level.
+- `Aliases` at command level.
+
+### Deprecated
+- `ShortName` at command level.
+
+### Fixed
+- Subcommand help output.
+- Backward compatible support for deprecated `Author` and `Email` fields.
+- Docs regarding `Names`/`Aliases`.
+
+## [1.5.0] - 2015-02-20 (backfilled 2016-04-25)
+### Added
+- `After` hook func support at app and command level.
+
+### Fixed
+- Use parsed context when running command as subcommand.
+- Docs prose.
+
+## [1.4.1] - 2015-01-09 (backfilled 2016-04-25)
+### Added
+- Support for hiding `-h / --help` flags, but not `help` subcommand.
+- Stop flag parsing after `--`.
+
+### Fixed
+- Help text for generic flags to specify single value.
+- Use double quotes in output for defaults.
+- Use `ParseInt` instead of `ParseUint` for int environment var values.
+- Use `0` as base when parsing int environment var values.
+
+## [1.4.0] - 2014-12-12 (backfilled 2016-04-25)
+### Added
+- Support for environment variable lookup "cascade".
+- Support for `Stdout` on app for output redirection.
+
+### Fixed
+- Print command help instead of app help in `ShowCommandHelp`.
+
+## [1.3.1] - 2014-11-13 (backfilled 2016-04-25)
+### Added
+- Docs and example code updates.
+
+### Changed
+- Default `-v / --version` flag made optional.
+
+## [1.3.0] - 2014-08-10 (backfilled 2016-04-25)
+### Added
+- `FlagNames` at context level.
+- Exposed `VersionPrinter` var for more control over version output.
+- Zsh completion hook.
+- `AUTHOR` section in default app help template.
+- Contribution guidelines.
+- `DurationFlag` type.
+
+## [1.2.0] - 2014-08-02
+### Added
+- Support for environment variable defaults on flags plus tests.
+
+## [1.1.0] - 2014-07-15
+### Added
+- Bash completion.
+- Optional hiding of built-in help command.
+- Optional skipping of flag parsing at command level.
+- `Author`, `Email`, and `Compiled` metadata on app.
+- `Before` hook func support at app and command level.
+- `CommandNotFound` func support at app level.
+- Command reference available on context.
+- `GenericFlag` type.
+- `Float64Flag` type.
+- `BoolTFlag` type.
+- `IsSet` flag helper on context.
+- More flag lookup funcs at context level.
+- More tests & docs.
+
+### Changed
+- Help template updates to account for presence/absence of flags.
+- Separated subcommand help template.
+- Exposed `HelpPrinter` var for more control over help output.
+
+## [1.0.0] - 2013-11-01
+### Added
+- `help` flag in default app flag set and each command flag set.
+- Custom handling of argument parsing errors.
+- Command lookup by name at app level.
+- `StringSliceFlag` type and supporting `StringSlice` type.
+- `IntSliceFlag` type and supporting `IntSlice` type.
+- Slice type flag lookups by name at context level.
+- Export of app and command help functions.
+- More tests & docs.
+
+## 0.1.0 - 2013-07-22
+### Added
+- Initial implementation.
+
+[Unreleased]: https://github.com/codegangsta/cli/compare/v1.17.0...HEAD
+[1.17.0]: https://github.com/codegangsta/cli/compare/v1.16.0...v1.17.0
+[1.16.0]: https://github.com/codegangsta/cli/compare/v1.15.0...v1.16.0
+[1.15.0]: https://github.com/codegangsta/cli/compare/v1.14.0...v1.15.0
+[1.14.0]: https://github.com/codegangsta/cli/compare/v1.13.0...v1.14.0
+[1.13.0]: https://github.com/codegangsta/cli/compare/v1.12.0...v1.13.0
+[1.12.0]: https://github.com/codegangsta/cli/compare/v1.11.1...v1.12.0
+[1.11.1]: https://github.com/codegangsta/cli/compare/v1.11.0...v1.11.1
+[1.11.0]: https://github.com/codegangsta/cli/compare/v1.10.2...v1.11.0
+[1.10.2]: https://github.com/codegangsta/cli/compare/v1.10.1...v1.10.2
+[1.10.1]: https://github.com/codegangsta/cli/compare/v1.10.0...v1.10.1
+[1.10.0]: https://github.com/codegangsta/cli/compare/v1.9.0...v1.10.0
+[1.9.0]: https://github.com/codegangsta/cli/compare/v1.8.0...v1.9.0
+[1.8.0]: https://github.com/codegangsta/cli/compare/v1.7.1...v1.8.0
+[1.7.1]: https://github.com/codegangsta/cli/compare/v1.7.0...v1.7.1
+[1.7.0]: https://github.com/codegangsta/cli/compare/v1.6.0...v1.7.0
+[1.6.0]: https://github.com/codegangsta/cli/compare/v1.5.0...v1.6.0
+[1.5.0]: https://github.com/codegangsta/cli/compare/v1.4.1...v1.5.0
+[1.4.1]: https://github.com/codegangsta/cli/compare/v1.4.0...v1.4.1
+[1.4.0]: https://github.com/codegangsta/cli/compare/v1.3.1...v1.4.0
+[1.3.1]: https://github.com/codegangsta/cli/compare/v1.3.0...v1.3.1
+[1.3.0]: https://github.com/codegangsta/cli/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/codegangsta/cli/compare/v1.1.0...v1.2.0
+[1.1.0]: https://github.com/codegangsta/cli/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/codegangsta/cli/compare/v0.1.0...v1.0.0
@@ -1,22 +1,24 @@
 [](http://gocover.io/github.com/codegangsta/cli)
 [](https://travis-ci.org/codegangsta/cli)
 [](https://godoc.org/github.com/codegangsta/cli)
+[](https://codebeat.co/projects/github-com-codegangsta-cli)
+[](https://goreportcard.com/report/codegangsta/cli)
 
-# cli.go
+# cli
 
-`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
+cli is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
 
 ## Overview
 
 Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
 
-**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive!
+**This is where cli comes into play.** cli makes command line programming fun, organized, and expressive!
 
 ## Installation
 
 Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html).
 
-To install `cli.go`, simply run:
+To install cli, simply run:
 ```
 $ go get github.com/codegangsta/cli
 ```
@@ -28,7 +30,7 @@ export PATH=$PATH:$GOPATH/bin
 
 ## Getting Started
 
-One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`.
+One of the philosophies behind cli is that an API should be playful and full of discovery. So a cli app can be as little as one line of code in `main()`.
 
 ``` go
 package main
@@ -45,11 +47,16 @@ func main() {
 
 This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation:
 
+<!-- {
+  "output": "boom! I say!"
+} -->
 ``` go
 package main
 
 import (
+  "fmt"
   "os"
 
   "github.com/codegangsta/cli"
 )
 
@@ -57,8 +64,9 @@ func main() {
   app := cli.NewApp()
   app.Name = "boom"
   app.Usage = "make an explosive entrance"
-  app.Action = func(c *cli.Context) {
-    println("boom! I say!")
+  app.Action = func(c *cli.Context) error {
+    fmt.Println("boom! I say!")
+    return nil
   }
 
   app.Run(os.Args)
@@ -73,11 +81,16 @@ Being a programmer can be a lonely job. Thankfully by the power of automation th
 
 Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it:
 
+<!-- {
+  "output": "Hello friend!"
+} -->
 ``` go
 package main
 
 import (
+  "fmt"
  "os"
 
  "github.com/codegangsta/cli"
 )
 
@@ -85,8 +98,9 @@ func main() {
   app := cli.NewApp()
   app.Name = "greet"
   app.Usage = "fight the loneliness!"
-  app.Action = func(c *cli.Context) {
-    println("Hello friend!")
+  app.Action = func(c *cli.Context) error {
+    fmt.Println("Hello friend!")
+    return nil
   }
 
   app.Run(os.Args)
@@ -106,7 +120,7 @@ $ greet
 Hello friend!
 ```
 
-`cli.go` also generates neat help text:
+cli also generates neat help text:
 
 ```
 $ greet help
@@ -132,8 +146,9 @@ You can lookup arguments by calling the `Args` function on `cli.Context`.
 
 ``` go
 ...
-app.Action = func(c *cli.Context) {
-  println("Hello", c.Args()[0])
+app.Action = func(c *cli.Context) error {
+  fmt.Println("Hello", c.Args()[0])
+  return nil
 }
 ...
 ```
@@ -151,16 +166,17 @@ app.Flags = []cli.Flag {
     Usage: "language for the greeting",
   },
 }
-app.Action = func(c *cli.Context) {
+app.Action = func(c *cli.Context) error {
   name := "someone"
-  if len(c.Args()) > 0 {
+  if c.NArg() > 0 {
    name = c.Args()[0]
  }
  if c.String("lang") == "spanish" {
-   println("Hola", name)
+   fmt.Println("Hola", name)
  } else {
-   println("Hello", name)
+   fmt.Println("Hello", name)
  }
+  return nil
 }
 ...
 ```
@@ -178,22 +194,45 @@ app.Flags = []cli.Flag {
     Destination: &language,
   },
 }
-app.Action = func(c *cli.Context) {
+app.Action = func(c *cli.Context) error {
   name := "someone"
-  if len(c.Args()) > 0 {
+  if c.NArg() > 0 {
    name = c.Args()[0]
  }
  if language == "spanish" {
-   println("Hola", name)
+   fmt.Println("Hola", name)
  } else {
-   println("Hello", name)
+   fmt.Println("Hello", name)
  }
+  return nil
 }
 ...
 ```
 
 See full list of flags at http://godoc.org/github.com/codegangsta/cli
 
+#### Placeholder Values
+
+Sometimes it's useful to specify a flag's value within the usage string itself. Such placeholders are
+indicated with back quotes.
+
+For example this:
+
+```go
+cli.StringFlag{
+  Name:  "config, c",
+  Usage: "Load configuration from `FILE`",
+}
+```
+
+Will result in help output like:
+
+```
+--config FILE, -c FILE   Load configuration from FILE
+```
+
+Note that only the first placeholder is used. Subsequent back-quoted words will be left as-is.
+
 #### Alternate Names
 
 You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g.
@@ -238,6 +277,49 @@ app.Flags = []cli.Flag {
 }
 ```
 
+#### Values from alternate input sources (YAML and others)
+
+There is a separate package altsrc that adds support for getting flag values from other input sources like YAML.
+
+In order to get values for a flag from an alternate input source the following code would be added to wrap an existing cli.Flag like below:
+
+``` go
+  altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
+```
+
+Initialization must also occur for these flags. Below is an example initializing getting data from a yaml file below.
+
+``` go
+  command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+```
+
+The code above will use the "load" string as a flag name to get the file name of a yaml file from the cli.Context.
+It will then use that file name to initialize the yaml input source for any flags that are defined on that command.
+As a note the "load" flag used would also have to be defined on the command flags in order for this code snipped to work.
+
+Currently only YAML files are supported but developers can add support for other input sources by implementing the
+altsrc.InputSourceContext for their given sources.
+
+Here is a more complete sample of a command using YAML support:
+
+``` go
+  command := &cli.Command{
+    Name:        "test-cmd",
+    Aliases:     []string{"tc"},
+    Usage:       "this is for testing",
+    Description: "testing",
+    Action: func(c *cli.Context) error {
+      // Action to run
+      return nil
+    },
+    Flags: []cli.Flag{
+      NewIntFlag(cli.IntFlag{Name: "test"}),
+      cli.StringFlag{Name: "load"}},
+  }
+  command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+  err := command.Run(c)
+```
+
 ### Subcommands
 
 Subcommands can be defined for a more git-like command line app.
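The "Values from alternate input sources" sample added above omits imports and package qualifiers (`NewIntFlag` and `InitInputSourceWithContext` appear unqualified). A self-contained sketch of the same wiring, assuming the altsrc subpackage sits next to the vendored cli package at github.com/codegangsta/cli/altsrc and that the helper names match those the README uses; the flag names and file layout are illustrative:

``` go
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
	"github.com/codegangsta/cli/altsrc"
)

func main() {
	// Flags wrapped by altsrc can be filled from the YAML file named by --load.
	flags := []cli.Flag{
		altsrc.NewIntFlag(cli.IntFlag{Name: "test"}),
		cli.StringFlag{Name: "load"}, // names the YAML file to read
	}

	app := cli.NewApp()
	app.Flags = flags
	app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load"))
	app.Action = func(c *cli.Context) error {
		fmt.Println("test =", c.Int("test"))
		return nil
	}

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Invocation would be along the lines of `example --load config.yaml` with a `test: 15` entry in the YAML file, under the same assumptions.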
@@ -249,16 +331,18 @@ app.Commands = []cli.Command{
|
|||||||
Name: "add",
|
Name: "add",
|
||||||
Aliases: []string{"a"},
|
Aliases: []string{"a"},
|
||||||
Usage: "add a task to the list",
|
Usage: "add a task to the list",
|
||||||
Action: func(c *cli.Context) {
|
Action: func(c *cli.Context) error {
|
||||||
println("added task: ", c.Args().First())
|
fmt.Println("added task: ", c.Args().First())
|
||||||
|
return nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "complete",
|
Name: "complete",
|
||||||
Aliases: []string{"c"},
|
Aliases: []string{"c"},
|
||||||
Usage: "complete a task on the list",
|
Usage: "complete a task on the list",
|
||||||
Action: func(c *cli.Context) {
|
Action: func(c *cli.Context) error {
|
||||||
println("completed task: ", c.Args().First())
|
fmt.Println("completed task: ", c.Args().First())
|
||||||
|
return nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -269,15 +353,17 @@ app.Commands = []cli.Command{
|
|||||||
{
|
{
|
||||||
Name: "add",
|
Name: "add",
|
||||||
Usage: "add a new template",
|
Usage: "add a new template",
|
||||||
Action: func(c *cli.Context) {
|
Action: func(c *cli.Context) error {
|
||||||
println("new task template: ", c.Args().First())
|
fmt.Println("new task template: ", c.Args().First())
|
||||||
|
return nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "remove",
|
Name: "remove",
|
||||||
Usage: "remove an existing template",
|
Usage: "remove an existing template",
|
||||||
Action: func(c *cli.Context) {
|
Action: func(c *cli.Context) error {
|
||||||
println("removed task template: ", c.Args().First())
|
fmt.Println("removed task template: ", c.Args().First())
|
||||||
|
return nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -286,6 +372,80 @@ app.Commands = []cli.Command{
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Subcommands categories

For additional organization in apps that have many subcommands, you can
associate a category for each command to group them together in the help
output.

E.g.

```go
...
app.Commands = []cli.Command{
	{
		Name: "noop",
	},
	{
		Name:     "add",
		Category: "template",
	},
	{
		Name:     "remove",
		Category: "template",
	},
}
...
```

Will include:

```
...
COMMANDS:
    noop

  Template actions:
    add
    remove
...
```
### Exit code

Calling `App.Run` will not automatically call `os.Exit`, which means that by
default the exit code will "fall through" to being `0`. An explicit exit code
may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a
`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.:

``` go
package main

import (
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolTFlag{
			Name:  "ginger-crouton",
			Usage: "is it in the soup?",
		},
	}
	app.Action = func(ctx *cli.Context) error {
		if !ctx.Bool("ginger-crouton") {
			return cli.NewExitError("it is not in the soup", 86)
		}
		return nil
	}

	app.Run(os.Args)
}
```
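
A hypothetical run of that program (the binary name below is an assumption) surfaces the code to the shell, because `App.Run` now routes the returned error through `HandleExitCoder`:

```
$ ./soup --ginger-crouton=false
it is not in the soup
$ echo $?
86
```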
### Bash Completion

You can enable completion commands by setting the `EnableBashCompletion`

@@ -303,12 +463,13 @@ app.Commands = []cli.Command{
      Name:    "complete",
      Aliases: []string{"c"},
      Usage:   "complete a task on the list",
-     Action: func(c *cli.Context) {
-       println("completed task: ", c.Args().First())
+     Action: func(c *cli.Context) error {
+       fmt.Println("completed task: ", c.Args().First())
+       return nil
      },
      BashComplete: func(c *cli.Context) {
        // This will complete if no args are passed
-       if len(c.Args()) > 0 {
+       if c.NArg() > 0 {
          return
        }
        for _, t := range tasks {
@@ -343,6 +504,72 @@ Alternatively, you can just document that users should source the generic
 `autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
 to the name of their program (as above).

### Generated Help Text Customization

All of the help text generation may be customized, and at multiple levels. The
templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and
`SubcommandHelpTemplate` which may be reassigned or augmented, and full override
is possible by assigning a compatible func to the `cli.HelpPrinter` variable,
e.g.:

<!-- {
  "output": "Ha HA. I pwnd the help!!1"
} -->
``` go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	// EXAMPLE: Append to an existing template
	cli.AppHelpTemplate = fmt.Sprintf(`%s

WEBSITE: http://awesometown.example.com

SUPPORT: support@awesometown.example.com

`, cli.AppHelpTemplate)

	// EXAMPLE: Override a template
	cli.AppHelpTemplate = `NAME:
   {{.Name}} - {{.Usage}}
USAGE:
   {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command
[command options]{{end}} {{if
.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
   {{if len .Authors}}
AUTHOR(S):
   {{range .Authors}}{{ . }}{{end}}
   {{end}}{{if .Commands}}
COMMANDS:
{{range .Commands}}{{if not .HideHelp}}   {{join .Names ", "}}{{ "\t"
}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
GLOBAL OPTIONS:
   {{range .VisibleFlags}}{{.}}
   {{end}}{{end}}{{if .Copyright }}
COPYRIGHT:
   {{.Copyright}}
   {{end}}{{if .Version}}
VERSION:
   {{.Version}}
   {{end}}
`

	// EXAMPLE: Replace the `HelpPrinter` func
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Println("Ha HA. I pwnd the help!!1")
	}

	cli.NewApp().Run(os.Args)
}
```
## Contribution Guidelines

Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.
@@ -5,10 +5,27 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
+	"reflect"
+	"sort"
 	"time"
 )
 
+var (
+	changeLogURL                    = "https://github.com/codegangsta/cli/blob/master/CHANGELOG.md"
+	appActionDeprecationURL         = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
+	runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)
+
+	contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
+
+	errNonFuncAction = NewExitError("ERROR invalid Action type. "+
+		fmt.Sprintf("Must be a func of type `cli.ActionFunc`. %s", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+	errInvalidActionSignature = NewExitError("ERROR invalid Action signature. "+
+		fmt.Sprintf("Must be `cli.ActionFunc`. %s", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+)
+
 // App is the main structure of a cli application. It is recommended that
 // an app be created with the cli.NewApp() function
 type App struct {
@@ -32,24 +49,27 @@ type App struct {
 	EnableBashCompletion bool
 	// Boolean to hide built-in help command
 	HideHelp bool
-	// Boolean to hide built-in version flag
+	// Boolean to hide built-in version flag and the VERSION section of help
 	HideVersion bool
+	// Populate on app startup, only gettable through method Categories()
+	categories CommandCategories
 	// An action to execute when the bash-completion flag is set
-	BashComplete func(context *Context)
+	BashComplete BashCompleteFunc
 	// An action to execute before any subcommands are run, but after the context is ready
 	// If a non-nil error is returned, no subcommands are run
-	Before func(context *Context) error
+	Before BeforeFunc
 	// An action to execute after any subcommands are run, but after the subcommand has finished
 	// It is run even if Action() panics
-	After func(context *Context) error
+	After AfterFunc
 	// The action to execute when no subcommands are specified
-	Action func(context *Context)
+	Action interface{}
+	// TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind
+	// of deprecation period has passed, maybe?
+
 	// Execute this function if the proper command cannot be found
-	CommandNotFound func(context *Context, command string)
-	// Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages.
-	// This function is able to replace the original error messages.
-	// If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted.
-	OnUsageError func(context *Context, err error, isSubcommand bool) error
+	CommandNotFound CommandNotFoundFunc
+	// Execute this function if an usage error occurs
+	OnUsageError OnUsageErrorFunc
 	// Compilation date
 	Compiled time.Time
 	// List of all authors who contributed
@@ -62,6 +82,12 @@ type App struct {
 	Email string
 	// Writer writer to write output to
 	Writer io.Writer
+	// ErrWriter writes error output
+	ErrWriter io.Writer
+	// Other custom info
+	Metadata map[string]interface{}
+
+	didSetup bool
 }
 
 // Tries to find out when this binary was compiled.
@@ -74,11 +100,12 @@ func compileTime() time.Time {
 	return info.ModTime()
 }
 
-// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action.
+// NewApp creates a new cli Application with some reasonable defaults for Name,
+// Usage, Version and Action.
 func NewApp() *App {
 	return &App{
-		Name:      path.Base(os.Args[0]),
-		HelpName:  path.Base(os.Args[0]),
+		Name:      filepath.Base(os.Args[0]),
+		HelpName:  filepath.Base(os.Args[0]),
 		Usage:     "A new cli application",
 		UsageText: "",
 		Version:   "0.0.0",
@@ -89,8 +116,16 @@ func NewApp() *App {
 	}
 }
 
-// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination
-func (a *App) Run(arguments []string) (err error) {
+// Setup runs initialization code to ensure all data structures are ready for
+// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
+// will return early if setup has already happened.
+func (a *App) Setup() {
+	if a.didSetup {
+		return
+	}
+
+	a.didSetup = true
+
 	if a.Author != "" || a.Email != "" {
 		a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
 	}
@@ -104,6 +139,12 @@ func (a *App) Run(arguments []string) (err error) {
 	}
 	a.Commands = newCmds
 
+	a.categories = CommandCategories{}
+	for _, command := range a.Commands {
+		a.categories = a.categories.AddCommand(command.Category, command)
+	}
+	sort.Sort(a.categories)
+
 	// append help to commands
 	if a.Command(helpCommand.Name) == nil && !a.HideHelp {
 		a.Commands = append(a.Commands, helpCommand)
@@ -120,6 +161,12 @@ func (a *App) Run(arguments []string) (err error) {
 	if !a.HideVersion {
 		a.appendFlag(VersionFlag)
 	}
+}
+
+// Run is the entry point to the cli app. Parses the arguments slice and routes
+// to the proper flag/args combination
+func (a *App) Run(arguments []string) (err error) {
+	a.Setup()
 
 	// parse flags
 	set := flagSet(a.Name, a.Flags)
@@ -140,13 +187,13 @@ func (a *App) Run(arguments []string) (err error) {
 	if err != nil {
 		if a.OnUsageError != nil {
 			err := a.OnUsageError(context, err, false)
+			HandleExitCoder(err)
 			return err
-		} else {
+		}
 		fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
 		ShowAppHelp(context)
 		return err
 	}
-	}
 
 	if !a.HideHelp && checkHelp(context) {
 		ShowAppHelp(context)
@@ -171,10 +218,12 @@ func (a *App) Run(arguments []string) (err error) {
 	}
 
 	if a.Before != nil {
-		err = a.Before(context)
-		if err != nil {
-			fmt.Fprintf(a.Writer, "%v\n\n", err)
+		beforeErr := a.Before(context)
+		if beforeErr != nil {
+			fmt.Fprintf(a.Writer, "%v\n\n", beforeErr)
 			ShowAppHelp(context)
+			HandleExitCoder(beforeErr)
+			err = beforeErr
 			return err
 		}
 	}
@@ -189,19 +238,25 @@ func (a *App) Run(arguments []string) (err error) {
 	}
 
 	// Run default Action
-	a.Action(context)
-	return nil
+	err = HandleAction(a.Action, context)
+
+	HandleExitCoder(err)
+	return err
 }
 
-// Another entry point to the cli app, takes care of passing arguments and error handling
+// DEPRECATED: Another entry point to the cli app, takes care of passing arguments and error handling
 func (a *App) RunAndExitOnError() {
+	fmt.Fprintf(a.errWriter(),
+		"DEPRECATED cli.App.RunAndExitOnError. %s See %s\n",
+		contactSysadmin, runAndExitOnErrorDeprecationURL)
 	if err := a.Run(os.Args); err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
+		fmt.Fprintln(a.errWriter(), err)
+		OsExiter(1)
 	}
 }
 
-// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags
+// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
+// generate command-specific flags
 func (a *App) RunAsSubcommand(ctx *Context) (err error) {
 	// append help to commands
 	if len(a.Commands) > 0 {
@@ -252,13 +307,13 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
 	if err != nil {
 		if a.OnUsageError != nil {
 			err = a.OnUsageError(context, err, true)
+			HandleExitCoder(err)
 			return err
-		} else {
+		}
 		fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
 		ShowSubcommandHelp(context)
 		return err
 	}
-	}
 
 	if len(a.Commands) > 0 {
 		if checkSubcommandHelp(context) {
@@ -274,6 +329,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
 		defer func() {
 			afterErr := a.After(context)
 			if afterErr != nil {
+				HandleExitCoder(err)
 				if err != nil {
 					err = NewMultiError(err, afterErr)
 				} else {
@@ -284,8 +340,10 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
 	}
 
 	if a.Before != nil {
-		err := a.Before(context)
-		if err != nil {
+		beforeErr := a.Before(context)
+		if beforeErr != nil {
+			HandleExitCoder(beforeErr)
+			err = beforeErr
 			return err
 		}
 	}
@@ -300,12 +358,13 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
 	}
 
 	// Run default Action
-	a.Action(context)
+	err = HandleAction(a.Action, context)
 
-	return nil
+	HandleExitCoder(err)
+	return err
 }
 
-// Returns the named command on App. Returns nil if the command does not exist
+// Command returns the named command on App. Returns nil if the command does not exist
 func (a *App) Command(name string) *Command {
 	for _, c := range a.Commands {
 		if c.HasName(name) {
@@ -316,6 +375,46 @@ func (a *App) Command(name string) *Command {
 	return nil
 }
 
+// Categories returns a slice containing all the categories with the commands they contain
+func (a *App) Categories() CommandCategories {
+	return a.categories
+}
+
+// VisibleCategories returns a slice of categories and commands that are
+// Hidden=false
+func (a *App) VisibleCategories() []*CommandCategory {
+	ret := []*CommandCategory{}
+	for _, category := range a.categories {
+		if visible := func() *CommandCategory {
+			for _, command := range category.Commands {
+				if !command.Hidden {
+					return category
+				}
+			}
+			return nil
+		}(); visible != nil {
+			ret = append(ret, visible)
+		}
+	}
+	return ret
+}
+
+// VisibleCommands returns a slice of the Commands with Hidden=false
+func (a *App) VisibleCommands() []Command {
+	ret := []Command{}
+	for _, command := range a.Commands {
+		if !command.Hidden {
+			ret = append(ret, command)
+		}
+	}
+	return ret
+}
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (a *App) VisibleFlags() []Flag {
+	return visibleFlags(a.Flags)
+}
+
 func (a *App) hasFlag(flag Flag) bool {
 	for _, f := range a.Flags {
 		if flag == f {
@@ -326,6 +425,16 @@ func (a *App) hasFlag(flag Flag) bool {
 	return false
 }
 
+func (a *App) errWriter() io.Writer {
+
+	// When the app ErrWriter is nil use the package level one.
+	if a.ErrWriter == nil {
+		return ErrWriter
+	}
+
+	return a.ErrWriter
+}
+
 func (a *App) appendFlag(flag Flag) {
 	if !a.hasFlag(flag) {
 		a.Flags = append(a.Flags, flag)
@@ -347,3 +456,43 @@ func (a Author) String() string {
 
 	return fmt.Sprintf("%v %v", a.Name, e)
 }
+
+// HandleAction uses ✧✧✧reflection✧✧✧ to figure out if the given Action is an
+// ActionFunc, a func with the legacy signature for Action, or some other
+// invalid thing. If it's an ActionFunc or a func with the legacy signature for
+// Action, the func is run!
+func HandleAction(action interface{}, context *Context) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			switch r.(type) {
+			case error:
+				err = r.(error)
+			default:
+				err = NewExitError(fmt.Sprintf("ERROR unknown Action error: %v. See %s", r, appActionDeprecationURL), 2)
+			}
+		}
+	}()
+
+	if reflect.TypeOf(action).Kind() != reflect.Func {
+		return errNonFuncAction
+	}
+
+	vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)})
+
+	if len(vals) == 0 {
+		fmt.Fprintf(ErrWriter,
+			"DEPRECATED Action signature. Must be `cli.ActionFunc`. %s See %s\n",
+			contactSysadmin, appActionDeprecationURL)
+		return nil
+	}
+
+	if len(vals) > 1 {
+		return errInvalidActionSignature
+	}
+
+	if retErr, ok := vals[0].Interface().(error); vals[0].IsValid() && ok {
+		return retErr
+	}

+	return err
+}
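
Not part of the diff, but for orientation: `HandleAction` is what lets the vendored library accept both the new `cli.ActionFunc` signature and the legacy one during the deprecation period. A minimal sketch of the two forms (the surrounding program and import path are assumptions taken from the README examples above):

``` go
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()

	// Preferred: the new signature returns an error; a non-nil cli.ExitCoder
	// is turned into a process exit status by HandleExitCoder inside Run.
	app.Action = func(c *cli.Context) error {
		fmt.Println("hello")
		return nil
	}

	// Still accepted: the legacy no-return signature is invoked through
	// reflection by HandleAction, which prints a deprecation notice.
	// app.Action = func(c *cli.Context) { fmt.Println("hello") }

	app.Run(os.Args)
}
```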
Godeps/_workspace/src/gopkg.in/urfave/cli.v1/category.go (generated, vendored, new file, 44 lines)
@@ -0,0 +1,44 @@
package cli

// CommandCategories is a slice of *CommandCategory.
type CommandCategories []*CommandCategory

// CommandCategory is a category containing commands.
type CommandCategory struct {
	Name     string
	Commands Commands
}

func (c CommandCategories) Less(i, j int) bool {
	return c[i].Name < c[j].Name
}

func (c CommandCategories) Len() int {
	return len(c)
}

func (c CommandCategories) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}

// AddCommand adds a command to a category.
func (c CommandCategories) AddCommand(category string, command Command) CommandCategories {
	for _, commandCategory := range c {
		if commandCategory.Name == category {
			commandCategory.Commands = append(commandCategory.Commands, command)
			return c
		}
	}
	return append(c, &CommandCategory{Name: category, Commands: []Command{command}})
}

// VisibleCommands returns a slice of the Commands with Hidden=false
func (c *CommandCategory) VisibleCommands() []Command {
	ret := []Command{}
	for _, command := range c.Commands {
		if !command.Hidden {
			ret = append(ret, command)
		}
	}
	return ret
}
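
As an illustrative sketch (not part of the vendored code), the new `Category` field and the `Categories()` accessor added above can be exercised like this; `App.Setup()` is what populates and sorts the categories:

``` go
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{Name: "noop"},
		{Name: "add", Category: "template"},
		{Name: "remove", Category: "template"},
	}

	// Setup (also called by Run) builds and sorts the command categories.
	app.Setup()
	for _, category := range app.Categories() {
		fmt.Printf("%q: %d command(s)\n", category.Name, len(category.Commands))
	}

	app.Run(os.Args)
}
```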
@@ -10,31 +10,10 @@
 // app := cli.NewApp()
 // app.Name = "greet"
 // app.Usage = "say a greeting"
-// app.Action = func(c *cli.Context) {
+// app.Action = func(c *cli.Context) error {
 //   println("Greetings")
 // }
 //
 // app.Run(os.Args)
 // }
 package cli
-
-import (
-	"strings"
-)
-
-type MultiError struct {
-	Errors []error
-}
-
-func NewMultiError(err ...error) MultiError {
-	return MultiError{Errors: err}
-}
-
-func (m MultiError) Error() string {
-	errs := make([]string, len(m.Errors))
-	for i, err := range m.Errors {
-		errs[i] = err.Error()
-	}
-
-	return strings.Join(errs, "\n")
-}
@@ -3,6 +3,7 @@ package cli
 import (
 	"fmt"
 	"io/ioutil"
+	"sort"
 	"strings"
 )
 
@@ -22,35 +23,40 @@ type Command struct {
 	Description string
 	// A short description of the arguments of this command
 	ArgsUsage string
+	// The category the command is part of
+	Category string
 	// The function to call when checking for bash command completions
-	BashComplete func(context *Context)
+	BashComplete BashCompleteFunc
 	// An action to execute before any sub-subcommands are run, but after the context is ready
 	// If a non-nil error is returned, no sub-subcommands are run
-	Before func(context *Context) error
+	Before BeforeFunc
 	// An action to execute after any subcommands are run, but after the subcommand has finished
 	// It is run even if Action() panics
-	After func(context *Context) error
+	After AfterFunc
 	// The function to call when this command is invoked
-	Action func(context *Context)
-	// Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages.
-	// This function is able to replace the original error messages.
-	// If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted.
-	OnUsageError func(context *Context, err error) error
+	Action interface{}
+	// TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind
+	// of deprecation period has passed, maybe?
+
+	// Execute this function if a usage error occurs.
+	OnUsageError OnUsageErrorFunc
 	// List of child commands
-	Subcommands []Command
+	Subcommands Commands
 	// List of flags to parse
 	Flags []Flag
 	// Treat all flags as normal arguments if true
 	SkipFlagParsing bool
 	// Boolean to hide built-in help command
 	HideHelp bool
+	// Boolean to hide this command from help or completion
+	Hidden bool
+
 	// Full name of command for help, defaults to full command name, including parent commands.
 	HelpName        string
 	commandNamePath []string
 }
 
-// Returns the full name of the command.
+// FullName returns the full name of the command.
 // For subcommands this ensures that parent commands are part of the command path
 func (c Command) FullName() string {
 	if c.commandNamePath == nil {
@@ -59,7 +65,10 @@ func (c Command) FullName() string {
 	return strings.Join(c.commandNamePath, " ")
 }
 
-// Invokes the command given the context, parses ctx.Args() to generate command-specific flags
+// Commands is a slice of Command
+type Commands []Command
+
+// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags
 func (c Command) Run(ctx *Context) (err error) {
 	if len(c.Subcommands) > 0 {
 		return c.startApp(ctx)
@@ -120,15 +129,15 @@ func (c Command) Run(ctx *Context) (err error) {
 
 	if err != nil {
 		if c.OnUsageError != nil {
-			err := c.OnUsageError(ctx, err)
+			err := c.OnUsageError(ctx, err, false)
+			HandleExitCoder(err)
 			return err
-		} else {
+		}
 		fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.")
 		fmt.Fprintln(ctx.App.Writer)
 		ShowCommandHelp(ctx, c.Name)
 		return err
 	}
-	}
 
 	nerr := normalizeFlags(c.Flags, set)
 	if nerr != nil {
@@ -137,6 +146,7 @@ func (c Command) Run(ctx *Context) (err error) {
 		ShowCommandHelp(ctx, c.Name)
 		return nerr
 	}
+
 	context := NewContext(ctx.App, set, ctx)
 
 	if checkCommandCompletions(context, c.Name) {
@@ -151,6 +161,7 @@ func (c Command) Run(ctx *Context) (err error) {
 		defer func() {
 			afterErr := c.After(context)
 			if afterErr != nil {
+				HandleExitCoder(err)
 				if err != nil {
 					err = NewMultiError(err, afterErr)
 				} else {
@@ -161,20 +172,26 @@ func (c Command) Run(ctx *Context) (err error) {
 	}
 
 	if c.Before != nil {
-		err := c.Before(context)
+		err = c.Before(context)
 		if err != nil {
 			fmt.Fprintln(ctx.App.Writer, err)
 			fmt.Fprintln(ctx.App.Writer)
 			ShowCommandHelp(ctx, c.Name)
+			HandleExitCoder(err)
 			return err
 		}
 	}
 
 	context.Command = c
-	c.Action(context)
-	return nil
+	err = HandleAction(c.Action, context)
+
+	if err != nil {
+		HandleExitCoder(err)
+	}
+	return err
 }
 
+// Names returns the names including short names and aliases.
 func (c Command) Names() []string {
 	names := []string{c.Name}
 
@@ -185,7 +202,7 @@ func (c Command) Names() []string {
 	return append(names, c.Aliases...)
 }
 
-// Returns true if Command.Name or Command.ShortName matches given name
+// HasName returns true if Command.Name or Command.ShortName matches given name
 func (c Command) HasName(name string) bool {
 	for _, n := range c.Names() {
 		if n == name {
@@ -197,7 +214,7 @@ func (c Command) HasName(name string) bool {
 
 func (c Command) startApp(ctx *Context) error {
 	app := NewApp()
+	app.Metadata = ctx.App.Metadata
 	// set the name and usage
 	app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
 	if c.HelpName == "" {
@@ -227,6 +244,13 @@ func (c Command) startApp(ctx *Context) error {
 	app.Email = ctx.App.Email
 	app.Writer = ctx.App.Writer
 
+	app.categories = CommandCategories{}
+	for _, command := range c.Subcommands {
+		app.categories = app.categories.AddCommand(command.Category, command)
+	}
+
+	sort.Sort(app.categories)
+
 	// bash completion
 	app.EnableBashCompletion = ctx.App.EnableBashCompletion
 	if c.BashComplete != nil {
@@ -248,3 +272,8 @@ func (c Command) startApp(ctx *Context) error {
 
 	return app.RunAsSubcommand(ctx)
 }
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (c Command) VisibleFlags() []Flag {
+	return visibleFlags(c.Flags)
+}
@@ -21,57 +21,62 @@ type Context struct {
 	parentContext *Context
 }
 
-// Creates a new context. For use in when invoking an App or Command action.
+// NewContext creates a new context. For use in when invoking an App or Command action.
 func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
 	return &Context{App: app, flagSet: set, parentContext: parentCtx}
 }
 
-// Looks up the value of a local int flag, returns 0 if no int flag exists
+// Int looks up the value of a local int flag, returns 0 if no int flag exists
 func (c *Context) Int(name string) int {
 	return lookupInt(name, c.flagSet)
 }
 
-// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists
+// Duration looks up the value of a local time.Duration flag, returns 0 if no
+// time.Duration flag exists
 func (c *Context) Duration(name string) time.Duration {
 	return lookupDuration(name, c.flagSet)
 }
 
-// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists
+// Float64 looks up the value of a local float64 flag, returns 0 if no float64
+// flag exists
 func (c *Context) Float64(name string) float64 {
 	return lookupFloat64(name, c.flagSet)
 }
 
-// Looks up the value of a local bool flag, returns false if no bool flag exists
+// Bool looks up the value of a local bool flag, returns false if no bool flag exists
 func (c *Context) Bool(name string) bool {
 	return lookupBool(name, c.flagSet)
 }
 
-// Looks up the value of a local boolT flag, returns false if no bool flag exists
+// BoolT looks up the value of a local boolT flag, returns false if no bool flag exists
 func (c *Context) BoolT(name string) bool {
 	return lookupBoolT(name, c.flagSet)
 }
 
-// Looks up the value of a local string flag, returns "" if no string flag exists
+// String looks up the value of a local string flag, returns "" if no string flag exists
 func (c *Context) String(name string) string {
 	return lookupString(name, c.flagSet)
 }
 
-// Looks up the value of a local string slice flag, returns nil if no string slice flag exists
+// StringSlice looks up the value of a local string slice flag, returns nil if no
+// string slice flag exists
 func (c *Context) StringSlice(name string) []string {
 	return lookupStringSlice(name, c.flagSet)
 }
 
-// Looks up the value of a local int slice flag, returns nil if no int slice flag exists
+// IntSlice looks up the value of a local int slice flag, returns nil if no int
+// slice flag exists
 func (c *Context) IntSlice(name string) []int {
 	return lookupIntSlice(name, c.flagSet)
 }
 
-// Looks up the value of a local generic flag, returns nil if no generic flag exists
+// Generic looks up the value of a local generic flag, returns nil if no generic
+// flag exists
 func (c *Context) Generic(name string) interface{} {
 	return lookupGeneric(name, c.flagSet)
 }
 
-// Looks up the value of a global int flag, returns 0 if no int flag exists
+// GlobalInt looks up the value of a global int flag, returns 0 if no int flag exists
 func (c *Context) GlobalInt(name string) int {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupInt(name, fs)
@@ -79,7 +84,17 @@ func (c *Context) GlobalInt(name string) int {
 	return 0
 }
 
-// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists
+// GlobalFloat64 looks up the value of a global float64 flag, returns float64(0)
+// if no float64 flag exists
+func (c *Context) GlobalFloat64(name string) float64 {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupFloat64(name, fs)
+	}
+	return float64(0)
+}
+
+// GlobalDuration looks up the value of a global time.Duration flag, returns 0
+// if no time.Duration flag exists
 func (c *Context) GlobalDuration(name string) time.Duration {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupDuration(name, fs)
@@ -87,7 +102,8 @@ func (c *Context) GlobalDuration(name string) time.Duration {
 	return 0
 }
 
-// Looks up the value of a global bool flag, returns false if no bool flag exists
+// GlobalBool looks up the value of a global bool flag, returns false if no bool
+// flag exists
 func (c *Context) GlobalBool(name string) bool {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupBool(name, fs)
@@ -95,7 +111,17 @@ func (c *Context) GlobalBool(name string) bool {
 	return false
 }
 
-// Looks up the value of a global string flag, returns "" if no string flag exists
+// GlobalBoolT looks up the value of a global bool flag, returns true if no bool
+// flag exists
+func (c *Context) GlobalBoolT(name string) bool {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupBoolT(name, fs)
+	}
+	return false
+}
+
+// GlobalString looks up the value of a global string flag, returns "" if no
+// string flag exists
 func (c *Context) GlobalString(name string) string {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupString(name, fs)
@@ -103,7 +129,8 @@ func (c *Context) GlobalString(name string) string {
 	return ""
 }
 
-// Looks up the value of a global string slice flag, returns nil if no string slice flag exists
+// GlobalStringSlice looks up the value of a global string slice flag, returns
+// nil if no string slice flag exists
 func (c *Context) GlobalStringSlice(name string) []string {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupStringSlice(name, fs)
@@ -111,7 +138,8 @@ func (c *Context) GlobalStringSlice(name string) []string {
 	return nil
 }
 
-// Looks up the value of a global int slice flag, returns nil if no int slice flag exists
+// GlobalIntSlice looks up the value of a global int slice flag, returns nil if
+// no int slice flag exists
 func (c *Context) GlobalIntSlice(name string) []int {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupIntSlice(name, fs)
@@ -119,7 +147,8 @@ func (c *Context) GlobalIntSlice(name string) []int {
 	return nil
 }
 
-// Looks up the value of a global generic flag, returns nil if no generic flag exists
+// GlobalGeneric looks up the value of a global generic flag, returns nil if no
+// generic flag exists
 func (c *Context) GlobalGeneric(name string) interface{} {
 	if fs := lookupGlobalFlagSet(name, c); fs != nil {
 		return lookupGeneric(name, fs)
@@ -127,12 +156,22 @@ func (c *Context) GlobalGeneric(name string) interface{} {
 	return nil
 }
 
-// Returns the number of flags set
+// NumFlags returns the number of flags set
 func (c *Context) NumFlags() int {
 	return c.flagSet.NFlag()
 }
 
-// Determines if the flag was actually set
+// Set sets a context flag to a value.
+func (c *Context) Set(name, value string) error {
+	return c.flagSet.Set(name, value)
+}
+
+// GlobalSet sets a context flag to a value on the global flagset
+func (c *Context) GlobalSet(name, value string) error {
+	return globalContext(c).flagSet.Set(name, value)
+}
+
+// IsSet determines if the flag was actually set
 func (c *Context) IsSet(name string) bool {
 	if c.setFlags == nil {
 		c.setFlags = make(map[string]bool)
@@ -143,7 +182,7 @@ func (c *Context) IsSet(name string) bool {
 	return c.setFlags[name] == true
 }
 
-// Determines if the global flag was actually set
+// GlobalIsSet determines if the global flag was actually set
 func (c *Context) GlobalIsSet(name string) bool {
 	if c.globalSetFlags == nil {
 		c.globalSetFlags = make(map[string]bool)
@@ -160,7 +199,7 @@ func (c *Context) GlobalIsSet(name string) bool {
 	return c.globalSetFlags[name]
 }
 
-// Returns a slice of flag names used in this context.
+// FlagNames returns a slice of flag names used in this context.
 func (c *Context) FlagNames() (names []string) {
 	for _, flag := range c.Command.Flags {
 		name := strings.Split(flag.GetName(), ",")[0]
@@ -172,7 +211,7 @@ func (c *Context) FlagNames() (names []string) {
 	return
 }
 
-// Returns a slice of global flag names used by the app.
+// GlobalFlagNames returns a slice of global flag names used by the app.
 func (c *Context) GlobalFlagNames() (names []string) {
 	for _, flag := range c.App.Flags {
 		name := strings.Split(flag.GetName(), ",")[0]
@@ -184,20 +223,26 @@ func (c *Context) GlobalFlagNames() (names []string) {
 	return
 }
 
-// Returns the parent context, if any
+// Parent returns the parent context, if any
 func (c *Context) Parent() *Context {
 	return c.parentContext
 }
 
+// Args contains apps console arguments
 type Args []string
 
-// Returns the command line arguments associated with the context.
+// Args returns the command line arguments associated with the context.
 func (c *Context) Args() Args {
 	args := Args(c.flagSet.Args())
 	return args
}
 
-// Returns the nth argument, or else a blank string
+// NArg returns the number of the command line arguments.
+func (c *Context) NArg() int {
+	return len(c.Args())
+}
+
+// Get returns the nth argument, or else a blank string
 func (a Args) Get(n int) string {
 	if len(a) > n {
 		return a[n]
@@ -205,12 +250,12 @@ func (a Args) Get(n int) string {
 	return ""
 }
 
-// Returns the first argument, or else a blank string
+// First returns the first argument, or else a blank string
 func (a Args) First() string {
 	return a.Get(0)
 }
 
-// Return the rest of the arguments (not the first one)
+// Tail returns the rest of the arguments (not the first one)
 // or else an empty string slice
 func (a Args) Tail() []string {
 	if len(a) >= 2 {
@@ -219,12 +264,12 @@ func (a Args) Tail() []string {
 	return []string{}
 }
 
-// Checks if there are any arguments present
+// Present checks if there are any arguments present
 func (a Args) Present() bool {
 	return len(a) != 0
 }
 
-// Swaps arguments at the given indexes
+// Swap swaps arguments at the given indexes
 func (a Args) Swap(from, to int) error {
 	if from >= len(a) || to >= len(a) {
 		return errors.New("index out of range")
@@ -233,6 +278,19 @@ func (a Args) Swap(from, to int) error {
 	return nil
 }
 
+func globalContext(ctx *Context) *Context {
+	if ctx == nil {
+		return nil
+	}
+
+	for {
+		if ctx.parentContext == nil {
+			return ctx
+		}
+		ctx = ctx.parentContext
+	}
+}
+
 func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
 	if ctx.parentContext != nil {
 		ctx = ctx.parentContext
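
A short sketch (not from the diff) of the context additions above: `NArg` and `Set` can be used inside an action like this; flag names and values are hypothetical:

``` go
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "lang"},
	}
	app.Action = func(c *cli.Context) error {
		if c.NArg() == 0 && !c.IsSet("lang") {
			// Set writes a value back into this context's flag set.
			if err := c.Set("lang", "english"); err != nil {
				return err
			}
		}
		fmt.Println("lang:", c.String("lang"), "args:", c.NArg())
		return nil
	}
	app.Run(os.Args)
}
```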
Godeps/_workspace/src/gopkg.in/urfave/cli.v1/errors.go (generated, vendored, new file, 92 lines)
@@ -0,0 +1,92 @@
package cli

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
var OsExiter = os.Exit

// ErrWriter is used to write errors to the user. This can be anything
// implementing the io.Writer interface and defaults to os.Stderr.
var ErrWriter io.Writer = os.Stderr

// MultiError is an error that wraps multiple errors.
type MultiError struct {
	Errors []error
}

// NewMultiError creates a new MultiError. Pass in one or more errors.
func NewMultiError(err ...error) MultiError {
	return MultiError{Errors: err}
}

// Error implents the error interface.
func (m MultiError) Error() string {
	errs := make([]string, len(m.Errors))
	for i, err := range m.Errors {
		errs[i] = err.Error()
	}

	return strings.Join(errs, "\n")
}

// ExitCoder is the interface checked by `App` and `Command` for a custom exit
// code
type ExitCoder interface {
	error
	ExitCode() int
}

// ExitError fulfills both the builtin `error` interface and `ExitCoder`
type ExitError struct {
	exitCode int
	message  string
}

// NewExitError makes a new *ExitError
func NewExitError(message string, exitCode int) *ExitError {
	return &ExitError{
		exitCode: exitCode,
		message:  message,
	}
}

// Error returns the string message, fulfilling the interface required by
// `error`
func (ee *ExitError) Error() string {
	return ee.message
}

// ExitCode returns the exit code, fulfilling the interface required by
// `ExitCoder`
func (ee *ExitError) ExitCode() int {
	return ee.exitCode
}

// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if
// so prints the error to stderr (if it is non-empty) and calls OsExiter with the
// given exit code. If the given error is a MultiError, then this func is
// called on all members of the Errors slice.
func HandleExitCoder(err error) {
	if err == nil {
		return
	}

	if exitErr, ok := err.(ExitCoder); ok {
		if err.Error() != "" {
			fmt.Fprintln(ErrWriter, err)
		}
		OsExiter(exitErr.ExitCode())
		return
	}

	if multiErr, ok := err.(MultiError); ok {
		for _, merr := range multiErr.Errors {
			HandleExitCoder(merr)
		}
	}
}
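
Because `ExitCoder` is an interface, any error type can opt in to a custom exit status, not just `*ExitError`. A hedged sketch (the type, message, and code below are invented for illustration and are not part of the vendored code):

``` go
package main

import (
	"os"

	"github.com/codegangsta/cli"
)

// timeoutError is a hypothetical error type; it satisfies cli.ExitCoder by
// providing Error() and ExitCode(), so HandleExitCoder will print it to
// cli.ErrWriter and call OsExiter with code 64.
type timeoutError struct{ msg string }

func (e timeoutError) Error() string { return e.msg }
func (e timeoutError) ExitCode() int { return 64 }

func main() {
	app := cli.NewApp()
	app.Action = func(c *cli.Context) error {
		return timeoutError{msg: "upstream did not answer in time"}
	}
	app.Run(os.Args)
}
```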
@@ -4,24 +4,28 @@ import (
 	"flag"
 	"fmt"
 	"os"
+	"reflect"
 	"runtime"
 	"strconv"
 	"strings"
 	"time"
 )

-// This flag enables bash-completion for all commands and subcommands
+const defaultPlaceholder = "value"
+
+// BashCompletionFlag enables bash-completion for all commands and subcommands
 var BashCompletionFlag = BoolFlag{
 	Name: "generate-bash-completion",
+	Hidden: true,
 }

-// This flag prints the version for the application
+// VersionFlag prints the version for the application
 var VersionFlag = BoolFlag{
 	Name:  "version, v",
 	Usage: "print the version",
 }

-// This flag prints the help for all commands and subcommands
+// HelpFlag prints the help for all commands and subcommands
 // Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand
 // unless HideHelp is set to true)
 var HelpFlag = BoolFlag{
@@ -29,6 +33,10 @@ var HelpFlag = BoolFlag{
 	Usage: "show help",
 }

+// FlagStringer converts a flag definition to a string. This is used by help
+// to display a flag.
+var FlagStringer FlagStringFunc = stringifyFlag
+
 // Flag is a common interface related to parsing flags in cli.
 // For more advanced flag parsing techniques, it is recommended that
 // this interface be implemented.
@@ -68,24 +76,14 @@ type GenericFlag struct {
 	Value  Generic
 	Usage  string
 	EnvVar string
+	Hidden bool
 }

 // String returns the string representation of the generic flag to display the
 // help text to the user (uses the String() method of the generic flag to show
 // the value)
 func (f GenericFlag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
-}
-
-func (f GenericFlag) FormatValueHelp() string {
-	if f.Value == nil {
-		return ""
-	}
-	s := f.Value.String()
-	if len(s) == 0 {
-		return ""
-	}
-	return fmt.Sprintf("\"%s\"", s)
+	return FlagStringer(f)
 }

 // Apply takes the flagset and calls Set on the generic flag with the value
@@ -107,6 +105,7 @@ func (f GenericFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of a flag.
 func (f GenericFlag) GetName() string {
 	return f.Name
 }
@@ -130,20 +129,19 @@ func (f *StringSlice) Value() []string {
 	return *f
 }

-// StringSlice is a string flag that can be specified multiple times on the
+// StringSliceFlag is a string flag that can be specified multiple times on the
 // command-line
 type StringSliceFlag struct {
 	Name   string
 	Value  *StringSlice
 	Usage  string
 	EnvVar string
+	Hidden bool
 }

 // String returns the usage
 func (f StringSliceFlag) String() string {
-	firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
-	pref := prefixFor(firstName)
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -171,11 +169,12 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of a flag.
 func (f StringSliceFlag) GetName() string {
 	return f.Name
 }

-// StringSlice is an opaque type for []int to satisfy flag.Value
+// IntSlice is an opaque type for []int to satisfy flag.Value
 type IntSlice []int

 // Set parses the value into an integer and appends it to the list of values
@@ -183,9 +182,8 @@ func (f *IntSlice) Set(value string) error {
 	tmp, err := strconv.Atoi(value)
 	if err != nil {
 		return err
-	} else {
-		*f = append(*f, tmp)
 	}
+	*f = append(*f, tmp)
 	return nil
 }

@@ -206,13 +204,12 @@ type IntSliceFlag struct {
 	Value  *IntSlice
 	Usage  string
 	EnvVar string
+	Hidden bool
 }

 // String returns the usage
 func (f IntSliceFlag) String() string {
-	firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
-	pref := prefixFor(firstName)
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -226,7 +223,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) {
 			s = strings.TrimSpace(s)
 			err := newVal.Set(s)
 			if err != nil {
-				fmt.Fprintf(os.Stderr, err.Error())
+				fmt.Fprintf(ErrWriter, err.Error())
 			}
 		}
 		f.Value = newVal
@@ -243,6 +240,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f IntSliceFlag) GetName() string {
 	return f.Name
 }
@@ -253,11 +251,12 @@ type BoolFlag struct {
 	Usage       string
 	EnvVar      string
 	Destination *bool
+	Hidden      bool
 }

 // String returns a readable representation of this value (for usage defaults)
 func (f BoolFlag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -285,6 +284,7 @@ func (f BoolFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f BoolFlag) GetName() string {
 	return f.Name
 }
@@ -296,11 +296,12 @@ type BoolTFlag struct {
 	Usage       string
 	EnvVar      string
 	Destination *bool
+	Hidden      bool
 }

 // String returns a readable representation of this value (for usage defaults)
 func (f BoolTFlag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -328,6 +329,7 @@ func (f BoolTFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f BoolTFlag) GetName() string {
 	return f.Name
 }
@@ -339,19 +341,12 @@ type StringFlag struct {
 	Usage       string
 	EnvVar      string
 	Destination *string
+	Hidden      bool
 }

 // String returns the usage
 func (f StringFlag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
-}
-
-func (f StringFlag) FormatValueHelp() string {
-	s := f.Value
-	if len(s) == 0 {
-		return ""
-	}
-	return fmt.Sprintf("\"%s\"", s)
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -375,6 +370,7 @@ func (f StringFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f StringFlag) GetName() string {
 	return f.Name
 }
@@ -387,11 +383,12 @@ type IntFlag struct {
 	Usage       string
 	EnvVar      string
 	Destination *int
+	Hidden      bool
 }

 // String returns the usage
 func (f IntFlag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -418,6 +415,7 @@ func (f IntFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f IntFlag) GetName() string {
 	return f.Name
 }
@@ -430,11 +428,12 @@ type DurationFlag struct {
 	Usage       string
 	EnvVar      string
 	Destination *time.Duration
+	Hidden      bool
 }

 // String returns a readable representation of this value (for usage defaults)
 func (f DurationFlag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -461,6 +460,7 @@ func (f DurationFlag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f DurationFlag) GetName() string {
 	return f.Name
 }
@@ -473,11 +473,12 @@ type Float64Flag struct {
 	Usage       string
 	EnvVar      string
 	Destination *float64
+	Hidden      bool
 }

 // String returns the usage
 func (f Float64Flag) String() string {
-	return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
+	return FlagStringer(f)
 }

 // Apply populates the flag given the flag set and environment
@@ -503,10 +504,21 @@ func (f Float64Flag) Apply(set *flag.FlagSet) {
 	})
 }

+// GetName returns the name of the flag.
 func (f Float64Flag) GetName() string {
 	return f.Name
 }

+func visibleFlags(fl []Flag) []Flag {
+	visible := []Flag{}
+	for _, flag := range fl {
+		if !reflect.ValueOf(flag).FieldByName("Hidden").Bool() {
+			visible = append(visible, flag)
+		}
+	}
+	return visible
+}
+
 func prefixFor(name string) (prefix string) {
 	if len(name) == 1 {
 		prefix = "-"
@@ -517,16 +529,37 @@ func prefixFor(name string) (prefix string) {
 	return
 }

-func prefixedNames(fullName string) (prefixed string) {
+// Returns the placeholder, if any, and the unquoted usage string.
+func unquoteUsage(usage string) (string, string) {
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name := usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break
+		}
+	}
+	return "", usage
+}
+
+func prefixedNames(fullName, placeholder string) string {
+	var prefixed string
 	parts := strings.Split(fullName, ",")
 	for i, name := range parts {
 		name = strings.Trim(name, " ")
 		prefixed += prefixFor(name) + name
+		if placeholder != "" {
+			prefixed += " " + placeholder
+		}
 		if i < len(parts)-1 {
 			prefixed += ", "
 		}
 	}
-	return
+	return prefixed
 }

 func withEnvHint(envVar, str string) string {
@@ -544,3 +577,83 @@ func withEnvHint(envVar, str string) string {
 	}
 	return str + envText
 }
+
+func stringifyFlag(f Flag) string {
+	fv := reflect.ValueOf(f)
+
+	switch f.(type) {
+	case IntSliceFlag:
+		return withEnvHint(fv.FieldByName("EnvVar").String(),
+			stringifyIntSliceFlag(f.(IntSliceFlag)))
+	case StringSliceFlag:
+		return withEnvHint(fv.FieldByName("EnvVar").String(),
+			stringifyStringSliceFlag(f.(StringSliceFlag)))
+	}
+
+	placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String())
+
+	needsPlaceholder := false
+	defaultValueString := ""
+	val := fv.FieldByName("Value")
+
+	if val.IsValid() {
+		needsPlaceholder = true
+		defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface())
+
+		if val.Kind() == reflect.String && val.String() != "" {
+			defaultValueString = fmt.Sprintf(" (default: %q)", val.String())
+		}
+	}
+
+	if defaultValueString == " (default: )" {
+		defaultValueString = ""
+	}
+
+	if needsPlaceholder && placeholder == "" {
+		placeholder = defaultPlaceholder
+	}
+
+	usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString))
+
+	return withEnvHint(fv.FieldByName("EnvVar").String(),
+		fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault))
+}
+
+func stringifyIntSliceFlag(f IntSliceFlag) string {
+	defaultVals := []string{}
+	if f.Value != nil && len(f.Value.Value()) > 0 {
+		for _, i := range f.Value.Value() {
+			defaultVals = append(defaultVals, fmt.Sprintf("%d", i))
+		}
+	}
+
+	return stringifySliceFlag(f.Usage, f.Name, defaultVals)
+}
+
+func stringifyStringSliceFlag(f StringSliceFlag) string {
+	defaultVals := []string{}
+	if f.Value != nil && len(f.Value.Value()) > 0 {
+		for _, s := range f.Value.Value() {
+			if len(s) > 0 {
+				defaultVals = append(defaultVals, fmt.Sprintf("%q", s))
+			}
+		}
+	}
+
+	return stringifySliceFlag(f.Usage, f.Name, defaultVals)
+}
+
+func stringifySliceFlag(usage, name string, defaultVals []string) string {
+	placeholder, usage := unquoteUsage(usage)
+	if placeholder == "" {
+		placeholder = defaultPlaceholder
+	}
+
+	defaultVal := ""
+	if len(defaultVals) > 0 {
+		defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", "))
+	}
+
+	usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal))
+	return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault)
+}
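The flag.go hunks above replace the per-type String() implementations with the shared FlagStringer/stringifyFlag path, add a Hidden field to every flag type (filtered out of help by visibleFlags), and let a backtick-quoted word inside Usage act as the help placeholder via unquoteUsage. A minimal sketch of how calling code might use these fields; the flag names and usage strings are illustrative, not taken from this repository:

```go
package main

import (
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// The backtick-quoted word becomes the placeholder in help output,
		// e.g. "--config FILE, -c FILE   load configuration from FILE".
		cli.StringFlag{
			Name:  "config, c",
			Usage: "load configuration from `FILE`",
		},
		// Hidden flags are still parsed, but omitted from --help.
		cli.BoolFlag{
			Name:   "experimental",
			Usage:  "enable experimental behaviour",
			Hidden: true,
		},
	}
	if err := app.Run(os.Args); err != nil {
		cli.HandleExitCoder(err)
	}
}
```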
28 Godeps/_workspace/src/gopkg.in/urfave/cli.v1/funcs.go generated vendored Normal file
@@ -0,0 +1,28 @@
+package cli
+
+// BashCompleteFunc is an action to execute when the bash-completion flag is set
+type BashCompleteFunc func(*Context)
+
+// BeforeFunc is an action to execute before any subcommands are run, but after
+// the context is ready if a non-nil error is returned, no subcommands are run
+type BeforeFunc func(*Context) error
+
+// AfterFunc is an action to execute after any subcommands are run, but after the
+// subcommand has finished it is run even if Action() panics
+type AfterFunc func(*Context) error
+
+// ActionFunc is the action to execute when no subcommands are specified
+type ActionFunc func(*Context) error
+
+// CommandNotFoundFunc is executed if the proper command cannot be found
+type CommandNotFoundFunc func(*Context, string)
+
+// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying
+// customized usage error messages. This function is able to replace the
+// original error messages. If this function is not set, the "Incorrect usage"
+// is displayed and the execution is interrupted.
+type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
+
+// FlagStringFunc is used by the help generation to display a flag, which is
+// expected to be a single line.
+type FlagStringFunc func(Flag) string
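funcs.go pins down the callback signatures this cli version expects; notably, actions now return an error, which feeds the ExitCoder handling from errors.go above. A hedged sketch of an action written against these types — the command name and messages are made up for illustration:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{{
		Name: "greet",
		// Matches cli.ActionFunc: func(*cli.Context) error.
		Action: func(c *cli.Context) error {
			if c.NArg() == 0 {
				// A non-nil ExitCoder error carries the process exit code.
				return cli.NewExitError("greet needs a name argument", 1)
			}
			fmt.Println("hello,", c.Args().First())
			return nil
		},
	}}
	if err := app.Run(os.Args); err != nil {
		cli.HandleExitCoder(err)
	}
}
```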
@@ -3,68 +3,74 @@ package cli
 import (
 	"fmt"
 	"io"
+	"os"
 	"strings"
 	"text/tabwriter"
 	"text/template"
 )

-// The text template for the Default help topic.
+// AppHelpTemplate is the text template for the Default help topic.
 // cli.go uses text/template to render templates. You can
 // render custom help text by setting this variable.
 var AppHelpTemplate = `NAME:
    {{.Name}} - {{.Usage}}

 USAGE:
-   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
-   {{if .Version}}
+   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
+   {{if .Version}}{{if not .HideVersion}}
 VERSION:
    {{.Version}}
-   {{end}}{{if len .Authors}}
+   {{end}}{{end}}{{if len .Authors}}
 AUTHOR(S):
-   {{range .Authors}}{{ . }}{{end}}
-   {{end}}{{if .Commands}}
-COMMANDS:
-   {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
-   {{end}}{{end}}{{if .Flags}}
+   {{range .Authors}}{{.}}{{end}}
+   {{end}}{{if .VisibleCommands}}
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+   {{.Name}}:{{end}}{{range .VisibleCommands}}
+   {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{"\t"}}{{.Usage}}{{end}}
+   {{end}}{{end}}{{if .VisibleFlags}}
 GLOBAL OPTIONS:
-   {{range .Flags}}{{.}}
-   {{end}}{{end}}{{if .Copyright }}
+   {{range .VisibleFlags}}{{.}}
+   {{end}}{{end}}{{if .Copyright}}
 COPYRIGHT:
    {{.Copyright}}
    {{end}}
 `

-// The text template for the command help topic.
+// CommandHelpTemplate is the text template for the command help topic.
 // cli.go uses text/template to render templates. You can
 // render custom help text by setting this variable.
 var CommandHelpTemplate = `NAME:
    {{.HelpName}} - {{.Usage}}

 USAGE:
-   {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}}
+   {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}
+
+CATEGORY:
+   {{.Category}}{{end}}{{if .Description}}

 DESCRIPTION:
-   {{.Description}}{{end}}{{if .Flags}}
+   {{.Description}}{{end}}{{if .VisibleFlags}}

 OPTIONS:
-   {{range .Flags}}{{.}}
-   {{end}}{{ end }}
+   {{range .VisibleFlags}}{{.}}
+   {{end}}{{end}}
 `

-// The text template for the subcommand help topic.
+// SubcommandHelpTemplate is the text template for the subcommand help topic.
 // cli.go uses text/template to render templates. You can
 // render custom help text by setting this variable.
 var SubcommandHelpTemplate = `NAME:
    {{.HelpName}} - {{.Usage}}

 USAGE:
-   {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
+   {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}

-COMMANDS:
-   {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
-   {{end}}{{if .Flags}}
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+   {{.Name}}:{{end}}{{range .VisibleCommands}}
+   {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{"\t"}}{{.Usage}}{{end}}
+   {{end}}{{if .VisibleFlags}}
 OPTIONS:
-   {{range .Flags}}{{.}}
+   {{range .VisibleFlags}}{{.}}
    {{end}}{{end}}
 `

@@ -73,13 +79,14 @@ var helpCommand = Command{
 	Aliases:   []string{"h"},
 	Usage:     "Shows a list of commands or help for one command",
 	ArgsUsage: "[command]",
-	Action: func(c *Context) {
+	Action: func(c *Context) error {
 		args := c.Args()
 		if args.Present() {
-			ShowCommandHelp(c, args.First())
-		} else {
-			ShowAppHelp(c)
+			return ShowCommandHelp(c, args.First())
 		}
+
+		ShowAppHelp(c)
+		return nil
 	},
 }

@@ -88,65 +95,73 @@ var helpSubcommand = Command{
 	Aliases:   []string{"h"},
 	Usage:     "Shows a list of commands or help for one command",
 	ArgsUsage: "[command]",
-	Action: func(c *Context) {
+	Action: func(c *Context) error {
 		args := c.Args()
 		if args.Present() {
-			ShowCommandHelp(c, args.First())
-		} else {
-			ShowSubcommandHelp(c)
+			return ShowCommandHelp(c, args.First())
 		}
+
+		return ShowSubcommandHelp(c)
 	},
 }

 // Prints help for the App or Command
 type helpPrinter func(w io.Writer, templ string, data interface{})

+// HelpPrinter is a function that writes the help output. If not set a default
+// is used. The function signature is:
+// func(w io.Writer, templ string, data interface{})
 var HelpPrinter helpPrinter = printHelp

-// Prints version for the App
+// VersionPrinter prints the version for the App
 var VersionPrinter = printVersion

+// ShowAppHelp is an action that displays the help.
 func ShowAppHelp(c *Context) {
 	HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
 }

-// Prints the list of subcommands as the default app completion method
+// DefaultAppComplete prints the list of subcommands as the default app completion method
 func DefaultAppComplete(c *Context) {
 	for _, command := range c.App.Commands {
+		if command.Hidden {
+			continue
+		}
 		for _, name := range command.Names() {
 			fmt.Fprintln(c.App.Writer, name)
 		}
 	}
 }

-// Prints help for the given command
-func ShowCommandHelp(ctx *Context, command string) {
+// ShowCommandHelp prints help for the given command
+func ShowCommandHelp(ctx *Context, command string) error {
 	// show the subcommand help for a command with subcommands
 	if command == "" {
 		HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
-		return
+		return nil
 	}

 	for _, c := range ctx.App.Commands {
 		if c.HasName(command) {
 			HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
-			return
+			return nil
 		}
 	}

-	if ctx.App.CommandNotFound != nil {
-		ctx.App.CommandNotFound(ctx, command)
-	} else {
-		fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command)
+	if ctx.App.CommandNotFound == nil {
+		return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
 	}
+
+	ctx.App.CommandNotFound(ctx, command)
+	return nil
 }

-// Prints help for the given subcommand
-func ShowSubcommandHelp(c *Context) {
-	ShowCommandHelp(c, c.Command.Name)
+// ShowSubcommandHelp prints help for the given subcommand
+func ShowSubcommandHelp(c *Context) error {
+	return ShowCommandHelp(c, c.Command.Name)
 }

-// Prints the version number of the App
+// ShowVersion prints the version number of the App
 func ShowVersion(c *Context) {
 	VersionPrinter(c)
 }
@@ -155,7 +170,7 @@ func printVersion(c *Context) {
 	fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
 }

-// Prints the lists of commands within a given context
+// ShowCompletions prints the lists of commands within a given context
 func ShowCompletions(c *Context) {
 	a := c.App
 	if a != nil && a.BashComplete != nil {
@@ -163,7 +178,7 @@ func ShowCompletions(c *Context) {
 	}
 }

-// Prints the custom completions for a given command
+// ShowCommandCompletions prints the custom completions for a given command
 func ShowCommandCompletions(ctx *Context, command string) {
 	c := ctx.App.Command(command)
 	if c != nil && c.BashComplete != nil {
@@ -181,7 +196,10 @@ func printHelp(out io.Writer, templ string, data interface{}) {
 	err := t.Execute(w, data)
 	if err != nil {
 		// If the writer is closed, t.Execute will fail, and there's nothing
-		// we can do to recover. We could send this to os.Stderr if we need.
+		// we can do to recover.
+		if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
+			fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
+		}
 		return
 	}
 	w.Flush()
105 Makefile
@@ -2,8 +2,8 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.

-.PHONY: geth geth-cross evm all test travis-test-with-coverage xgo clean
-.PHONY: geth-linux geth-linux-386 geth-linux-amd64
+.PHONY: geth geth-cross evm all test clean
+.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
 .PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
 .PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
 .PHONY: geth-windows geth-windows-386 geth-windows-amd64
@@ -13,25 +13,41 @@ GOBIN = build/bin
 GO ?= latest

 geth:
-	build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/geth ./cmd/geth
+	build/env.sh go run build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."

+evm:
+	build/env.sh go run build/ci.go install ./cmd/evm
+	@echo "Done building."
+	@echo "Run \"$(GOBIN)/evm to start the evm."
+
+all:
+	build/env.sh go run build/ci.go install
+
+test: all
+	build/env.sh go run build/ci.go test
+
+clean:
+	rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*
+
+# Cross Compilation Targets (xgo)
+
 geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
 	@echo "Full cross compilation done:"
 	@ls -ld $(GOBIN)/geth-*

-geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm
+geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
 	@echo "Linux cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-*

-geth-linux-386: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/386 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-386:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/386 -v ./cmd/geth
 	@echo "Linux 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep 386

-geth-linux-amd64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/amd64 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-amd64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/amd64 -v ./cmd/geth
 	@echo "Linux amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep amd64

@@ -39,37 +55,47 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
 	@echo "Linux ARM cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm

-geth-linux-arm-5: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-5 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm-5:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-5 -v ./cmd/geth
 	@echo "Linux ARMv5 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5

-geth-linux-arm-6: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-6 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm-6:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-6 -v ./cmd/geth
 	@echo "Linux ARMv6 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6

-geth-linux-arm-7: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-7 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm-7:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-7 -v ./cmd/geth
 	@echo "Linux ARMv7 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7

-geth-linux-arm64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm64 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm64 -v ./cmd/geth
 	@echo "Linux ARM64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm64

+geth-linux-mips64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/mips64 -v ./cmd/geth
+	@echo "Linux MIPS64 cross compilation done:"
+	@ls -ld $(GOBIN)/geth-linux-* | grep mips64
+
+geth-linux-mips64le:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/mips64le -v ./cmd/geth
+	@echo "Linux MIPS64le cross compilation done:"
+	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
+
 geth-darwin: geth-darwin-386 geth-darwin-amd64
 	@echo "Darwin cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-*

-geth-darwin-386: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/386 -v $(shell build/flags.sh) ./cmd/geth
+geth-darwin-386:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/386 -v ./cmd/geth
 	@echo "Darwin 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep 386

-geth-darwin-amd64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/amd64 -v $(shell build/flags.sh) ./cmd/geth
+geth-darwin-amd64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/amd64 -v ./cmd/geth
 	@echo "Darwin amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64

@@ -77,45 +103,22 @@ geth-windows: geth-windows-386 geth-windows-amd64
 	@echo "Windows cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-*

-geth-windows-386: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/386 -v $(shell build/flags.sh) ./cmd/geth
+geth-windows-386:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/386 -v ./cmd/geth
 	@echo "Windows 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep 386

-geth-windows-amd64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/amd64 -v $(shell build/flags.sh) ./cmd/geth
+geth-windows-amd64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/amd64 -v ./cmd/geth
 	@echo "Windows amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep amd64

-geth-android: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=android-21/aar -v $(shell build/flags.sh) ./cmd/geth
+geth-android:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=android-21/aar -v ./cmd/geth
 	@echo "Android cross compilation done:"
 	@ls -ld $(GOBIN)/geth-android-*

-geth-ios: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=ios-7.0/framework -v $(shell build/flags.sh) ./cmd/geth
+geth-ios:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=ios-7.0/framework -v ./cmd/geth
 	@echo "iOS framework cross compilation done:"
 	@ls -ld $(GOBIN)/geth-ios-*
-
-evm:
-	build/env.sh $(GOROOT)/bin/go install -v $(shell build/flags.sh) ./cmd/evm
-	@echo "Done building."
-	@echo "Run \"$(GOBIN)/evm to start the evm."
-
-all:
-	for cmd in `ls ./cmd/`; do \
-		build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/$$cmd ./cmd/$$cmd; \
-	done
-
-test: all
-	build/env.sh go test ./...
-
-travis-test-with-coverage: all
-	build/env.sh go vet ./...
-	build/env.sh build/test-global-coverage.sh
-
-xgo:
-	build/env.sh go get github.com/karalabe/xgo
-
-clean:
-	rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*
191 README.md
@@ -54,6 +54,197 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
 | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
 | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |

+## Running geth
+
+Going through all the possible command line flags is out of scope here (please consult our
+[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), but we've
+enumerated a few common parameter combos to get you up to speed quickly on how you can run your
+own Geth instance.
+
+### Full node on the main Ethereum network
+
+By far the most common scenario is people wanting to simply interact with the Ethereum network:
+create accounts; transfer funds; deploy and interact with contracts. For this particular use-case
+the user doesn't care about years-old historical data, so we can fast-sync quickly to the current
+state of the network. To do so:
+
+```
+$ geth --fast --cache=512 console
+```
+
+This command will:
+
+ * Start geth in fast sync mode (`--fast`), causing it to download more data in exchange for avoiding
+   processing the entire history of the Ethereum network, which is very CPU intensive.
+ * Bump the memory allowance of the database to 512MB (`--cache=512`), which can help significantly in
+   sync times especially for HDD users. This flag is optional and you can set it as high or as low as
+   you'd like, though we'd recommend the 512MB - 2GB range.
+ * Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
+   (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
+   as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
+   This too is optional and if you leave it out you can always attach to an already running Geth instance
+   with `geth --attach`.
+
+### Full node on the Ethereum test network
+
+Transitioning towards developers, if you'd like to play around with creating Ethereum contracts, you
+almost certainly would like to do that without any real money involved until you get the hang of the
+entire system. In other words, instead of attaching to the main network, you want to join the **test**
+network with your node, which is fully equivalent to the main network, but with play-Ether only.
+
+```
+$ geth --testnet --fast --cache=512 console
+```
+
+The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
+are equally useful on the testnet too. Please see above for their explanations if you've skipped to
+here.
+
+Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:
+
+ * Instead of using the default data directory (`~/.ethereum` on Linux for example), Geth will nest
+   itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux).
+ * Instead of connecting the main Ethereum network, the client will connect to the test network,
+   which uses different P2P bootnodes, different network IDs and genesis states.
+
+*Note: Although there are some internal protective measures to prevent transactions from crossing
+over between the main network and test network (different starting nonces), you should make sure to
+always use separate accounts for play-money and real-money. Unless you manually move accounts, Geth
+will by default correctly separate the two networks and will not make any accounts available between
+them.*
+
+### Programmatically interfacing Geth nodes
+
+As a developer, sooner rather than later you'll want to start interacting with Geth and the Ethereum
+network via your own programs and not manually through the console. To aid this, Geth has built in
+support for JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) and
+[Geth specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). These can be
+exposed via HTTP, WebSockets and IPC (unix sockets on unix based platforms, and named pipes on Windows).
+
+The IPC interface is enabled by default and exposes all the APIs supported by Geth, whereas the HTTP
+and WS interfaces need to manually be enabled and only expose a subset of APIs due to security reasons.
+These can be turned on/off and configured as you'd expect.
+
+HTTP based JSON-RPC API options:
+
+ * `--rpc` Enable the HTTP-RPC server
+ * `--rpcaddr` HTTP-RPC server listening interface (default: "localhost")
+ * `--rpcport` HTTP-RPC server listening port (default: 8545)
+ * `--rpcapi` API's offered over the HTTP-RPC interface (default: "eth,net,web3")
+ * `--rpccorsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
+ * `--ws` Enable the WS-RPC server
+ * `--wsaddr` WS-RPC server listening interface (default: "localhost")
+ * `--wsport` WS-RPC server listening port (default: 8546)
+ * `--wsapi` API's offered over the WS-RPC interface (default: "eth,net,web3")
+ * `--wsorigins` Origins from which to accept websockets requests
+ * `--ipcdisable` Disable the IPC-RPC server
+ * `--ipcapi` API's offered over the IPC-RPC interface (default: "admin,debug,eth,miner,net,personal,shh,txpool,web3")
+ * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
+
+You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
+via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
+on all transports. You can reuse the same connection for multiple requests!
+
+**Note: Please understand the security implications of opening up an HTTP/WS based transport before
+doing so! Hackers on the internet are actively trying to subvert Ethereum nodes with exposed APIs!
+Further, all browser tabs can access locally running webservers, so malicious webpages could try to
+subvert locally available APIs!**
+
+### Operating a private network
+
+Maintaining your own private network is more involved as a lot of configurations taken for granted in
+the official networks need to be manually set up.
+
+#### Defining the private genesis state
+
+First, you'll need to create the genesis state of your networks, which all nodes need to be aware of
+and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
+
+```json
+{
+  "alloc"      : {},
+  "coinbase"   : "0x0000000000000000000000000000000000000000",
+  "difficulty" : "0x20000",
+  "extraData"  : "",
+  "gasLimit"   : "0x2fefd8",
+  "nonce"      : "0x0000000000000042",
+  "mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "timestamp"  : "0x00"
+}
+```
+
+The above fields should be fine for most purposes, although we'd recommend changing the `nonce` to
+some random value so you prevent unknown remote nodes from being able to connect to you. If you'd
+like to pre-fund some accounts for easier testing, you can populate the `alloc` field with account
+configs:
+
+```json
+"alloc": {
+  "0x0000000000000000000000000000000000000001": {"balance": "111111111"},
+  "0x0000000000000000000000000000000000000002": {"balance": "222222222"}
+}
+```
+
+With the genesis state defined in the above JSON file, you'll need to initialize **every** Geth node
+with it prior to starting it up to ensure all blockchain parameters are correctly set:
+
+```
+$ geth init path/to/genesis.json
+```
+
+#### Creating the rendezvous point
+
+With all nodes that you want to run initialized to the desired genesis state, you'll need to start a
+bootstrap node that others can use to find each other in your network and/or over the internet. The
+clean way is to configure and run a dedicated bootnode:
+
+```
+$ bootnode --genkey=boot.key
+$ bootnode --nodekey=boot.key
+```
+
+With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format)
+that other nodes can use to connect to it and exchange peer information. Make sure to replace the
+displayed IP address information (most probably `[::]`) with your externally accessible IP to get the
+actual `enode` URL.
+
+*Note: You could also use a full fledged Geth node as a bootnode, but it's the less recommended way.*
+
+#### Starting up your member nodes
+
+With the bootnode operational and externally reachable (you can try `telnet <ip> <port>` to ensure
+it's indeed reachable), start every subsequent Geth node pointed to the bootnode for peer discovery
+via the `--bootnodes` flag. It will probably also be desirable to keep the data directory of your
+private network separated, so do also specify a custom `--datadir` flag.
+
+```
+$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
+```
+
+*Note: Since your network will be completely cut off from the main and test networks, you'll also
+need to configure a miner to process transactions and create new blocks for you.*
+
+#### Running a private miner
+
+Mining on the public Ethereum network is a complex task as it's only feasible using GPUs, requiring
+an OpenCL or CUDA enabled `ethminer` instance. For information on such a setup, please consult the
+[EtherMining subreddit](https://www.reddit.com/r/EtherMining/) and the [Genoil miner](https://github.com/Genoil/cpp-ethereum)
+repository.
+
+In a private network setting however, a single CPU miner instance is more than enough for practical
+purposes as it can produce a stable stream of blocks at the correct intervals without needing heavy
+resources (consider running on a single thread, no need for multiple ones either). To start a Geth
+instance for mining, run it with all your usual flags, extended by:
+
+```
+$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
+```
+
+Which will start mining blocks and transactions on a single CPU thread, crediting all proceedings to
+the account specified by `--etherbase`. You can further tune the mining by changing the default gas
+limit blocks converge to (`--targetgaslimit`) and the price transactions are accepted at (`--gasprice`).
+
 ## Contribution

 Thank you for considering to help out with the source code! We welcome contributions from
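The README section above lists the HTTP JSON-RPC flags; the following is a minimal Go sketch of talking to a node started with `--rpc`. The endpoint and the standard `eth_blockNumber` method are well-known JSON-RPC conventions, but treat the snippet as an illustration rather than part of this change:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// JSON-RPC request against a local geth started with --rpc (default port 8545).
	reqBody, _ := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  "eth_blockNumber",
		"params":  []interface{}{},
		"id":      1,
	})
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(reqBody))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var reply struct {
		Result string `json:"result"` // hex-encoded block number, e.g. "0x1b4"
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("current block:", reply.Result)
}
```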
@@ -72,7 +72,7 @@ func (b *SimulatedBackend) Commit() {
|
|||||||
|
|
||||||
// Rollback aborts all pending transactions, reverting to the last committed state.
|
// Rollback aborts all pending transactions, reverting to the last committed state.
|
||||||
func (b *SimulatedBackend) Rollback() {
|
func (b *SimulatedBackend) Rollback() {
|
||||||
blocks, _ := core.GenerateChain(b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
|
blocks, _ := core.GenerateChain(nil, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
|
||||||
|
|
||||||
b.pendingBlock = blocks[0]
|
b.pendingBlock = blocks[0]
|
||||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
||||||
@@ -178,7 +178,7 @@ func (b *SimulatedBackend) EstimateGasLimit(sender common.Address, contract *com
|
|||||||
// SendTransaction implements ContractTransactor.SendTransaction, delegating the raw
|
// SendTransaction implements ContractTransactor.SendTransaction, delegating the raw
|
||||||
// transaction injection to the remote node.
|
// transaction injection to the remote node.
|
||||||
func (b *SimulatedBackend) SendTransaction(tx *types.Transaction) error {
|
func (b *SimulatedBackend) SendTransaction(tx *types.Transaction) error {
|
||||||
blocks, _ := core.GenerateChain(b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
|
blocks, _ := core.GenerateChain(nil, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||||
for _, tx := range b.pendingBlock.Transactions() {
|
for _, tx := range b.pendingBlock.Transactions() {
|
||||||
block.AddTx(tx)
|
block.AddTx(tx)
|
||||||
}
|
}
|
||||||
|
@@ -62,7 +62,7 @@ func (m Method) pack(method Method, args ...interface{}) ([]byte, error) {
|
|||||||
// calculate the offset
|
// calculate the offset
|
||||||
offset := len(method.Inputs)*32 + len(variableInput)
|
offset := len(method.Inputs)*32 + len(variableInput)
|
||||||
// set the offset
|
// set the offset
|
||||||
ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
|
ret = append(ret, packNum(reflect.ValueOf(offset))...)
|
||||||
// Append the packed output to the variable input. The variable input
|
// Append the packed output to the variable input. The variable input
|
||||||
// will be appended at the end of the input.
|
// will be appended at the end of the input.
|
||||||
variableInput = append(variableInput, packed...)
|
variableInput = append(variableInput, packed...)
|
||||||
|
@@ -56,61 +56,21 @@ var (
 	big_ts = reflect.TypeOf([]*big.Int(nil))
 )

-// U256 will ensure unsigned 256bit on big nums
+// U256 converts a big Int into a 256bit EVM number.
 func U256(n *big.Int) []byte {
 	return common.LeftPadBytes(common.U256(n).Bytes(), 32)
 }

-func S256(n *big.Int) []byte {
-	sint := common.S256(n)
-	ret := common.LeftPadBytes(sint.Bytes(), 32)
-	if sint.Cmp(common.Big0) < 0 {
-		for i, b := range ret {
-			if b == 0 {
-				ret[i] = 1
-				continue
-			}
-			break
-		}
-	}
-
-	return ret
-}
-
-// S256 will ensure signed 256bit on big nums
-func U2U256(n uint64) []byte {
-	return U256(big.NewInt(int64(n)))
-}
-
-func S2S256(n int64) []byte {
-	return S256(big.NewInt(n))
-}
-
 // packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
-func packNum(value reflect.Value, to byte) []byte {
+func packNum(value reflect.Value) []byte {
 	switch kind := value.Kind(); kind {
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		if to == UintTy {
-			return U2U256(value.Uint())
-		} else {
-			return S2S256(int64(value.Uint()))
-		}
+		return U256(new(big.Int).SetUint64(value.Uint()))
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		if to == UintTy {
-			return U2U256(uint64(value.Int()))
-		} else {
-			return S2S256(value.Int())
-		}
+		return U256(big.NewInt(value.Int()))
 	case reflect.Ptr:
-		// This only takes care of packing and casting. No type checking is done here. It should be done prior to using this function.
-		if to == UintTy {
-			return U256(value.Interface().(*big.Int))
-		} else {
-			return S256(value.Interface().(*big.Int))
-		}
+		return U256(value.Interface().(*big.Int))
 	}
-
 	return nil
 }
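For orientation (not part of the change set above): after this refactor every integer argument is packed as a single unsigned 256-bit big-endian word. The following standalone Go sketch mirrors that padding for non-negative values that fit in 32 bytes; `pad256` is an illustrative helper, not a function from the repository.

```go
package main

import (
	"fmt"
	"math/big"
)

// pad256 left-pads the big-endian bytes of n to a 32-byte word,
// mirroring what U256/packNum produce for non-negative inputs.
func pad256(n *big.Int) []byte {
	b := n.Bytes() // empty for zero, absolute value otherwise
	padded := make([]byte, 32)
	copy(padded[32-len(b):], b)
	return padded
}

func main() {
	fmt.Printf("%x\n", pad256(big.NewInt(1)))   // 31 zero bytes followed by 0x01
	fmt.Printf("%x\n", pad256(big.NewInt(255))) // 31 zero bytes followed by 0xff
}
```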
@@ -18,6 +18,7 @@ package abi

 import (
 	"bytes"
+	"math"
 	"math/big"
 	"reflect"
 	"testing"

@@ -26,49 +27,46 @@ import (
 func TestNumberTypes(t *testing.T) {
 	ubytes := make([]byte, 32)
 	ubytes[31] = 1
-	sbytesmin := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}

 	unsigned := U256(big.NewInt(1))
 	if !bytes.Equal(unsigned, ubytes) {
 		t.Errorf("expected %x got %x", ubytes, unsigned)
 	}
-
-	signed := S256(big.NewInt(1))
-	if !bytes.Equal(signed, ubytes) {
-		t.Errorf("expected %x got %x", ubytes, unsigned)
-	}
-
-	signed = S256(big.NewInt(-1))
-	if !bytes.Equal(signed, sbytesmin) {
-		t.Errorf("expected %x got %x", ubytes, unsigned)
-	}
 }

 func TestPackNumber(t *testing.T) {
-	ubytes := make([]byte, 32)
-	ubytes[31] = 1
-	sbytesmin := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
-	maxunsigned := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
-
-	packed := packNum(reflect.ValueOf(1), IntTy)
-	if !bytes.Equal(packed, ubytes) {
-		t.Errorf("expected %x got %x", ubytes, packed)
-	}
-	packed = packNum(reflect.ValueOf(-1), IntTy)
-	if !bytes.Equal(packed, sbytesmin) {
-		t.Errorf("expected %x got %x", ubytes, packed)
-	}
-	packed = packNum(reflect.ValueOf(1), UintTy)
-	if !bytes.Equal(packed, ubytes) {
-		t.Errorf("expected %x got %x", ubytes, packed)
-	}
-	packed = packNum(reflect.ValueOf(-1), UintTy)
-	if !bytes.Equal(packed, maxunsigned) {
-		t.Errorf("expected %x got %x", maxunsigned, packed)
-	}
-
-	packed = packNum(reflect.ValueOf("string"), UintTy)
-	if packed != nil {
+	tests := []struct {
+		value  reflect.Value
+		packed []byte
+	}{
+		// Protocol limits
+		{reflect.ValueOf(0), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+		{reflect.ValueOf(1), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
+		{reflect.ValueOf(-1), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}},
+
+		// Type corner cases
+		{reflect.ValueOf(uint8(math.MaxUint8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255}},
+		{reflect.ValueOf(uint16(math.MaxUint16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255}},
+		{reflect.ValueOf(uint32(math.MaxUint32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255}},
+		{reflect.ValueOf(uint64(math.MaxUint64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255}},
+
+		{reflect.ValueOf(int8(math.MaxInt8)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127}},
+		{reflect.ValueOf(int16(math.MaxInt16)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255}},
+		{reflect.ValueOf(int32(math.MaxInt32)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255}},
+		{reflect.ValueOf(int64(math.MaxInt64)), []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255}},
+
+		{reflect.ValueOf(int8(math.MinInt8)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128}},
+		{reflect.ValueOf(int16(math.MinInt16)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0}},
+		{reflect.ValueOf(int32(math.MinInt32)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0}},
+		{reflect.ValueOf(int64(math.MinInt64)), []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 0, 0, 0, 0, 0, 0, 0}},
+	}
+	for i, tt := range tests {
+		packed := packNum(tt.value)
+		if !bytes.Equal(packed, tt.packed) {
+			t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
+		}
+	}
+	if packed := packNum(reflect.ValueOf("string")); packed != nil {
 		t.Errorf("expected 'string' to pack to nil. got %x instead", packed)
 	}
 }
@@ -25,7 +25,7 @@ import (
 // packBytesSlice packs the given bytes as [L, V] as the canonical representation
 // bytes slice
 func packBytesSlice(bytes []byte, l int) []byte {
-	len := packNum(reflect.ValueOf(l), UintTy)
+	len := packNum(reflect.ValueOf(l))
 	return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
 }

@@ -34,7 +34,7 @@ func packBytesSlice(bytes []byte, l int) []byte {
 func packElement(t Type, reflectValue reflect.Value) []byte {
 	switch t.T {
 	case IntTy, UintTy:
-		return packNum(reflectValue, t.T)
+		return packNum(reflectValue)
 	case StringTy:
 		return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len())
 	case AddressTy:
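As a side note (not part of the diff): `packBytesSlice` above emits a 32-byte length word followed by the payload right-padded to a multiple of 32 bytes. Below is a self-contained Go sketch of that layout, assuming lengths that fit in a uint64; `packBytes` is an illustrative stand-in, not the library function.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packBytes encodes b as [32-byte big-endian length][payload right-padded
// to a multiple of 32 bytes], the same shape packBytesSlice produces.
func packBytes(b []byte) []byte {
	length := make([]byte, 32)
	binary.BigEndian.PutUint64(length[24:], uint64(len(b))) // length in the last 8 bytes of the word
	padded := make([]byte, (len(b)+31)/32*32)
	copy(padded, b)
	return append(length, padded...)
}

func main() {
	fmt.Printf("%x\n", packBytes([]byte("hello"))) // length word ending in 0x05, then "hello" padded to 32 bytes
}
```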
appveyor.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
os: Visual Studio 2015

# Clone directly into GOPATH.
clone_folder: c:\gopath\src\github.com\ethereum\go-ethereum
clone_depth: 5
version: "{branch}.{build}"
environment:
  global:
    GOPATH: c:\gopath

# cache choco package files so we don't hit sourceforge all
# the time.
cache:
  - c:\cache

install:
  - cmd: choco install --cache c:\cache golang mingw | find /v "Extracting "
  - refreshenv
  - cd c:\gopath\src\github.com\ethereum\go-ethereum

build_script:
  - go run build\ci.go install

test_script:
  - go run build\ci.go test -vet -coverage

after_build:
  - go run build\ci.go archive -type zip

artifacts:
  - path: geth-*.zip

build/ci-notes.md (new file, 26 lines)
@@ -0,0 +1,26 @@
Debian Packaging
----------------

Tagged releases and develop branch commits are available as installable Debian packages
for Ubuntu. Packages are built for the all Ubuntu versions which are supported by
Canonical:

- Trusty Tahr (14.04 LTS)
- Wily Werewolf (15.10)
- Xenial Xerus (16.04 LTS)

Packages of develop branch commits have suffix -unstable and cannot be installed alongside
the stable version. Switching between release streams requires user intervention.

The packages are built and served by launchpad.net. We generate a Debian source package
for each distribution and upload it. Their builder picks up the source package, builds it
and installs the new version into the PPA repository. Launchpad requires a valid signature
by a team member for source package uploads. The signing key is stored in an environment
variable which Travis CI makes available to certain builds.

We want to build go-ethereum with the most recent version of Go, irrespective of the Go
version that is available in the main Ubuntu repository. In order to make this possible,
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
golang-1.6, which is co-installable alongside the regular golang package. PPA dependencies
can be edited at https://launchpad.net/%7Elp-fjl/+archive/ubuntu/geth-ci-testing/+edit-dependencies
build/ci.go (new file, 497 lines)
@@ -0,0 +1,497 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build none

/*
The ci command is called from Continuous Integration scripts.

Usage: go run ci.go <command> <command flags/arguments>

Available commands are:

   install [ packages... ]                     -- builds packages and executables
   test [ -coverage ] [ -vet ] [ packages... ] -- runs the tests
   archive [ -type zip|tar ]                   -- archives build artefacts
   importkeys                                  -- imports signing keys from env
   debsrc [ -sign key-id ] [ -upload dest ]    -- creates a debian source package
   xgo [ options ]                             -- cross builds according to options

For all commands, -n prevents execution of external programs (dry run mode).

*/
package main

import (
	"bytes"
	"encoding/base64"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"../internal/build"
)

var (
	// Files that end up in the geth*.zip archive.
	gethArchiveFiles = []string{
		"COPYING",
		executablePath("geth"),
	}

	// Files that end up in the geth-alltools*.zip archive.
	allToolsArchiveFiles = []string{
		"COPYING",
		executablePath("abigen"),
		executablePath("evm"),
		executablePath("geth"),
		executablePath("rlpdump"),
	}

	// A debian package is created for all executables listed here.
	debExecutables = []debExecutable{
		{
			Name:        "geth",
			Description: "Ethereum CLI client.",
		},
		{
			Name:        "rlpdump",
			Description: "Developer utility tool that prints RLP structures.",
		},
		{
			Name:        "evm",
			Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
		},
		{
			Name:        "abigen",
			Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
		},
	}

	// Distros for which packages are created.
	// Note: vivid is unsupported because there is no golang-1.6 package for it.
	debDistros = []string{"trusty", "wily", "xenial", "yakkety"}
)

var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))

func executablePath(name string) string {
	if runtime.GOOS == "windows" {
		name += ".exe"
	}
	return filepath.Join(GOBIN, name)
}

func main() {
	log.SetFlags(log.Lshortfile)

	if _, err := os.Stat(filepath.Join("build", "ci.go")); os.IsNotExist(err) {
		log.Fatal("this script must be run from the root of the repository")
	}
	if len(os.Args) < 2 {
		log.Fatal("need subcommand as first argument")
	}
	switch os.Args[1] {
	case "install":
		doInstall(os.Args[2:])
	case "test":
		doTest(os.Args[2:])
	case "archive":
		doArchive(os.Args[2:])
	case "debsrc":
		doDebianSource(os.Args[2:])
	case "travis-debsrc":
		doTravisDebianSource(os.Args[2:])
	case "xgo":
		doXgo(os.Args[2:])
	default:
		log.Fatal("unknown command ", os.Args[1])
	}
}

// Compiling

func doInstall(cmdline []string) {
	commitHash := flag.String("gitcommit", "", "Git commit hash embedded into binary.")
	flag.CommandLine.Parse(cmdline)

	// Check Go version. People regularly open issues about compilation
	// failure with outdated Go. This should save them the trouble.
	if runtime.Version() < "go1.4" && !strings.HasPrefix(runtime.Version(), "devel") {
		log.Println("You have Go version", runtime.Version())
		log.Println("go-ethereum requires at least Go version 1.4 and cannot")
		log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
		os.Exit(1)
	}

	// Compile packages given as arguments, or everything if there are no arguments.
	packages := []string{"./..."}
	if flag.NArg() > 0 {
		packages = flag.Args()
	}

	goinstall := goTool("install", makeBuildFlags(*commitHash)...)
	goinstall.Args = append(goinstall.Args, "-v")
	goinstall.Args = append(goinstall.Args, packages...)
	build.MustRun(goinstall)
}

func makeBuildFlags(commitHash string) (flags []string) {
	// Since Go 1.5, the separator char for link time assignments
	// is '=' and using ' ' prints a warning. However, Go < 1.5 does
	// not support using '='.
	sep := " "
	if runtime.Version() > "go1.5" || strings.Contains(runtime.Version(), "devel") {
		sep = "="
	}

	if os.Getenv("GO_OPENCL") != "" {
		flags = append(flags, "-tags", "opencl")
	}

	// Set gitCommit constant via link-time assignment. If this is a git checkout, we can
	// just get the current commit hash through git. Otherwise we fall back to the hash
	// that was passed as -gitcommit.
	//
	// -gitcommit is required for Debian package builds. The source package doesn't
	// contain .git but we still want to embed the commit hash into the packaged binary.
	// The hash is rendered into the debian/rules build script when the source package is
	// created.
	if _, err := os.Stat(filepath.Join(".git", "HEAD")); !os.IsNotExist(err) {
		if c := build.GitCommit(); c != "" {
			commitHash = c
		}
	}
	if commitHash != "" {
		flags = append(flags, "-ldflags", "-X main.gitCommit"+sep+commitHash)
	}
	return flags
}

func goTool(subcmd string, args ...string) *exec.Cmd {
	gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
	cmd := exec.Command(gocmd, subcmd)
	cmd.Args = append(cmd.Args, args...)
	cmd.Env = []string{
		"GOPATH=" + build.GOPATH(),
		"GOBIN=" + GOBIN,
	}
	for _, e := range os.Environ() {
		if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
			continue
		}
		cmd.Env = append(cmd.Env, e)
	}
	return cmd
}

// Running The Tests
//
// "tests" also includes static analysis tools such as vet.

func doTest(cmdline []string) {
	var (
		vet      = flag.Bool("vet", false, "Whether to run go vet")
		coverage = flag.Bool("coverage", false, "Whether to record code coverage")
	)
	flag.CommandLine.Parse(cmdline)
	packages := []string{"./..."}
	if len(flag.CommandLine.Args()) > 0 {
		packages = flag.CommandLine.Args()
	}

	// Run analysis tools before the tests.
	if *vet {
		build.MustRun(goTool("vet", packages...))
	}

	// Run the actual tests.
	gotest := goTool("test")
	if *coverage {
		gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
	}
	gotest.Args = append(gotest.Args, packages...)
	build.MustRun(gotest)
}

// Release Packaging

func doArchive(cmdline []string) {
	var (
		atype = flag.String("type", "zip", "Type of archive to write (zip|tar)")
		ext   string
	)
	flag.CommandLine.Parse(cmdline)
	switch *atype {
	case "zip":
		ext = ".zip"
	case "tar":
		ext = ".tar.gz"
	default:
		log.Fatal("unknown archive type: ", atype)
	}
	base := makeArchiveBasename()
	if err := build.WriteArchive("geth-"+base, ext, gethArchiveFiles); err != nil {
		log.Fatal(err)
	}
	if err := build.WriteArchive("geth-alltools-"+base, ext, allToolsArchiveFiles); err != nil {
		log.Fatal(err)
	}
}

func makeArchiveBasename() string {
	// date := time.Now().UTC().Format("200601021504")
	platform := runtime.GOOS + "-" + runtime.GOARCH
	archive := platform + "-" + build.VERSION()
	if commit := build.GitCommit(); commit != "" {
		archive += "-" + commit[:8]
	}
	return archive
}

// Debian Packaging

// CLI entry point for Travis CI.
func doTravisDebianSource(cmdline []string) {
	flag.CommandLine.Parse(cmdline)

	// Package only whitelisted branches.
	switch {
	case os.Getenv("TRAVIS_REPO_SLUG") != "ethereum/go-ethereum":
		log.Printf("skipping because this is a fork build")
		return
	case os.Getenv("TRAVIS_PULL_REQUEST") != "false":
		log.Printf("skipping because this is a PR build")
		return
	case os.Getenv("TRAVIS_BRANCH") != "develop" && !strings.HasPrefix(os.Getenv("TRAVIS_TAG"), "v1."):
		log.Printf("skipping because branch %q tag %q is not on the whitelist",
			os.Getenv("TRAVIS_BRANCH"),
			os.Getenv("TRAVIS_TAG"))
		return
	}

	// Import the signing key.
	if b64key := os.Getenv("PPA_SIGNING_KEY"); b64key != "" {
		key, err := base64.StdEncoding.DecodeString(b64key)
		if err != nil {
			log.Fatal("invalid base64 PPA_SIGNING_KEY")
		}
		gpg := exec.Command("gpg", "--import")
		gpg.Stdin = bytes.NewReader(key)
		build.MustRun(gpg)
	}

	// Assign unstable status to non-tag builds.
	unstable := "true"
	if os.Getenv("TRAVIS_BRANCH") != "develop" && os.Getenv("TRAVIS_TAG") != "" {
		unstable = "false"
	}

	doDebianSource([]string{
		"-signer", "Felix Lange (Geth CI Testing Key) <fjl@twurst.com>",
		"-buildnum", os.Getenv("TRAVIS_BUILD_NUMBER"),
		"-upload", "ppa:lp-fjl/geth-ci-testing",
		"-unstable", unstable,
	})
}

// CLI entry point for doing packaging locally.
func doDebianSource(cmdline []string) {
	var (
		signer   = flag.String("signer", "", `Signing key name, also used as package author`)
		upload   = flag.String("upload", "", `Where to upload the source package (usually "ppa:ethereum/ethereum")`)
		buildnum = flag.String("buildnum", "", `Build number (included in version)`)
		unstable = flag.Bool("unstable", false, `Use package name suffix "-unstable"`)
		now      = time.Now()
	)
	flag.CommandLine.Parse(cmdline)

	// Create the debian worktree in /tmp.
	tmpdir, err := ioutil.TempDir("", "eth-deb-build-")
	if err != nil {
		log.Fatal(err)
	}

	for _, distro := range debDistros {
		meta := newDebMetadata(distro, *signer, *buildnum, *unstable, now)
		pkgdir := stageDebianSource(tmpdir, meta)
		debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
		debuild.Dir = pkgdir
		build.MustRun(debuild)

		changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
		changes = filepath.Join(tmpdir, changes)
		if *signer != "" {
			build.MustRunCommand("debsign", changes)
		}
		if *upload != "" {
			build.MustRunCommand("dput", *upload, changes)
		}
	}
}

type debExecutable struct {
	Name, Description string
}

type debMetadata struct {
	// go-ethereum version being built. Note that this
	// is not the debian package version. The package version
	// is constructed by VersionString.
	Version string

	Author               string // "name <email>", also selects signing key
	Buildnum             string // build number
	Distro, Commit, Time string
	Executables          []debExecutable
	Unstable             bool
}

func newDebMetadata(distro, author, buildnum string, unstable bool, t time.Time) debMetadata {
	if author == "" {
		// No signing key, use default author.
		author = "Ethereum Builds <fjl@ethereum.org>"
	}
	return debMetadata{
		Unstable:    unstable,
		Author:      author,
		Distro:      distro,
		Commit:      build.GitCommit(),
		Version:     build.VERSION(),
		Buildnum:    buildnum,
		Time:        t.Format(time.RFC1123Z),
		Executables: debExecutables,
	}
}

// Name returns the name of the metapackage that depends
// on all executable packages.
func (meta debMetadata) Name() string {
	if meta.Unstable {
		return "ethereum-unstable"
	}
	return "ethereum"
}

// VersionString returns the debian version of the packages.
func (meta debMetadata) VersionString() string {
	vsn := meta.Version
	if meta.Buildnum != "" {
		vsn += "+build" + meta.Buildnum
	}
	if meta.Distro != "" {
		vsn += "+" + meta.Distro
	}
	return vsn
}

// ExeList returns the list of all executable packages.
func (meta debMetadata) ExeList() string {
	names := make([]string, len(meta.Executables))
	for i, e := range meta.Executables {
		names[i] = meta.ExeName(e)
	}
	return strings.Join(names, ", ")
}

// ExeName returns the package name of an executable package.
func (meta debMetadata) ExeName(exe debExecutable) string {
	if meta.Unstable {
		return exe.Name + "-unstable"
	}
	return exe.Name
}

// ExeConflicts returns the content of the Conflicts field
// for executable packages.
func (meta debMetadata) ExeConflicts(exe debExecutable) string {
	if meta.Unstable {
		// Set up the conflicts list so that the *-unstable packages
		// cannot be installed alongside the regular version.
		//
		// https://www.debian.org/doc/debian-policy/ch-relationships.html
		// is very explicit about Conflicts: and says that Breaks: should
		// be preferred and the conflicting files should be handled via
		// alternates. We might do this eventually but using a conflict is
		// easier now.
		return "ethereum, " + exe.Name
	}
	return ""
}

func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
	pkg := meta.Name() + "-" + meta.VersionString()
	pkgdir = filepath.Join(tmpdir, pkg)
	if err := os.Mkdir(pkgdir, 0755); err != nil {
		log.Fatal(err)
	}

	// Copy the source code.
	build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator))

	// Put the debian build files in place.
	debian := filepath.Join(pkgdir, "debian")
	build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
	build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
	build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta)
	build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
	build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
	build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
	for _, exe := range meta.Executables {
		install := filepath.Join(debian, exe.Name+".install")
		docs := filepath.Join(debian, exe.Name+".docs")
		build.Render("build/deb.install", install, 0644, exe)
		build.Render("build/deb.docs", docs, 0644, exe)
	}

	return pkgdir
}

// Cross compilation

func doXgo(cmdline []string) {
	// Make sure xgo is available for cross compilation
	gogetxgo := goTool("get", "github.com/karalabe/xgo")
	build.MustRun(gogetxgo)

	// Execute the actual cross compilation
	pkg := cmdline[len(cmdline)-1]
	args := append(cmdline[:len(cmdline)-1], makeBuildFlags("")...)

	build.MustRun(xgoTool(append(args, pkg)...))
}

func xgoTool(args ...string) *exec.Cmd {
	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
	cmd.Env = []string{
		"GOPATH=" + build.GOPATH(),
		"GOBIN=" + GOBIN,
	}
	for _, e := range os.Environ() {
		if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
			continue
		}
		cmd.Env = append(cmd.Env, e)
	}
	return cmd
}
build/deb.changelog (new file, 5 lines)
@@ -0,0 +1,5 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low

  * git build of {{.Commit}}

 -- {{.Author}} {{.Time}}

build/deb.control (new file, 25 lines)
@@ -0,0 +1,25 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.6
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum

Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth and other tools
 Meta-package to install geth and other tools

{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
 {{.Description}}
{{end}}

build/deb.copyright (new file, 14 lines)
@@ -0,0 +1,14 @@
Copyright 2016 The go-ethereum Authors

go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

build/deb.docs (new file, 1 line)
@@ -0,0 +1 @@
AUTHORS

build/deb.install (new file, 1 line)
@@ -0,0 +1 @@
build/bin/{{.Name}} usr/bin

build/deb.rules (new file, 13 lines)
@@ -0,0 +1,13 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1

override_dh_auto_build:
	build/env.sh /usr/lib/go-1.6/bin/go run build/ci.go install -gitcommit {{.Commit}}

override_dh_auto_test:

%:
	dh $@

@@ -20,9 +20,8 @@ fi

 # Set up the environment to use the workspace.
 # Also add Godeps workspace so we build using canned dependencies.
-GOPATH="$ethdir/go-ethereum/Godeps/_workspace:$workspace"
-GOBIN="$PWD/build/bin"
-export GOPATH GOBIN
+GOPATH="$workspace"
+export GOPATH

 # Run the command inside the workspace.
 cd "$ethdir/go-ethereum"
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-set -e
-
-if [ ! -f "build/env.sh" ]; then
-    echo "$0 must be run from the root of the repository."
-    exit 2
-fi
-
-# Since Go 1.5, the separator char for link time assignments
-# is '=' and using ' ' prints a warning. However, Go < 1.5 does
-# not support using '='.
-sep=$(go version | awk '{ if ($3 >= "go1.5" || index($3, "devel")) print "="; else print " "; }' -)
-
-# set gitCommit when running from a Git checkout.
-if [ -f ".git/HEAD" ]; then
-    echo "-ldflags '-X main.gitCommit$sep$(git rev-parse HEAD)'"
-fi
-
-if [ ! -z "$GO_OPENCL" ]; then
-    echo "-tags opencl"
-fi

@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(find ./* -maxdepth 10 -type d -not -path "./build" -not -path "./Godeps/*" ); do
-    if ls $d/*.go &> /dev/null; then
-        go test -coverprofile=profile.out -covermode=atomic $d
-        if [ -f profile.out ]; then
-            cat profile.out >> coverage.txt
-            echo '<<<<<< EOF' >> coverage.txt
-            rm profile.out
-        fi
-    fi
-done

@@ -1,26 +0,0 @@
-@echo off
-if not exist .\build\win-ci-compile.bat (
-    echo This script must be run from the root of the repository.
-    exit /b
-)
-if not defined GOPATH (
-    echo GOPATH is not set.
-    exit /b
-)
-
-set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
-set GOBIN=%cd%\build\bin
-
-rem set gitCommit when running from a Git checkout.
-set goLinkFlags=""
-if exist ".git\HEAD" (
-    where /q git
-    if not errorlevel 1 (
-        for /f %%h in ('git rev-parse HEAD') do (
-            set goLinkFlags="-X main.gitCommit=%%h"
-        )
-    )
-)
-
-@echo on
-go install -v -ldflags %goLinkFlags% ./...

@@ -1,15 +0,0 @@
-@echo off
-if not exist .\build\win-ci-test.bat (
-    echo This script must be run from the root of the repository.
-    exit /b
-)
-if not defined GOPATH (
-    echo GOPATH is not set.
-    exit /b
-)
-
-set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
-set GOBIN=%cd%\build\bin
-
-@echo on
-go test ./...
circle.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
machine:
  services:
    - docker

dependencies:
  cache_directories:
    - "~/.ethash"  # Cache the ethash DAG generated by hive for consecutive builds
    - "~/.docker"  # Cache all docker images manually to avoid lengthy rebuilds
  override:
    # Restore all previously cached docker images
    - mkdir -p ~/.docker
    - for img in `ls ~/.docker`; do docker load -i ~/.docker/$img; done

    # Pull in and hive, restore cached ethash DAGs and do a dry run
    - go get -u github.com/karalabe/hive
    - (cd ~/.go_workspace/src/github.com/karalabe/hive && mkdir -p workspace/ethash/ ~/.ethash)
    - (cd ~/.go_workspace/src/github.com/karalabe/hive && cp -r ~/.ethash/. workspace/ethash/)
    - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=NONE --test=. --sim=. --loglevel=6)

    # Cache all the docker images and the ethash DAGs
    - for img in `docker images | grep -v "^<none>" | tail -n +2 | awk '{print $1}'`; do docker save $img > ~/.docker/`echo $img | tr '/' ':'`.tar; done
    - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/ethash/. ~/.ethash

test:
  override:
    # Build Geth and move into a known folder
    - make geth
    - cp ./build/bin/geth $HOME/geth

    # Run hive and move all generated logs into the public artifacts folder
    - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=go-ethereum:local --override=$HOME/geth --test=. --sim=.)
    - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/logs/* $CIRCLE_ARTIFACTS
@@ -25,10 +25,10 @@ import (
 	"path/filepath"
 	"strings"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/tests"
+	"gopkg.in/urfave/cli.v1"
 )

 var (

@@ -74,9 +74,9 @@ func runTestWithReader(test string, r io.Reader) error {
 	var err error
 	switch strings.ToLower(test) {
 	case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests":
-		err = tests.RunBlockTestWithReader(params.MainNetHomesteadBlock, r, skipTests)
+		err = tests.RunBlockTestWithReader(params.MainNetHomesteadBlock, params.MainNetDAOForkBlock, r, skipTests)
 	case "st", "state", "statetest", "statetests":
-		rs := tests.RuleSet{HomesteadBlock: params.MainNetHomesteadBlock}
+		rs := tests.RuleSet{HomesteadBlock: params.MainNetHomesteadBlock, DAOForkBlock: params.MainNetDAOForkBlock, DAOForkSupport: true}
 		err = tests.RunStateTestWithReader(rs, r, skipTests)
 	case "tx", "transactiontest", "transactiontests":
 		err = tests.RunTransactionTestsWithReader(r, skipTests)

@@ -183,7 +183,7 @@ func runSuite(test, file string) {
 	}
 }

-func setupApp(c *cli.Context) {
+func setupApp(c *cli.Context) error {
 	flagTest := c.GlobalString(TestFlag.Name)
 	flagFile := c.GlobalString(FileFlag.Name)
 	continueOnError = c.GlobalBool(ContinueOnErrorFlag.Name)

@@ -196,8 +196,8 @@ func setupApp(c *cli.Context) {
 		if err := runTestWithReader(flagTest, os.Stdin); err != nil {
 			glog.Fatalln(err)
 		}
-
 	}
+	return nil
 }

 func main() {
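The command-line changes in this and the following files migrate the imports from github.com/codegangsta/cli to gopkg.in/urfave/cli.v1, whose actions return an error. A minimal sketch of that action shape (illustrative only, not code from the repository):

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Name = "demo"
	// Under cli.v1, actions report failures through their return value
	// instead of having no return value at all.
	app.Action = func(ctx *cli.Context) error {
		fmt.Println("hello from", app.Name)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```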
@@ -24,7 +24,6 @@ import (
 	"runtime"
 	"time"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"

@@ -33,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/logger/glog"
+	"gopkg.in/urfave/cli.v1"
 )

 var (

@@ -84,11 +84,16 @@
 		Name:  "verbosity",
 		Usage: "sets the verbosity level",
 	}
+	CreateFlag = cli.BoolFlag{
+		Name:  "create",
+		Usage: "indicates the action should be create rather than call",
+	}
 )

 func init() {
 	app = utils.NewApp("0.2", "the evm command line interface")
 	app.Flags = []cli.Flag{
+		CreateFlag,
 		DebugFlag,
 		VerbosityFlag,
 		ForceJitFlag,

@@ -104,15 +109,13 @@ func init() {
 	app.Action = run
 }

-func run(ctx *cli.Context) {
+func run(ctx *cli.Context) error {
 	glog.SetToStderr(true)
 	glog.SetV(ctx.GlobalInt(VerbosityFlag.Name))

 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 	sender := statedb.CreateAccount(common.StringToAddress("sender"))
-	receiver := statedb.CreateAccount(common.StringToAddress("receiver"))
-	receiver.SetCode(common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name)))

 	vmenv := NewEnv(statedb, common.StringToAddress("evmuser"), common.Big(ctx.GlobalString(ValueFlag.Name)), vm.Config{
 		Debug: ctx.GlobalBool(DebugFlag.Name),

@@ -121,7 +124,25 @@ func run(ctx *cli.Context) {
 	})

 	tstart := time.Now()
-	ret, e := vmenv.Call(
+
+	var (
+		ret []byte
+		err error
+	)
+
+	if ctx.GlobalBool(CreateFlag.Name) {
+		input := append(common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name)), common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
+		ret, _, err = vmenv.Create(
+			sender,
+			input,
+			common.Big(ctx.GlobalString(GasFlag.Name)),
+			common.Big(ctx.GlobalString(PriceFlag.Name)),
+			common.Big(ctx.GlobalString(ValueFlag.Name)),
+		)
+	} else {
+		receiver := statedb.CreateAccount(common.StringToAddress("receiver"))
+		receiver.SetCode(common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name)))
+		ret, err = vmenv.Call(
 			sender,
 			receiver.Address(),
 			common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)),

@@ -129,9 +150,11 @@ func run(ctx *cli.Context) {
 			common.Big(ctx.GlobalString(PriceFlag.Name)),
 			common.Big(ctx.GlobalString(ValueFlag.Name)),
 		)
+	}
 	vmdone := time.Since(tstart)

 	if ctx.GlobalBool(DumpFlag.Name) {
+		statedb.Commit()
 		fmt.Println(string(statedb.Dump()))
 	}
 	vm.StdErrFormat(vmenv.StructLogs())

@@ -150,10 +173,11 @@ num gc: %d
 	}

 	fmt.Printf("OUT: 0x%x", ret)
-	if e != nil {
-		fmt.Printf(" error: %v", e)
+	if err != nil {
+		fmt.Printf(" error: %v", err)
 	}
 	fmt.Println()
+	return nil
 }

 func main() {
@@ -20,13 +20,13 @@ import (
 	"fmt"
 	"io/ioutil"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
+	"gopkg.in/urfave/cli.v1"
 )

 var (

@@ -70,7 +70,7 @@ either new or import). Without it you are not able to unlock your account.

 Note that exporting your key in unencrypted format is NOT supported.

-Keys are stored under <DATADIR>/keys.
+Keys are stored under <DATADIR>/keystore.
 It is safe to transfer the entire directory or the individual keys therein
 between ethereum nodes by simply copying.
 Make sure you backup your keys regularly.

@@ -167,11 +167,12 @@ nodes.
 	}
 )

-func accountList(ctx *cli.Context) {
+func accountList(ctx *cli.Context) error {
 	accman := utils.MakeAccountManager(ctx)
 	for i, acct := range accman.Accounts() {
 		fmt.Printf("Account #%d: {%x} %s\n", i, acct.Address, acct.File)
 	}
+	return nil
 }

 // tries unlocking the specified account a few times.

@@ -259,7 +260,7 @@ func ambiguousAddrRecovery(am *accounts.Manager, err *accounts.AmbiguousAddrErro
 }

 // accountCreate creates a new account into the keystore defined by the CLI flags.
-func accountCreate(ctx *cli.Context) {
+func accountCreate(ctx *cli.Context) error {
 	accman := utils.MakeAccountManager(ctx)
 	password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))

@@ -268,11 +269,12 @@ func accountCreate(ctx *cli.Context) {
 		utils.Fatalf("Failed to create account: %v", err)
 	}
 	fmt.Printf("Address: {%x}\n", account.Address)
+	return nil
 }

 // accountUpdate transitions an account from a previous format to the current
 // one, also providing the possibility to change the pass-phrase.
-func accountUpdate(ctx *cli.Context) {
+func accountUpdate(ctx *cli.Context) error {
 	if len(ctx.Args()) == 0 {
 		utils.Fatalf("No accounts specified to update")
 	}

@@ -283,9 +285,10 @@ func accountUpdate(ctx *cli.Context) {
 	if err := accman.Update(account, oldPassword, newPassword); err != nil {
 		utils.Fatalf("Could not update the account: %v", err)
 	}
+	return nil
 }

-func importWallet(ctx *cli.Context) {
+func importWallet(ctx *cli.Context) error {
 	keyfile := ctx.Args().First()
 	if len(keyfile) == 0 {
 		utils.Fatalf("keyfile must be given as argument")

@@ -303,9 +306,10 @@ func importWallet(ctx *cli.Context) {
 		utils.Fatalf("%v", err)
 	}
 	fmt.Printf("Address: {%x}\n", acct.Address)
+	return nil
 }

-func accountImport(ctx *cli.Context) {
+func accountImport(ctx *cli.Context) error {
 	keyfile := ctx.Args().First()
 	if len(keyfile) == 0 {
 		utils.Fatalf("keyfile must be given as argument")

@@ -321,4 +325,5 @@ func accountImport(ctx *cli.Context) {
 		utils.Fatalf("Could not create the account: %v", err)
 	}
 	fmt.Printf("Address: {%x}\n", acct.Address)
+	return nil
 }
@@ -23,7 +23,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/codegangsta/cli"
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/console"
|
"github.com/ethereum/go-ethereum/console"
|
||||||
@@ -32,6 +31,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -72,7 +72,7 @@ Use "ethereum dump 0" to dump the genesis block.
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func importChain(ctx *cli.Context) {
|
func importChain(ctx *cli.Context) error {
|
||||||
if len(ctx.Args()) != 1 {
|
if len(ctx.Args()) != 1 {
|
||||||
utils.Fatalf("This command requires an argument.")
|
utils.Fatalf("This command requires an argument.")
|
||||||
}
|
}
|
||||||
@@ -84,9 +84,10 @@ func importChain(ctx *cli.Context) {
|
|||||||
utils.Fatalf("Import error: %v", err)
|
utils.Fatalf("Import error: %v", err)
|
||||||
}
|
}
|
||||||
fmt.Printf("Import done in %v", time.Since(start))
|
fmt.Printf("Import done in %v", time.Since(start))
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func exportChain(ctx *cli.Context) {
|
func exportChain(ctx *cli.Context) error {
|
||||||
if len(ctx.Args()) < 1 {
|
if len(ctx.Args()) < 1 {
|
||||||
utils.Fatalf("This command requires an argument.")
|
utils.Fatalf("This command requires an argument.")
|
||||||
}
|
}
|
||||||
@@ -114,9 +115,10 @@ func exportChain(ctx *cli.Context) {
|
|||||||
utils.Fatalf("Export error: %v\n", err)
|
utils.Fatalf("Export error: %v\n", err)
|
||||||
}
|
}
|
||||||
fmt.Printf("Export done in %v", time.Since(start))
|
fmt.Printf("Export done in %v", time.Since(start))
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeDB(ctx *cli.Context) {
|
func removeDB(ctx *cli.Context) error {
|
||||||
confirm, err := console.Stdin.PromptConfirm("Remove local database?")
|
confirm, err := console.Stdin.PromptConfirm("Remove local database?")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("%v", err)
|
utils.Fatalf("%v", err)
|
||||||
@@ -132,9 +134,10 @@ func removeDB(ctx *cli.Context) {
|
|||||||
} else {
|
} else {
|
||||||
fmt.Println("Operation aborted")
|
fmt.Println("Operation aborted")
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func upgradeDB(ctx *cli.Context) {
|
func upgradeDB(ctx *cli.Context) error {
|
||||||
glog.Infoln("Upgrading blockchain database")
|
glog.Infoln("Upgrading blockchain database")
|
||||||
|
|
||||||
chain, chainDb := utils.MakeChain(ctx)
|
chain, chainDb := utils.MakeChain(ctx)
|
||||||
@@ -163,9 +166,10 @@ func upgradeDB(ctx *cli.Context) {
|
|||||||
os.Remove(exportFile)
|
os.Remove(exportFile)
|
||||||
glog.Infoln("Import finished")
|
glog.Infoln("Import finished")
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func dump(ctx *cli.Context) {
|
func dump(ctx *cli.Context) error {
|
||||||
chain, chainDb := utils.MakeChain(ctx)
|
chain, chainDb := utils.MakeChain(ctx)
|
||||||
for _, arg := range ctx.Args() {
|
for _, arg := range ctx.Args() {
|
||||||
var block *types.Block
|
var block *types.Block
|
||||||
@@ -182,12 +186,12 @@ func dump(ctx *cli.Context) {
|
|||||||
state, err := state.New(block.Root(), chainDb)
|
state, err := state.New(block.Root(), chainDb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("could not create new state: %v", err)
|
utils.Fatalf("could not create new state: %v", err)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
fmt.Printf("%s\n", state.Dump())
|
fmt.Printf("%s\n", state.Dump())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
chainDb.Close()
|
chainDb.Close()
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// hashish returns true for strings that look like hashes.
|
// hashish returns true for strings that look like hashes.
|
||||||
|
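The mechanical change running through these hunks is the switch from github.com/codegangsta/cli to gopkg.in/urfave/cli.v1, whose command actions return an error instead of nothing. A minimal sketch of the new action shape, assuming only the cli.v1 API imported above (the command name and argument check are illustrative, mirroring importChain):

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:  "import", // hypothetical command, shaped like importChain above
			Usage: "import a blockchain file",
			Action: func(ctx *cli.Context) error {
				// cli.v1 actions return an error; returning nil keeps the
				// fall-through behaviour the old codegangsta/cli actions had.
				if len(ctx.Args()) != 1 {
					return fmt.Errorf("this command requires an argument")
				}
				return nil
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}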
@@ -20,9 +20,9 @@ import (
 	"os"
 	"os/signal"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/console"
+	"gopkg.in/urfave/cli.v1"
 )

 var (
@@ -60,7 +60,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso

 // localConsole starts a new geth node, attaching a JavaScript console to it at the
 // same time.
-func localConsole(ctx *cli.Context) {
+func localConsole(ctx *cli.Context) error {
 	// Create and start the node based on the CLI flags
 	node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
 	startNode(ctx, node)
@@ -86,16 +86,18 @@ func localConsole(ctx *cli.Context) {
 	// If only a short execution was requested, evaluate and return
 	if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
 		console.Evaluate(script)
-		return
+		return nil
 	}
 	// Otherwise print the welcome screen and enter interactive mode
 	console.Welcome()
 	console.Interactive()

+	return nil
 }

 // remoteConsole will connect to a remote geth instance, attaching a JavaScript
 // console to it.
-func remoteConsole(ctx *cli.Context) {
+func remoteConsole(ctx *cli.Context) error {
 	// Attach to a remotely running geth instance and start the JavaScript console
 	client, err := utils.NewRemoteRPCClient(ctx)
 	if err != nil {
@@ -116,17 +118,19 @@ func remoteConsole(ctx *cli.Context) {
 	// If only a short execution was requested, evaluate and return
 	if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
 		console.Evaluate(script)
-		return
+		return nil
 	}
 	// Otherwise print the welcome screen and enter interactive mode
 	console.Welcome()
 	console.Interactive()

+	return nil
 }

 // ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
 // console to it, and each of the files specified as arguments and tears the
 // everything down.
-func ephemeralConsole(ctx *cli.Context) {
+func ephemeralConsole(ctx *cli.Context) error {
 	// Create and start the node based on the CLI flags
 	node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
 	startNode(ctx, node)
@@ -164,4 +168,6 @@ func ephemeralConsole(ctx *cli.Context) {
 		os.Exit(0)
 	}()
 	console.Stop(true)

+	return nil
 }
@@ -17,7 +17,8 @@
 package main

 import (
-	"math/rand"
+	"crypto/rand"
+	"math/big"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -73,7 +74,7 @@ func TestIPCAttachWelcome(t *testing.T) {
 	coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
 	var ipc string
 	if runtime.GOOS == "windows" {
-		ipc = `\\.\pipe\geth` + strconv.Itoa(rand.Int())
+		ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999))
 	} else {
 		ws := tmpdir(t)
 		defer os.RemoveAll(ws)
@@ -94,7 +95,7 @@ func TestIPCAttachWelcome(t *testing.T) {

 func TestHTTPAttachWelcome(t *testing.T) {
 	coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
-	port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
+	port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P
 	geth := runGeth(t,
 		"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
 		"--etherbase", coinbase, "--rpc", "--rpcport", port)
@@ -108,7 +109,7 @@ func TestHTTPAttachWelcome(t *testing.T) {

 func TestWSAttachWelcome(t *testing.T) {
 	coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
-	port := strconv.Itoa(rand.Intn(65535-1024) + 1024) // Yeah, sometimes this will fail, sorry :P
+	port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P

 	geth := runGeth(t,
 		"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
@@ -160,3 +161,10 @@ at block: 0 ({{niltime}}){{if ipc}}
 `)
 	attach.expectExit()
 }
+
+// trulyRandInt generates a crypto random integer used by the console tests to
+// not clash network ports with other tests running cocurrently.
+func trulyRandInt(lo, hi int) int {
+	num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo)))
+	return int(num.Int64()) + lo
+}
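For context on the new helper: crypto/rand.Int draws from the OS entropy source and returns a value in [0, max), so trulyRandInt(lo, hi) yields an integer in [lo, hi-1] even when several test processes start at the same instant. A standalone sketch of the same idea (the package and function name below are illustrative, not part of the diff):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// randomPort picks a TCP port in [lo, hi) using the crypto RNG, so two test
// processes started simultaneously cannot share a math/rand seed and end up
// fighting over the same port.
func randomPort(lo, hi int) int {
	num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo)))
	return int(num.Int64()) + lo
}

func main() {
	fmt.Println(randomPort(1024, 65536)) // e.g. 49231
}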
232 cmd/geth/dao_test.go (new file)
@@ -0,0 +1,232 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"io/ioutil"
	"math/big"
	"os"
	"path/filepath"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

// Genesis block for nodes which don't care about the DAO fork (i.e. not configured)
var daoOldGenesis = `{
	"alloc"      : {},
	"coinbase"   : "0x0000000000000000000000000000000000000000",
	"difficulty" : "0x20000",
	"extraData"  : "",
	"gasLimit"   : "0x2fefd8",
	"nonce"      : "0x0000000000000042",
	"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"timestamp"  : "0x00",
	"config"     : {}
}`

// Genesis block for nodes which actively oppose the DAO fork
var daoNoForkGenesis = `{
	"alloc"      : {},
	"coinbase"   : "0x0000000000000000000000000000000000000000",
	"difficulty" : "0x20000",
	"extraData"  : "",
	"gasLimit"   : "0x2fefd8",
	"nonce"      : "0x0000000000000042",
	"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"timestamp"  : "0x00",
	"config"     : {
		"daoForkBlock"   : 314,
		"daoForkSupport" : false
	}
}`

// Genesis block for nodes which actively support the DAO fork
var daoProForkGenesis = `{
	"alloc"      : {},
	"coinbase"   : "0x0000000000000000000000000000000000000000",
	"difficulty" : "0x20000",
	"extraData"  : "",
	"gasLimit"   : "0x2fefd8",
	"nonce"      : "0x0000000000000042",
	"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"timestamp"  : "0x00",
	"config"     : {
		"daoForkBlock"   : 314,
		"daoForkSupport" : true
	}
}`

var daoGenesisHash = common.HexToHash("5e1fc79cb4ffa4739177b5408045cd5d51c6cf766133f23f7cd72ee1f8d790e0")
var daoGenesisForkBlock = big.NewInt(314)

// Tests that the DAO hard-fork number and the nodes support/opposition is correctly
// set in the database after various initialization procedures and invocations.
func TestDAODefaultMainnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, "", [][2]bool{{false, false}}, params.MainNetDAOForkBlock, true)
}
func TestDAOSupportMainnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, "", [][2]bool{{true, false}}, params.MainNetDAOForkBlock, true)
}
func TestDAOOpposeMainnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, "", [][2]bool{{false, true}}, params.MainNetDAOForkBlock, false)
}
func TestDAOSwitchToSupportMainnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, "", [][2]bool{{false, true}, {true, false}}, params.MainNetDAOForkBlock, true)
}
func TestDAOSwitchToOpposeMainnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, "", [][2]bool{{true, false}, {false, true}}, params.MainNetDAOForkBlock, false)
}
func TestDAODefaultTestnet(t *testing.T) {
	testDAOForkBlockNewChain(t, true, "", [][2]bool{{false, false}}, params.TestNetDAOForkBlock, true)
}
func TestDAOSupportTestnet(t *testing.T) {
	testDAOForkBlockNewChain(t, true, "", [][2]bool{{true, false}}, params.TestNetDAOForkBlock, true)
}
func TestDAOOpposeTestnet(t *testing.T) {
	testDAOForkBlockNewChain(t, true, "", [][2]bool{{false, true}}, params.TestNetDAOForkBlock, false)
}
func TestDAOSwitchToSupportTestnet(t *testing.T) {
	testDAOForkBlockNewChain(t, true, "", [][2]bool{{false, true}, {true, false}}, params.TestNetDAOForkBlock, true)
}
func TestDAOSwitchToOpposeTestnet(t *testing.T) {
	testDAOForkBlockNewChain(t, true, "", [][2]bool{{true, false}, {false, true}}, params.TestNetDAOForkBlock, false)
}
func TestDAOInitOldPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{}, nil, false)
}
func TestDAODefaultOldPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{false, false}}, params.MainNetDAOForkBlock, true)
}
func TestDAOSupportOldPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{true, false}}, params.MainNetDAOForkBlock, true)
}
func TestDAOOpposeOldPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{false, true}}, params.MainNetDAOForkBlock, false)
}
func TestDAOSwitchToSupportOldPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{false, true}, {true, false}}, params.MainNetDAOForkBlock, true)
}
func TestDAOSwitchToOpposeOldPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoOldGenesis, [][2]bool{{true, false}, {false, true}}, params.MainNetDAOForkBlock, false)
}
func TestDAOInitNoForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{}, daoGenesisForkBlock, false)
}
func TestDAODefaultNoForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{false, false}}, daoGenesisForkBlock, false)
}
func TestDAOSupportNoForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{true, false}}, daoGenesisForkBlock, true)
}
func TestDAOOpposeNoForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{false, true}}, daoGenesisForkBlock, false)
}
func TestDAOSwitchToSupportNoForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{false, true}, {true, false}}, daoGenesisForkBlock, true)
}
func TestDAOSwitchToOpposeNoForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoNoForkGenesis, [][2]bool{{true, false}, {false, true}}, daoGenesisForkBlock, false)
}
func TestDAOInitProForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{}, daoGenesisForkBlock, true)
}
func TestDAODefaultProForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{false, false}}, daoGenesisForkBlock, true)
}
func TestDAOSupportProForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{true, false}}, daoGenesisForkBlock, true)
}
func TestDAOOpposeProForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{false, true}}, daoGenesisForkBlock, false)
}
func TestDAOSwitchToSupportProForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{false, true}, {true, false}}, daoGenesisForkBlock, true)
}
func TestDAOSwitchToOpposeProForkPrivnet(t *testing.T) {
	testDAOForkBlockNewChain(t, false, daoProForkGenesis, [][2]bool{{true, false}, {false, true}}, daoGenesisForkBlock, false)
}

func testDAOForkBlockNewChain(t *testing.T, testnet bool, genesis string, votes [][2]bool, expectBlock *big.Int, expectVote bool) {
	// Create a temporary data directory to use and inspect later
	datadir := tmpdir(t)
	defer os.RemoveAll(datadir)

	// Start a Geth instance with the requested flags set and immediately terminate
	if genesis != "" {
		json := filepath.Join(datadir, "genesis.json")
		if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil {
			t.Fatalf("failed to write genesis file: %v", err)
		}
		runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()
	}
	for _, vote := range votes {
		args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir}
		if testnet {
			args = append(args, "--testnet")
		}
		if vote[0] {
			args = append(args, "--support-dao-fork")
		}
		if vote[1] {
			args = append(args, "--oppose-dao-fork")
		}
		geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...)
		geth.cmd.Wait()
	}
	// Retrieve the DAO config flag from the database
	path := filepath.Join(datadir, "chaindata")
	if testnet && genesis == "" {
		path = filepath.Join(datadir, "testnet", "chaindata")
	}
	db, err := ethdb.NewLDBDatabase(path, 0, 0)
	if err != nil {
		t.Fatalf("failed to open test database: %v", err)
	}
	defer db.Close()

	genesisHash := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
	if testnet {
		genesisHash = common.HexToHash("0x0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303")
	}
	if genesis != "" {
		genesisHash = daoGenesisHash
	}
	config, err := core.GetChainConfig(db, genesisHash)
	if err != nil {
		t.Fatalf("failed to retrieve chain config: %v", err)
	}
	// Validate the DAO hard-fork block number against the expected value
	if config.DAOForkBlock == nil {
		if expectBlock != nil {
			t.Errorf("dao hard-fork block mismatch: have nil, want %v", expectBlock)
		}
	} else if expectBlock == nil {
		t.Errorf("dao hard-fork block mismatch: have %v, want nil", config.DAOForkBlock)
	} else if config.DAOForkBlock.Cmp(expectBlock) != 0 {
		t.Errorf("dao hard-fork block mismatch: have %v, want %v", config.DAOForkBlock, expectBlock)
	}
	if config.DAOForkSupport != expectVote {
		t.Errorf("dao hard-fork support mismatch: have %v, want %v", config.DAOForkSupport, expectVote)
	}
}
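The test above drives geth end to end and then inspects the chain database directly. A rough sketch of that inspection step in isolation, reusing only the calls that already appear in the test (the data directory path is a placeholder; APIs are the 1.4.x ones shown in this diff):

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	// Hypothetical data directory; the tests use a temporary one instead.
	datadir := "/tmp/geth-dao-check"

	// Open the chain database (0, 0 = default cache size and file handles).
	db, err := ethdb.NewLDBDatabase(filepath.Join(datadir, "chaindata"), 0, 0)
	if err != nil {
		log.Fatalf("failed to open chain database: %v", err)
	}
	defer db.Close()

	// Mainnet genesis hash, exactly as used in the test above.
	genesisHash := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")

	// The stored chain config records both the fork block and the vote.
	config, err := core.GetChainConfig(db, genesisHash)
	if err != nil {
		log.Fatalf("failed to retrieve chain config: %v", err)
	}
	fmt.Println("DAO fork block:", config.DAOForkBlock, "support:", config.DAOForkSupport)
}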
107 cmd/geth/genesis_test.go (new file)
@@ -0,0 +1,107 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

var customGenesisTests = []struct {
	genesis string
	query   string
	result  string
}{
	// Plain genesis file without anything extra
	{
		genesis: `{
			"alloc"      : {},
			"coinbase"   : "0x0000000000000000000000000000000000000000",
			"difficulty" : "0x20000",
			"extraData"  : "",
			"gasLimit"   : "0x2fefd8",
			"nonce"      : "0x0000000000000042",
			"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"timestamp"  : "0x00"
		}`,
		query:  "eth.getBlock(0).nonce",
		result: "0x0000000000000042",
	},
	// Genesis file with an empty chain configuration (ensure missing fields work)
	{
		genesis: `{
			"alloc"      : {},
			"coinbase"   : "0x0000000000000000000000000000000000000000",
			"difficulty" : "0x20000",
			"extraData"  : "",
			"gasLimit"   : "0x2fefd8",
			"nonce"      : "0x0000000000000042",
			"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"timestamp"  : "0x00",
			"config"     : {}
		}`,
		query:  "eth.getBlock(0).nonce",
		result: "0x0000000000000042",
	},
	// Genesis file with specific chain configurations
	{
		genesis: `{
			"alloc"      : {},
			"coinbase"   : "0x0000000000000000000000000000000000000000",
			"difficulty" : "0x20000",
			"extraData"  : "",
			"gasLimit"   : "0x2fefd8",
			"nonce"      : "0x0000000000000042",
			"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"timestamp"  : "0x00",
			"config"     : {
				"homesteadBlock" : 314,
				"daoForkBlock"   : 141,
				"daoForkSupport" : true
			},
		}`,
		query:  "eth.getBlock(0).nonce",
		result: "0x0000000000000042",
	},
}

// Tests that initializing Geth with a custom genesis block and chain definitions
// work properly.
func TestCustomGenesis(t *testing.T) {
	for i, tt := range customGenesisTests {
		// Create a temporary data directory to use and inspect later
		datadir := tmpdir(t)
		defer os.RemoveAll(datadir)

		// Initialize the data directory with the custom genesis block
		json := filepath.Join(datadir, "genesis.json")
		if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil {
			t.Fatalf("test %d: failed to write genesis file: %v", i, err)
		}
		runGeth(t, "--datadir", datadir, "init", json).cmd.Wait()

		// Query the custom genesis block
		geth := runGeth(t, "--datadir", datadir, "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--exec", tt.query, "console")
		geth.expectRegexp(tt.result)
		geth.expectExit()
	}
}
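The flow exercised by TestCustomGenesis — write a genesis.json, run geth init, then query block zero over a one-shot console — can also be reproduced outside the test harness. A hedged sketch using only the standard library and the same flags the test passes (the geth binary on PATH and the data directory are assumptions):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

const genesisJSON = `{
	"alloc"      : {},
	"coinbase"   : "0x0000000000000000000000000000000000000000",
	"difficulty" : "0x20000",
	"extraData"  : "",
	"gasLimit"   : "0x2fefd8",
	"nonce"      : "0x0000000000000042",
	"mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
	"timestamp"  : "0x00",
	"config"     : {}
}`

func main() {
	datadir := "/tmp/custom-chain" // placeholder directory
	if err := os.MkdirAll(datadir, 0700); err != nil {
		log.Fatal(err)
	}
	genesis := filepath.Join(datadir, "genesis.json")
	if err := ioutil.WriteFile(genesis, []byte(genesisJSON), 0600); err != nil {
		log.Fatal(err)
	}
	// geth init writes the genesis block and chain config into the datadir.
	if out, err := exec.Command("geth", "--datadir", datadir, "init", genesis).CombinedOutput(); err != nil {
		log.Fatalf("init failed: %v\n%s", err, out)
	}
	// Query the zero block nonce through a one-shot console, as the test does.
	out, err := exec.Command("geth", "--datadir", datadir, "--maxpeers", "0", "--nodiscover",
		"--ipcdisable", "--exec", "eth.getBlock(0).nonce", "console").CombinedOutput()
	if err != nil {
		log.Fatalf("console failed: %v\n%s", err, out)
	}
	fmt.Printf("%s", out) // expect 0x0000000000000042
}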
@@ -28,7 +28,6 @@ import (
 	"strings"
 	"time"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/ethash"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
@@ -44,13 +43,14 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/release"
 	"github.com/ethereum/go-ethereum/rlp"
+	"gopkg.in/urfave/cli.v1"
 )

 const (
 	clientIdentifier = "Geth"     // Client identifier to advertise over the network
 	versionMajor     = 1          // Major version component of the current release
 	versionMinor     = 4          // Minor version component of the current release
-	versionPatch     = 6          // Patch version component of the current release
+	versionPatch     = 12         // Patch version component of the current release
 	versionMeta      = "stable"   // Version metadata to append to the version string

 	versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
@@ -149,7 +149,6 @@ participating.
 		utils.IdentityFlag,
 		utils.UnlockedAccountFlag,
 		utils.PasswordFileFlag,
-		utils.GenesisFileFlag,
 		utils.BootnodesFlag,
 		utils.DataDirFlag,
 		utils.KeyStoreDirFlag,
@@ -164,6 +163,8 @@ participating.
 		utils.MaxPendingPeersFlag,
 		utils.EtherbaseFlag,
 		utils.GasPriceFlag,
+		utils.SupportDAOFork,
+		utils.OpposeDAOFork,
 		utils.MinerThreadsFlag,
 		utils.MiningEnabledFlag,
 		utils.MiningGPUFlag,
@@ -224,12 +225,6 @@ participating.
 		eth.EnableBadBlockReporting = true

 		utils.SetupNetwork(ctx)

-		// Deprecation warning.
-		if ctx.GlobalIsSet(utils.GenesisFileFlag.Name) {
-			common.PrintDepricationWarning("--genesis is deprecated. Switch to use 'geth init /path/to/file'")
-		}
-
 		return nil
 	}

@@ -271,15 +266,17 @@ func makeDefaultExtra() []byte {
 // geth is the main entry point into the system if no special subcommand is ran.
 // It creates a default node based on the command line arguments and runs it in
 // blocking mode, waiting for it to be shut down.
-func geth(ctx *cli.Context) {
+func geth(ctx *cli.Context) error {
 	node := utils.MakeSystemNode(clientIdentifier, verString, relConfig, makeDefaultExtra(), ctx)
 	startNode(ctx, node)
 	node.Wait()

+	return nil
 }

 // initGenesis will initialise the given JSON format genesis file and writes it as
 // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
-func initGenesis(ctx *cli.Context) {
+func initGenesis(ctx *cli.Context) error {
 	genesisPath := ctx.Args().First()
 	if len(genesisPath) == 0 {
 		utils.Fatalf("must supply path to genesis JSON file")
@@ -300,6 +297,7 @@ func initGenesis(ctx *cli.Context) {
 		utils.Fatalf("failed to write genesis block: %v", err)
 	}
 	glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash())
+	return nil
 }

 // startNode boots up the system node and all registered protocols, after which
@@ -331,7 +329,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 	}
 }

-func makedag(ctx *cli.Context) {
+func makedag(ctx *cli.Context) error {
 	args := ctx.Args()
 	wrongArgs := func() {
 		utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
@@ -358,13 +356,15 @@ func makedag(ctx *cli.Context) {
 	default:
 		wrongArgs()
 	}
+	return nil
 }

-func gpuinfo(ctx *cli.Context) {
+func gpuinfo(ctx *cli.Context) error {
 	eth.PrintOpenCLDevices()
+	return nil
 }

-func gpubench(ctx *cli.Context) {
+func gpubench(ctx *cli.Context) error {
 	args := ctx.Args()
 	wrongArgs := func() {
 		utils.Fatalf(`Usage: geth gpubench <gpu number>`)
@@ -381,9 +381,10 @@ func gpubench(ctx *cli.Context) {
 	default:
 		wrongArgs()
 	}
+	return nil
 }

-func version(c *cli.Context) {
+func version(c *cli.Context) error {
 	fmt.Println(clientIdentifier)
 	fmt.Println("Version:", verString)
 	fmt.Println("Protocol Versions:", eth.ProtocolVersions)
@@ -392,4 +393,6 @@ func version(c *cli.Context) {
 	fmt.Println("OS:", runtime.GOOS)
 	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
 	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())

+	return nil
 }
@@ -26,11 +26,11 @@ import (

 	"sort"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/gizak/termui"
+	"gopkg.in/urfave/cli.v1"
 )

 var (
@@ -67,7 +67,7 @@ to display multiple metrics simultaneously.
 )

 // monitor starts a terminal UI based monitoring tool for the requested metrics.
-func monitor(ctx *cli.Context) {
+func monitor(ctx *cli.Context) error {
 	var (
 		client rpc.Client
 		err    error
@@ -154,6 +154,7 @@ func monitor(ctx *cli.Context) {
 		}
 	}()
 	termui.Loop()
+	return nil
 }

 // retrieveMetrics contacts the attached geth node and retrieves the entire set
@@ -58,7 +58,10 @@ type testgeth struct {
 func init() {
 	// Run the app if we're the child process for runGeth.
 	if os.Getenv("GETH_TEST_CHILD") != "" {
-		app.RunAndExitOnError()
+		if err := app.Run(os.Args); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(1)
+		}
 		os.Exit(0)
 	}
 }
@@ -21,9 +21,9 @@ package main
 import (
 	"io"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/internal/debug"
+	"gopkg.in/urfave/cli.v1"
 )

 // AppHelpTemplate is the test template for the default, global app help topic.
@@ -68,7 +68,6 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.OlympicFlag,
 			utils.TestNetFlag,
 			utils.DevModeFlag,
-			utils.GenesisFileFlag,
 			utils.IdentityFlag,
 			utils.FastSyncFlag,
 			utils.LightKDFFlag,
@@ -20,9 +20,9 @@ import (
 	"fmt"
 	"strings"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rpc"
+	"gopkg.in/urfave/cli.v1"
 )

 // NewRemoteRPCClient returns a RPC client which connects to a running geth instance.
@@ -120,7 +120,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 		}
 	}

-	glog.Infoln("Importing blockchain", fn)
+	glog.Infoln("Importing blockchain ", fn)
 	fh, err := os.Open(fn)
 	if err != nil {
 		return err
@@ -182,7 +182,7 @@ func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
 }

 func ExportChain(blockchain *core.BlockChain, fn string) error {
-	glog.Infoln("Exporting blockchain to", fn)
+	glog.Infoln("Exporting blockchain to ", fn)
 	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
 	if err != nil {
 		return err
@@ -191,12 +191,12 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
 	if err := blockchain.Export(fh); err != nil {
 		return err
 	}
-	glog.Infoln("Exported blockchain to", fn)
+	glog.Infoln("Exported blockchain to ", fn)
 	return nil
 }

 func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
-	glog.Infoln("Exporting blockchain to", fn)
+	glog.Infoln("Exporting blockchain to ", fn)
 	// TODO verify mode perms
 	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
 	if err != nil {
@@ -206,6 +206,6 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
 	if err := blockchain.ExportN(fh, first, last); err != nil {
 		return err
 	}
-	glog.Infoln("Exported blockchain to", fn)
+	glog.Infoln("Exported blockchain to ", fn)
 	return nil
 }
@@ -24,7 +24,7 @@ import (
 	"path"
 	"strings"

-	"github.com/codegangsta/cli"
+	"gopkg.in/urfave/cli.v1"
 )

 // Custom type which is registered in the flags library which cli uses for
@@ -30,7 +30,6 @@ import (
 	"strings"
 	"time"

-	"github.com/codegangsta/cli"
 	"github.com/ethereum/ethash"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
@@ -51,6 +50,7 @@ import (
 	"github.com/ethereum/go-ethereum/release"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/ethereum/go-ethereum/whisper"
+	"gopkg.in/urfave/cli.v1"
 )

 func init() {
@@ -126,10 +126,6 @@ var (
 		Name:  "dev",
 		Usage: "Developer mode: pre-configured private network with several debugging flags",
 	}
-	GenesisFileFlag = cli.StringFlag{
-		Name:  "genesis",
-		Usage: "Insert/overwrite the genesis block (JSON format)",
-	}
 	IdentityFlag = cli.StringFlag{
 		Name:  "identity",
 		Usage: "Custom node name",
@@ -161,6 +157,15 @@ var (
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
 	}
+	// Fork settings
+	SupportDAOFork = cli.BoolFlag{
+		Name:  "support-dao-fork",
+		Usage: "Updates the chain rules to support the DAO hard-fork",
+	}
+	OpposeDAOFork = cli.BoolFlag{
+		Name:  "oppose-dao-fork",
+		Usage: "Updates the chain rules to oppose the DAO hard-fork",
+	}
 	// Miner settings
 	// TODO: refactor CPU vs GPU mining flags
 	MiningEnabledFlag = cli.BoolFlag{
@@ -534,20 +539,6 @@ func MakeWSRpcHost(ctx *cli.Context) string {
 	return ctx.GlobalString(WSListenAddrFlag.Name)
 }

-// MakeGenesisBlock loads up a genesis block from an input file specified in the
-// command line, or returns the empty string if none set.
-func MakeGenesisBlock(ctx *cli.Context) string {
-	genesis := ctx.GlobalString(GenesisFileFlag.Name)
-	if genesis == "" {
-		return ""
-	}
-	data, err := ioutil.ReadFile(genesis)
-	if err != nil {
-		Fatalf("Failed to load custom genesis file: %v", err)
-	}
-	return string(data)
-}
-
 // MakeDatabaseHandles raises out the number of allowed file handles per process
 // for Geth and returns half of the allowance to assign to the database.
 func MakeDatabaseHandles() int {
@@ -689,7 +680,6 @@ func MakeSystemNode(name, version string, relconf release.Config, extra []byte,

 	ethConf := &eth.Config{
 		ChainConfig:       MustMakeChainConfig(ctx),
-		Genesis:           MakeGenesisBlock(ctx),
 		FastSync:          ctx.GlobalBool(FastSyncFlag.Name),
 		BlockChainVersion: ctx.GlobalInt(BlockchainVersionFlag.Name),
 		DatabaseCache:     ctx.GlobalInt(CacheFlag.Name),
@@ -722,17 +712,13 @@ func MakeSystemNode(name, version string, relconf release.Config, extra []byte,
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
 			ethConf.NetworkId = 1
 		}
-		if !ctx.GlobalIsSet(GenesisFileFlag.Name) {
 			ethConf.Genesis = core.OlympicGenesisBlock()
-		}

 	case ctx.GlobalBool(TestNetFlag.Name):
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
 			ethConf.NetworkId = 2
 		}
-		if !ctx.GlobalIsSet(GenesisFileFlag.Name) {
 			ethConf.Genesis = core.TestNetGenesisBlock()
-		}
 		state.StartingNonce = 1048576 // (2**20)

 	case ctx.GlobalBool(DevModeFlag.Name):
@@ -747,9 +733,7 @@ func MakeSystemNode(name, version string, relconf release.Config, extra []byte,
 			stackConf.ListenAddr = ":0"
 		}
 		// Override the Ethereum protocol configs
-		if !ctx.GlobalIsSet(GenesisFileFlag.Name) {
 			ethConf.Genesis = core.OlympicGenesisBlock()
-		}
 		if !ctx.GlobalIsSet(GasPriceFlag.Name) {
 			ethConf.GasPrice = new(big.Int)
 		}
@@ -806,24 +790,62 @@ func MustMakeChainConfig(ctx *cli.Context) *core.ChainConfig {

 // MustMakeChainConfigFromDb reads the chain configuration from the given database.
 func MustMakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainConfig {
-	genesis := core.GetBlock(db, core.GetCanonicalHash(db, 0))
+	// If the chain is already initialized, use any existing chain configs
+	config := new(core.ChainConfig)
+
+	genesis := core.GetBlock(db, core.GetCanonicalHash(db, 0))
 	if genesis != nil {
-		// Existing genesis block, use stored config if available.
 		storedConfig, err := core.GetChainConfig(db, genesis.Hash())
-		if err == nil {
-			return storedConfig
-		} else if err != core.ChainConfigNotFoundErr {
+		switch err {
+		case nil:
+			config = storedConfig
+		case core.ChainConfigNotFoundErr:
+			// No configs found, use empty, will populate below
+		default:
 			Fatalf("Could not make chain configuration: %v", err)
 		}
 	}
-	var homesteadBlockNo *big.Int
+	// Set any missing fields due to them being unset or system upgrade
+	if config.HomesteadBlock == nil {
 		if ctx.GlobalBool(TestNetFlag.Name) {
-			homesteadBlockNo = params.TestNetHomesteadBlock
+			config.HomesteadBlock = params.TestNetHomesteadBlock
 		} else {
-			homesteadBlockNo = params.MainNetHomesteadBlock
+			config.HomesteadBlock = params.MainNetHomesteadBlock
 		}
-	return &core.ChainConfig{HomesteadBlock: homesteadBlockNo}
+	}
+	if config.DAOForkBlock == nil {
+		if ctx.GlobalBool(TestNetFlag.Name) {
+			config.DAOForkBlock = params.TestNetDAOForkBlock
+		} else {
+			config.DAOForkBlock = params.MainNetDAOForkBlock
+		}
+		config.DAOForkSupport = true
+	}
+	// Force override any existing configs if explicitly requested
+	switch {
+	case ctx.GlobalBool(SupportDAOFork.Name):
+		config.DAOForkSupport = true
+	case ctx.GlobalBool(OpposeDAOFork.Name):
+		config.DAOForkSupport = false
+	}
+	// Temporarilly display a proper message so the user knows which fork its on
+	if !ctx.GlobalBool(TestNetFlag.Name) && (genesis == nil || genesis.Hash() == common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")) {
+		choice := "SUPPORT"
+		if !config.DAOForkSupport {
+			choice = "OPPOSE"
+		}
+		current := fmt.Sprintf("Geth is currently configured to %s the DAO hard-fork!", choice)
+		howtoswap := fmt.Sprintf("You can change your choice prior to block #%v with --support-dao-fork or --oppose-dao-fork.", config.DAOForkBlock)
+		howtosync := fmt.Sprintf("After the hard-fork block #%v passed, changing chains requires a resync from scratch!", config.DAOForkBlock)
+		separator := strings.Repeat("-", len(howtoswap))
+
+		glog.V(logger.Warn).Info(separator)
+		glog.V(logger.Warn).Info(current)
+		glog.V(logger.Warn).Info(howtoswap)
+		glog.V(logger.Warn).Info(howtosync)
+		glog.V(logger.Warn).Info(separator)
+	}
+	return config
 }

 // MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
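In short, the rewritten MustMakeChainConfigFromDb resolves the fork settings in three passes: take any stored config, fill defaults for missing fields, then let the CLI flags override the vote. A minimal sketch of that resolution order on its own, with hypothetical inputs standing in for the database and flag lookups (1920000 is the mainnet DAO fork height, used here only as a sample default):

package main

import (
	"fmt"
	"math/big"
)

// chainConfig mirrors just the two DAO-related fields resolved above.
type chainConfig struct {
	DAOForkBlock   *big.Int
	DAOForkSupport bool
}

// resolveDAOConfig applies the same precedence as the diff: stored value,
// then network default, then an explicit --support/--oppose override.
func resolveDAOConfig(stored *chainConfig, defaultBlock *big.Int, supportFlag, opposeFlag bool) *chainConfig {
	config := &chainConfig{}
	if stored != nil {
		*config = *stored // 1. reuse whatever the database already records
	}
	if config.DAOForkBlock == nil {
		config.DAOForkBlock = defaultBlock // 2. fall back to the network default
		config.DAOForkSupport = true
	}
	switch { // 3. a command line vote always wins
	case supportFlag:
		config.DAOForkSupport = true
	case opposeFlag:
		config.DAOForkSupport = false
	}
	return config
}

func main() {
	cfg := resolveDAOConfig(nil, big.NewInt(1920000), false, true)
	fmt.Println(cfg.DAOForkBlock, cfg.DAOForkSupport) // 1920000 false
}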
@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/internal/jsre"
 	"github.com/ethereum/go-ethereum/internal/web3ext"
 	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/mattn/go-colorable"
 	"github.com/peterh/liner"
 	"github.com/robertkrimen/otto"
 )
@@ -80,7 +81,7 @@ func New(config Config) (*Console, error) {
 		config.Prompt = DefaultPrompt
 	}
 	if config.Printer == nil {
-		config.Printer = os.Stdout
+		config.Printer = colorable.NewColorableStdout()
 	}
 	// Initialize the console and return
 	console := &Console{
@@ -330,11 +331,11 @@ func (c *Console) Interactive() {
 		// Append the line to the input and check for multi-line interpretation
 		input += line + "\n"

-		indents = strings.Count(input, "{") + strings.Count(input, "(") - strings.Count(input, "}") - strings.Count(input, ")")
+		indents = countIndents(input)
 		if indents <= 0 {
 			prompt = c.prompt
 		} else {
-			prompt = strings.Repeat("..", indents*2) + " "
+			prompt = strings.Repeat(".", indents*3) + " "
 		}
 		// If all the needed lines are present, save the command and run
 		if indents <= 0 {
@@ -353,6 +354,49 @@ func (c *Console) Interactive() {
 	}
 }

+// countIndents returns the number of identations for the given input.
+// In case of invalid input such as var a = } the result can be negative.
+func countIndents(input string) int {
+	var (
+		indents     = 0
+		inString    = false
+		strOpenChar = ' '   // keep track of the string open char to allow var str = "I'm ....";
+		charEscaped = false // keep track if the previous char was the '\' char, allow var str = "abc\"def";
+	)
+
+	for _, c := range input {
+		switch c {
+		case '\\':
+			// indicate next char as escaped when in string and previous char isn't escaping this backslash
+			if !charEscaped && inString {
+				charEscaped = true
+			}
+		case '\'', '"':
+			if inString && !charEscaped && strOpenChar == c { // end string
+				inString = false
+			} else if !inString && !charEscaped { // begin string
+				inString = true
+				strOpenChar = c
+			}
+			charEscaped = false
+		case '{', '(':
+			if !inString { // ignore brackets when in string, allow var str = "a{"; without indenting
+				indents++
+			}
+			charEscaped = false
+		case '}', ')':
+			if !inString {
+				indents--
+			}
+			charEscaped = false
+		default:
+			charEscaped = false
+		}
+	}
+
+	return indents
+}
+
 // Execute runs the JavaScript file specified as the argument.
 func (c *Console) Execute(path string) error {
 	return c.jsre.Exec(path)
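The console keeps buffering lines while countIndents(input) stays positive, and the continuation prompt grows three dots per open bracket. A tiny illustrative sketch of that prompt logic, with the indent count hard-coded to the value countIndents would return for one of the inputs tested below (countIndents itself is unexported, so it is not called directly here):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// e.g. countIndents(`function f(a, function(b) {`) == 2 per the test table below
	indents := 2
	prompt := "> "
	if indents > 0 {
		prompt = strings.Repeat(".", indents*3) + " "
	}
	fmt.Printf("%q\n", prompt) // "...... "
}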
@@ -294,3 +294,49 @@ func TestPrettyError(t *testing.T) {
 		t.Fatalf("pretty error mismatch: have %s, want %s", output, want)
 	}
 }
+
+// Tests that tests if the number of indents for JS input is calculated correct.
+func TestIndenting(t *testing.T) {
+	testCases := []struct {
+		input               string
+		expectedIndentCount int
+	}{
+		{`var a = 1;`, 0},
+		{`"some string"`, 0},
+		{`"some string with (parentesis`, 0},
+		{`"some string with newline
+		("`, 0},
+		{`function v(a,b) {}`, 0},
+		{`function f(a,b) { var str = "asd("; };`, 0},
+		{`function f(a) {`, 1},
+		{`function f(a, function(b) {`, 2},
+		{`function f(a, function(b) {
+		   var str = "a)}";
+		  });`, 0},
+		{`function f(a,b) {
+		   var str = "a{b(" + a, ", " + b;
+		  }`, 0},
+		{`var str = "\"{"`, 0},
+		{`var str = "'("`, 0},
+		{`var str = "\\{"`, 0},
+		{`var str = "\\\\{"`, 0},
+		{`var str = 'a"{`, 0},
+		{`var obj = {`, 1},
+		{`var obj = { {a:1`, 2},
+		{`var obj = { {a:1}`, 1},
+		{`var obj = { {a:1}, b:2}`, 0},
+		{`var obj = {}`, 0},
+		{`var obj = {
+			a: 1, b: 2
+		}`, 0},
+		{`var test = }`, -1},
+		{`var str = "a\""; var obj = {`, 1},
+	}
+
+	for i, tt := range testCases {
+		counted := countIndents(tt.input)
+		if counted != tt.expectedIndentCount {
+			t.Errorf("test %d: invalid indenting: have %d, want %d", i, counted, tt.expectedIndentCount)
+		}
+	}
+}
@@ -1,11 +1,11 @@
 FROM alpine:3.3

 RUN \
-  apk add --update go git make gcc musl-dev gmp-dev gmp && \
+  apk add --update go git make gcc musl-dev && \
   git clone https://github.com/ethereum/go-ethereum && \
   (cd go-ethereum && make geth) && \
   cp go-ethereum/build/bin/geth /geth && \
-  apk del go git make gcc musl-dev gmp-dev && \
+  apk del go git make gcc musl-dev && \
   rm -rf /go-ethereum && rm -rf /var/cache/apk/*

 EXPOSE 8545
@@ -163,7 +163,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Generate a chain of b.N blocks using the supplied block
 	// generator function.
 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
-	chain, _ := GenerateChain(genesis, db, b.N, gen)
+	chain, _ := GenerateChain(nil, genesis, db, b.N, gen)

 	// Time the insertion of the new chain.
 	// State and blocks are stored in the same DB.
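GenerateChain now takes a chain configuration as its first argument; passing nil, as every updated call site in these hunks does, falls back to the default rules. A hedged sketch of the new call shape as a hypothetical test inside package core (the block count and coinbase are arbitrary; the helper names are the ones visible in this diff):

package core

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// TestGenerateChainSketch shows the updated signature: config first (nil for
// defaults), then the parent block, database, length and generator callback.
func TestGenerateChainSketch(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	genesis := GenesisBlockForTesting(db, common.Address{1}, big.NewInt(1000000))

	blocks, _ := GenerateChain(nil, genesis, db, 8, func(i int, gen *BlockGen) {
		gen.SetCoinbase(common.Address{0x00})
	})
	if len(blocks) != 8 {
		t.Fatalf("generated %d blocks, want 8", len(blocks))
	}
}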
@@ -247,7 +247,8 @@ func ValidateHeader(config *ChainConfig, pow pow.PoW, header *types.Header, pare
 			return &BlockNonceErr{header.Number, header.Hash(), header.Nonce.Uint64()}
 		}
 	}
-	return nil
+	// If all checks passed, validate the extra-data field for hard forks
+	return ValidateDAOHeaderExtraData(config, header)
 }

 // CalcDifficulty is the difficulty adjustment algorithm. It returns
|
@@ -763,13 +763,20 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 	if ptd == nil {
 		return NonStatTy, ParentError(block.ParentHash())
 	}
+	// Make sure no inconsistent state is leaked during insertion
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
 	localTd := self.GetTd(self.currentBlock.Hash())
 	externTd := new(big.Int).Add(block.Difficulty(), ptd)

-	// Make sure no inconsistent state is leaked during insertion
-	self.mu.Lock()
-	defer self.mu.Unlock()
+	// Irrelevant of the canonical status, write the block itself to the database
+	if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
+		glog.Fatalf("failed to write block total difficulty: %v", err)
+	}
+	if err := WriteBlock(self.chainDb, block); err != nil {
+		glog.Fatalf("failed to write block contents: %v", err)
+	}

 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
@@ -781,20 +788,11 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 			return NonStatTy, err
 		}
 	}
-		// Insert the block as the new head of the chain
-		self.insert(block)
+		self.insert(block) // Insert the block as the new head of the chain
 		status = CanonStatTy
 	} else {
 		status = SideStatTy
 	}
-	// Irrelevant of the canonical status, write the block itself to the database
-	if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
-		glog.Fatalf("failed to write block total difficulty: %v", err)
-	}
-	if err := WriteBlock(self.chainDb, block); err != nil {
-		glog.Fatalf("failed to write block contents: %v", err)
-	}
-
 	self.futureBlocks.Remove(block.Hash())

 	return
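Taken together, the two WriteBlock hunks above move the total-difficulty and block writes ahead of the canonical-head decision and hold the chain mutex for the whole operation, so a concurrent reader can never see a head hash whose data has not been persisted yet. A minimal sketch of that "persist first, then switch the head pointer under a lock" pattern, with invented types and names that are not part of go-ethereum:

// Sketch only: mirrors the ordering used in WriteBlock above.
package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu     sync.Mutex
	blocks map[string]string // hash -> payload
	head   string
}

func (s *store) writeBlock(hash, payload string, better bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Irrespective of canonical status, persist the data first so readers
	// never observe a head without backing data.
	s.blocks[hash] = payload

	// Only then decide whether this block becomes the new head.
	if better {
		s.head = hash
	}
}

func main() {
	s := &store{blocks: make(map[string]string)}
	s.writeBlock("0xabc", "block-1", true)
	fmt.Println(s.head, s.blocks[s.head])
}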
@@ -712,7 +712,7 @@ func TestFastVsFullChains(t *testing.T) {
 		funds   = big.NewInt(1000000000)
 		genesis = GenesisBlockForTesting(gendb, address, funds)
 	)
-	blocks, receipts := GenerateChain(genesis, gendb, 1024, func(i int, block *BlockGen) {
+	blocks, receipts := GenerateChain(nil, genesis, gendb, 1024, func(i int, block *BlockGen) {
 		block.SetCoinbase(common.Address{0x00})

 		// If the block number is multiple of 3, send a few bonus transactions to the miner
@@ -795,7 +795,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		genesis = GenesisBlockForTesting(gendb, address, funds)
 	)
 	height := uint64(1024)
-	blocks, receipts := GenerateChain(genesis, gendb, int(height), nil)
+	blocks, receipts := GenerateChain(nil, genesis, gendb, int(height), nil)

 	// Configure a subchain to roll back
 	remove := []common.Hash{}
@@ -895,7 +895,7 @@ func TestChainTxReorgs(t *testing.T) {
 	// - futureAdd: transaction added after the reorg has already finished
 	var pastAdd, freshAdd, futureAdd *types.Transaction

-	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(nil, genesis, db, 3, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
@@ -920,7 +920,7 @@ func TestChainTxReorgs(t *testing.T) {
 	}

 	// overwrite the old chain
-	chain, _ = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ = GenerateChain(nil, genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
@@ -990,7 +990,7 @@ func TestLogReorgs(t *testing.T) {
 	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)

 	subs := evmux.Subscribe(RemovedLogsEvent{})
-	chain, _ := GenerateChain(genesis, db, 2, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(nil, genesis, db, 2, func(i int, gen *BlockGen) {
 		if i == 1 {
 			tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), code).SignECDSA(key1)
 			if err != nil {
@@ -1003,7 +1003,7 @@ func TestLogReorgs(t *testing.T) {
 		t.Fatalf("failed to insert chain: %v", err)
 	}

-	chain, _ = GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {})
+	chain, _ = GenerateChain(nil, genesis, db, 3, func(i int, gen *BlockGen) {})
 	if _, err := blockchain.InsertChain(chain); err != nil {
 		t.Fatalf("failed to insert forked chain: %v", err)
 	}
@@ -1025,12 +1025,12 @@ func TestReorgSideEvent(t *testing.T) {
 	evmux := &event.TypeMux{}
 	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)

-	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {})
+	chain, _ := GenerateChain(nil, genesis, db, 3, func(i int, gen *BlockGen) {})
 	if _, err := blockchain.InsertChain(chain); err != nil {
 		t.Fatalf("failed to insert chain: %v", err)
 	}

-	replacementBlocks, _ := GenerateChain(genesis, db, 4, func(i int, gen *BlockGen) {
+	replacementBlocks, _ := GenerateChain(nil, genesis, db, 4, func(i int, gen *BlockGen) {
 		tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), nil).SignECDSA(key1)
 		if i == 2 {
 			gen.OffsetTime(-1)
@@ -1090,3 +1090,41 @@ done:
 	}

 }
+
+// Tests if the canonical block can be fetched from the database during chain insertion.
+func TestCanonicalBlockRetrieval(t *testing.T) {
+	var (
+		db, _   = ethdb.NewMemDatabase()
+		genesis = WriteGenesisBlockForTesting(db)
+	)
+
+	evmux := &event.TypeMux{}
+	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)
+
+	chain, _ := GenerateChain(nil, genesis, db, 10, func(i int, gen *BlockGen) {})
+
+	for i, _ := range chain {
+		go func(block *types.Block) {
+			// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
+			for {
+				ch := GetCanonicalHash(db, block.NumberU64())
+				if ch == (common.Hash{}) {
+					continue // busy wait for canonical hash to be written
+				}
+				if ch != block.Hash() {
+					t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
+				}
+				fb := GetBlock(db, ch)
+				if fb == nil {
+					t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
+				}
+				if fb.Hash() != block.Hash() {
+					t.Fatalf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
+				}
+				return
+			}
+		}(chain[i])
+
+		blockchain.InsertChain(types.Blocks{chain[i]})
+	}
+}
@@ -26,6 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 )

@@ -35,7 +36,11 @@ import (

 // MakeChainConfig returns a new ChainConfig with the ethereum default chain settings.
 func MakeChainConfig() *ChainConfig {
-	return &ChainConfig{HomesteadBlock: big.NewInt(0)}
+	return &ChainConfig{
+		HomesteadBlock: big.NewInt(0),
+		DAOForkBlock:   nil,
+		DAOForkSupport: true,
+	}
 }

 // FakePow is a non-validating proof of work implementation.
@@ -173,10 +178,27 @@ func (b *BlockGen) OffsetTime(seconds int64) {
 // Blocks created by GenerateChain do not contain valid proof of work
 // values. Inserting them into BlockChain requires use of FakePow or
 // a similar non-validating proof of work implementation.
-func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
+func GenerateChain(config *ChainConfig, parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
 	blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
 	genblock := func(i int, h *types.Header, statedb *state.StateDB) (*types.Block, types.Receipts) {
 		b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}

+		// Mutate the state and block according to any hard-fork specs
+		if config == nil {
+			config = MakeChainConfig()
+		}
+		if daoBlock := config.DAOForkBlock; daoBlock != nil {
+			limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
+			if h.Number.Cmp(daoBlock) >= 0 && h.Number.Cmp(limit) < 0 {
+				if config.DAOForkSupport {
+					h.Extra = common.CopyBytes(params.DAOForkBlockExtra)
+				}
+			}
+		}
+		if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(h.Number) == 0 {
+			ApplyDAOHardFork(statedb)
+		}
+		// Execute any user modifications to the block and finalize it
 		if gen != nil {
 			gen(i, b)
 		}
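The hunk above is the reason every GenerateChain call site in this compare view gains a leading argument: the generator now takes a *ChainConfig and, when that config enables the DAO fork, stamps the fork extra-data and applies the refund transfer while building blocks. A hedged call-site sketch, assuming the package layout of this go-ethereum revision (ChainConfig and GenerateChain in the core package); it is illustrative, not code from the repository:

// Sketch of calling the new GenerateChain signature shown above.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	genesis := core.WriteGenesisBlockForTesting(db)

	// Passing nil keeps the old behaviour: GenerateChain falls back to
	// MakeChainConfig() internally.
	plain, _ := core.GenerateChain(nil, genesis, db, 10, nil)

	// An explicit pro-fork config makes blocks inside the DAO extra-data
	// range carry the fork extra-data and applies the refund transfer.
	conf := &core.ChainConfig{
		HomesteadBlock: big.NewInt(0),
		DAOForkBlock:   big.NewInt(5),
		DAOForkSupport: true,
	}
	forked, _ := core.GenerateChain(conf, genesis, db, 10, nil)

	fmt.Println(len(plain), len(forked))
}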
@@ -261,7 +283,7 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [

 // makeBlockChain creates a deterministic chain of blocks rooted at parent.
 func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
-	blocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {
+	blocks, _ := GenerateChain(nil, parent, db, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 	})
 	return blocks
@@ -47,7 +47,7 @@ func ExampleGenerateChain() {
 	// This call generates a chain of 5 blocks. The function runs for
 	// each block and adds different features to gen based on the
 	// block index.
-	chain, _ := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(nil, genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			// In block 1, addr1 sends addr2 some ether.
@@ -60,7 +60,7 @@ func TestPowVerification(t *testing.T) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(nil, genesis, testdb, 8, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -115,7 +115,7 @@ func testPowConcurrentVerification(t *testing.T, threads int) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(nil, genesis, testdb, 8, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -186,7 +186,7 @@ func testPowConcurrentAbortion(t *testing.T, threads int) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks, _ = GenerateChain(genesis, testdb, 1024, nil)
+		blocks, _ = GenerateChain(nil, genesis, testdb, 1024, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -31,16 +31,17 @@ var ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general conf
 // that any network, identified by its genesis block, can have its own
 // set of configuration options.
 type ChainConfig struct {
-	HomesteadBlock *big.Int // homestead switch block
+	HomesteadBlock *big.Int `json:"homesteadBlock"` // Homestead switch block (nil = no fork, 0 = already homestead)
+	DAOForkBlock   *big.Int `json:"daoForkBlock"`   // TheDAO hard-fork switch block (nil = no fork)
+	DAOForkSupport bool     `json:"daoForkSupport"` // Whether the nodes supports or opposes the DAO hard-fork
+
 	VmConfig vm.Config `json:"-"`
 }

 // IsHomestead returns whether num is either equal to the homestead block or greater.
 func (c *ChainConfig) IsHomestead(num *big.Int) bool {
-	if num == nil {
+	if c.HomesteadBlock == nil || num == nil {
 		return false
 	}

 	return num.Cmp(c.HomesteadBlock) >= 0
 }
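The convention established above is that a nil fork block means the fork never activates, while a set block number (possibly zero) switches the rule on from that height; IsHomestead now also guards against a nil HomesteadBlock. A minimal, self-contained illustration of that nil-versus-set semantics, using a local stand-in type rather than the real core.ChainConfig:

// Illustration only: forkConfig is a stand-in, not core.ChainConfig.
package main

import (
	"fmt"
	"math/big"
)

type forkConfig struct {
	HomesteadBlock *big.Int // nil = never forks
	DAOForkBlock   *big.Int // nil = node ignores the DAO fork entirely
	DAOForkSupport bool
}

func (c *forkConfig) isHomestead(num *big.Int) bool {
	if c.HomesteadBlock == nil || num == nil {
		return false
	}
	return num.Cmp(c.HomesteadBlock) >= 0
}

func main() {
	pro := &forkConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: big.NewInt(32), DAOForkSupport: true}
	off := &forkConfig{} // all nil: neither Homestead nor the DAO fork ever triggers

	fmt.Println(pro.isHomestead(big.NewInt(1))) // true
	fmt.Println(off.isHomestead(big.NewInt(1))) // false, the nil block short-circuits
}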
core/dao.go (new file, 74 lines)
@@ -0,0 +1,74 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"bytes"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// ValidateDAOHeaderExtraData validates the extra-data field of a block header to
+// ensure it conforms to DAO hard-fork rules.
+//
+// DAO hard-fork extension to the header validity:
+//   a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range
+//      with the fork specific extra-data set
+//   b) if the node is pro-fork, require blocks in the specific range to have the
+//      unique extra-data set.
+func ValidateDAOHeaderExtraData(config *ChainConfig, header *types.Header) error {
+	// Short circuit validation if the node doesn't care about the DAO fork
+	if config.DAOForkBlock == nil {
+		return nil
+	}
+	// Make sure the block is within the fork's modified extra-data range
+	limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange)
+	if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
+		return nil
+	}
+	// Depending whether we support or oppose the fork, validate the extra-data contents
+	if config.DAOForkSupport {
+		if bytes.Compare(header.Extra, params.DAOForkBlockExtra) != 0 {
+			return ValidationError("DAO pro-fork bad block extra-data: 0x%x", header.Extra)
+		}
+	} else {
+		if bytes.Compare(header.Extra, params.DAOForkBlockExtra) == 0 {
+			return ValidationError("DAO no-fork bad block extra-data: 0x%x", header.Extra)
+		}
+	}
+	// All ok, header has the same extra-data we expect
+	return nil
+}
+
+// ApplyDAOHardFork modifies the state database according to the DAO hard-fork
+// rules, transferring all balances of a set of DAO accounts to a single refund
+// contract.
+func ApplyDAOHardFork(statedb *state.StateDB) {
+	// Retrieve the contract to refund balances into
+	refund := statedb.GetOrNewStateObject(params.DAORefundContract)
+
+	// Move every DAO account and extra-balance account funds into the refund contract
+	for _, addr := range params.DAODrainList {
+		if account := statedb.GetStateObject(addr); account != nil {
+			refund.AddBalance(account.Balance())
+			account.SetBalance(new(big.Int))
+		}
+	}
+}
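The new file above only enforces the extra-data rule inside a small window after the fork block, and pro-fork and no-fork nodes demand opposite contents there. A stand-alone sketch of that decision, where the range and extra-data values are invented stand-ins for params.DAOForkExtraRange and params.DAOForkBlockExtra:

// Sketch of the validation window used by ValidateDAOHeaderExtraData above.
package main

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
)

var (
	extraRange = big.NewInt(10)          // stand-in for params.DAOForkExtraRange
	forkExtra  = []byte("dao-hard-fork") // stand-in for params.DAOForkBlockExtra
)

func checkExtra(forkBlock, number *big.Int, proFork bool, extra []byte) error {
	if forkBlock == nil {
		return nil // node does not care about the fork at all
	}
	limit := new(big.Int).Add(forkBlock, extraRange)
	if number.Cmp(forkBlock) < 0 || number.Cmp(limit) >= 0 {
		return nil // outside the enforced window
	}
	// Inside the window, the extra-data must agree with the node's stance.
	if proFork != bytes.Equal(extra, forkExtra) {
		return errors.New("extra-data conflicts with the node's fork stance")
	}
	return nil
}

func main() {
	fork := big.NewInt(32)
	fmt.Println(checkExtra(fork, big.NewInt(35), true, forkExtra))  // <nil>
	fmt.Println(checkExtra(fork, big.NewInt(35), false, forkExtra)) // error: no-fork node rejects it
	fmt.Println(checkExtra(fork, big.NewInt(50), false, forkExtra)) // <nil>, past the window
}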
core/dao_test.go (new file, 132 lines)
@@ -0,0 +1,132 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// Tests that DAO-fork enabled clients can properly filter out fork-commencing
+// blocks based on their extradata fields.
+func TestDAOForkRangeExtradata(t *testing.T) {
+	forkBlock := big.NewInt(32)
+
+	// Generate a common prefix for both pro-forkers and non-forkers
+	db, _ := ethdb.NewMemDatabase()
+	genesis := WriteGenesisBlockForTesting(db)
+	prefix, _ := GenerateChain(nil, genesis, db, int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {})
+
+	// Create the concurrent, conflicting two nodes
+	proDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(proDb)
+	proConf := &ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: true}
+	proBc, _ := NewBlockChain(proDb, proConf, new(FakePow), new(event.TypeMux))
+
+	conDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(conDb)
+	conConf := &ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: false}
+	conBc, _ := NewBlockChain(conDb, conConf, new(FakePow), new(event.TypeMux))
+
+	if _, err := proBc.InsertChain(prefix); err != nil {
+		t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
+	}
+	if _, err := conBc.InsertChain(prefix); err != nil {
+		t.Fatalf("con-fork: failed to import chain prefix: %v", err)
+	}
+	// Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks
+	for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ {
+		// Create a pro-fork block, and try to feed into the no-fork chain
+		db, _ = ethdb.NewMemDatabase()
+		WriteGenesisBlockForTesting(db)
+		bc, _ := NewBlockChain(db, conConf, new(FakePow), new(event.TypeMux))
+
+		blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
+		for j := 0; j < len(blocks)/2; j++ {
+			blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
+		}
+		if _, err := bc.InsertChain(blocks); err != nil {
+			t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
+		}
+		blocks, _ = GenerateChain(proConf, conBc.CurrentBlock(), db, 1, func(i int, gen *BlockGen) {})
+		if _, err := conBc.InsertChain(blocks); err == nil {
+			t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0])
+		}
+		// Create a proper no-fork block for the contra-forker
+		blocks, _ = GenerateChain(conConf, conBc.CurrentBlock(), db, 1, func(i int, gen *BlockGen) {})
+		if _, err := conBc.InsertChain(blocks); err != nil {
+			t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err)
+		}
+		// Create a no-fork block, and try to feed into the pro-fork chain
+		db, _ = ethdb.NewMemDatabase()
+		WriteGenesisBlockForTesting(db)
+		bc, _ = NewBlockChain(db, proConf, new(FakePow), new(event.TypeMux))
+
+		blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
+		for j := 0; j < len(blocks)/2; j++ {
+			blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
+		}
+		if _, err := bc.InsertChain(blocks); err != nil {
+			t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
+		}
+		blocks, _ = GenerateChain(conConf, proBc.CurrentBlock(), db, 1, func(i int, gen *BlockGen) {})
+		if _, err := proBc.InsertChain(blocks); err == nil {
+			t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0])
+		}
+		// Create a proper pro-fork block for the pro-forker
+		blocks, _ = GenerateChain(proConf, proBc.CurrentBlock(), db, 1, func(i int, gen *BlockGen) {})
+		if _, err := proBc.InsertChain(blocks); err != nil {
+			t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err)
+		}
+	}
+	// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
+	db, _ = ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(db)
+	bc, _ := NewBlockChain(db, conConf, new(FakePow), new(event.TypeMux))
+
+	blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
+	for j := 0; j < len(blocks)/2; j++ {
+		blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
+	}
+	if _, err := bc.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
+	}
+	blocks, _ = GenerateChain(proConf, conBc.CurrentBlock(), db, 1, func(i int, gen *BlockGen) {})
+	if _, err := conBc.InsertChain(blocks); err != nil {
+		t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err)
+	}
+	// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
+	db, _ = ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(db)
+	bc, _ = NewBlockChain(db, proConf, new(FakePow), new(event.TypeMux))
+
+	blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
+	for j := 0; j < len(blocks)/2; j++ {
+		blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
+	}
+	if _, err := bc.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
+	}
+	blocks, _ = GenerateChain(conConf, proBc.CurrentBlock(), db, 1, func(i int, gen *BlockGen) {})
+	if _, err := proBc.InsertChain(blocks); err != nil {
+		t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err)
+	}
+}
@@ -157,7 +157,7 @@ func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
 }

 // GetBlock retrieves an entire block corresponding to the hash, assembling it
-// back from the stored header and body.
+// back from the stored header and body. If either the header or body could not
+// be retrieved nil is returned.
+//
+// Note, due to concurrent download of header and block body the header and thus
+// canonical hash can be stored in the database but the body data not (yet).
 func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
 	// Retrieve the block header and body contents
 	header := GetHeader(db, hash)
@@ -561,7 +561,7 @@ func TestMipmapChain(t *testing.T) {
 	defer db.Close()

 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)})
-	chain, receipts := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) {
+	chain, receipts := GenerateChain(nil, genesis, db, 1010, func(i int, gen *BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 1:
@@ -129,6 +129,14 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 	localTd := hc.GetTd(hc.currentHeaderHash)
 	externTd := new(big.Int).Add(header.Difficulty, ptd)

+	// Irrelevant of the canonical status, write the td and header to the database
+	if err := hc.WriteTd(hash, externTd); err != nil {
+		glog.Fatalf("failed to write header total difficulty: %v", err)
+	}
+	if err := WriteHeader(hc.chainDb, header); err != nil {
+		glog.Fatalf("failed to write header contents: %v", err)
+	}
+
 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -150,6 +158,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 			headHeader = hc.GetHeader(headHash)
 			headNumber = headHeader.Number.Uint64()
 		}
+
 		// Extend the canonical chain with the new header
 		if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
 			glog.Fatalf("failed to insert header number: %v", err)
@@ -157,19 +166,13 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 		if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
 			glog.Fatalf("failed to insert head header hash: %v", err)
 		}

 		hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)

 		status = CanonStatTy
 	} else {
 		status = SideStatTy
 	}
-	// Irrelevant of the canonical status, write the header itself to the database
-	if err := hc.WriteTd(hash, externTd); err != nil {
-		glog.Fatalf("failed to write header total difficulty: %v", err)
-	}
-	if err := WriteHeader(hc.chainDb, header); err != nil {
-		glog.Fatalf("failed to write header contents: %v", err)
-	}
 	hc.headerCache.Add(hash, header)

 	return
@@ -187,7 +187,7 @@ func (self *StateObject) Copy() *StateObject {
 	stateObject.codeHash = common.CopyBytes(self.codeHash)
 	stateObject.nonce = self.nonce
 	stateObject.trie = self.trie
-	stateObject.code = common.CopyBytes(self.code)
+	stateObject.code = self.code
 	stateObject.initCode = common.CopyBytes(self.initCode)
 	stateObject.storage = self.storage.Copy()
 	stateObject.remove = self.remove
@@ -149,10 +149,11 @@ func TestSnapshot2(t *testing.T) {
 	so0.balance = big.NewInt(42)
 	so0.nonce = 43
 	so0.SetCode([]byte{'c', 'a', 'f', 'e'})
-	so0.remove = true
+	so0.remove = false
 	so0.deleted = false
-	so0.dirty = false
+	so0.dirty = true
 	state.SetStateObject(so0)
+	state.Commit()

 	// and one with deleted == true
 	so1 := state.GetStateObject(stateobjaddr1)
@@ -173,6 +174,7 @@ func TestSnapshot2(t *testing.T) {
 	state.Set(snapshot)

 	so0Restored := state.GetStateObject(stateobjaddr0)
+	so0Restored.GetState(storageaddr)
 	so1Restored := state.GetStateObject(stateobjaddr1)
 	// non-deleted is equal (restored)
 	compareStateObjects(so0Restored, so0, t)
@@ -324,8 +324,10 @@ func (self *StateDB) Copy() *StateDB {
 	state, _ := New(common.Hash{}, self.db)
 	state.trie = self.trie
 	for k, stateObject := range self.stateObjects {
+		if stateObject.dirty {
 			state.stateObjects[k] = stateObject.Copy()
 		}
+	}

 	state.refund.Set(self.refund)

@@ -364,12 +366,32 @@ func (s *StateDB) IntermediateRoot() common.Hash {
 			stateObject.Update()
 			s.UpdateStateObject(stateObject)
 		}
-		stateObject.dirty = false
 		}
 	}
 	return s.trie.Hash()
 }

+// DeleteSuicides flags the suicided objects for deletion so that it
+// won't be referenced again when called / queried up on.
+//
+// DeleteSuicides should not be used for consensus related updates
+// under any circumstances.
+func (s *StateDB) DeleteSuicides() {
+	// Reset refund so that any used-gas calculations can use
+	// this method.
+	s.refund = new(big.Int)
+	for _, stateObject := range s.stateObjects {
+		if stateObject.dirty {
+			// If the object has been removed by a suicide
+			// flag the object as deleted.
+			if stateObject.remove {
+				stateObject.deleted = true
+			}
+			stateObject.dirty = false
+		}
+	}
+}
+
 // Commit commits all state changes to the database.
 func (s *StateDB) Commit() (root common.Hash, err error) {
 	root, batch := s.CommitBatch()
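With the hunks above, IntermediateRoot no longer clears the dirty flag, Copy only carries over dirty objects, and the new DeleteSuicides lets a caller (the transaction tracer, below) mark suicided objects as deleted without committing state. An illustration-only sketch of that flag handling, using a local stand-in type instead of the real state.StateObject:

// Sketch of the dirty/remove/deleted bookkeeping added in DeleteSuicides above.
package main

import "fmt"

type obj struct {
	dirty, remove, deleted bool
}

// deleteSuicides mirrors the loop above: dirty, removed (suicided) objects are
// flagged deleted and the dirty marker is cleared, without touching the trie.
func deleteSuicides(objs map[string]*obj) {
	for _, o := range objs {
		if o.dirty {
			if o.remove {
				o.deleted = true
			}
			o.dirty = false
		}
	}
}

func main() {
	objs := map[string]*obj{
		"a": {dirty: true, remove: true},
		"b": {dirty: true},
		"c": {remove: true}, // untouched: not dirty
	}
	deleteSuicides(objs)
	for k, o := range objs {
		fmt.Println(k, o.deleted, o.dirty)
	}
}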
@@ -65,7 +65,11 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 		allLogs vm.Logs
 		gp      = new(GasPool).AddGas(block.GasLimit())
 	)
+	// Mutate the the block and state according to any hard-fork specs
+	if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
+		ApplyDAOHardFork(statedb)
+	}
+	// Iterate over and process the individual transactions
 	for i, tx := range block.Transactions() {
 		statedb.StartRecord(tx.Hash(), block.Hash(), i)
 		receipt, logs, _, err := ApplyTransaction(p.config, p.bc, gp, statedb, header, tx, totalUsedGas, cfg)
@@ -95,7 +95,7 @@ func ecrecoverFunc(in []byte) []byte {

 	// tighter sig s values in homestead only apply to tx sigs
 	if !crypto.ValidateSignatureValues(v, r, s, false) {
-		glog.V(logger.Debug).Infof("EC RECOVER FAIL: v, r or s value invalid")
+		glog.V(logger.Detail).Infof("ECRECOVER error: v, r or s value invalid")
 		return nil
 	}

@@ -106,7 +106,7 @@ func ecrecoverFunc(in []byte) []byte {
 	pubKey, err := crypto.Ecrecover(in[:32], rsv)
 	// make sure the public key is a valid one
 	if err != nil {
-		glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err)
+		glog.V(logger.Detail).Infoln("ECRECOVER error: ", err)
 		return nil
 	}

@@ -1876,6 +1876,7 @@ func (api *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogC
 		if err != nil {
 			return nil, fmt.Errorf("mutation failed: %v", err)
 		}
+		stateDb.DeleteSuicides()
 		continue
 	}
 	// Otherwise trace the transaction and return
@@ -250,6 +250,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	if config.ChainConfig == nil {
 		return nil, errors.New("missing chain config")
 	}
+	core.WriteChainConfig(chainDb, genesis.Hash(), config.ChainConfig)
+
 	eth.chainConfig = config.ChainConfig
 	eth.chainConfig.VmConfig = vm.Config{
 		EnableJit: config.EnableJit,
@@ -32,7 +32,7 @@ func TestMipmapUpgrade(t *testing.T) {
 	addr := common.BytesToAddress([]byte("jeff"))
 	genesis := core.WriteGenesisBlockForTesting(db)

-	chain, receipts := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(nil, genesis, db, 10, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 1:
@@ -49,11 +49,6 @@ var (
 	MaxStateFetch = 384 // Amount of node state values to allow fetching per request

 	MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation

-	hashTTL        = 3 * time.Second     // [eth/61] Time it takes for a hash request to time out
-	blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
-	blockTTL       = 3 * blockTargetRTT  // [eth/61] Maximum time allowance before a block request is considered expired
-
 	rttMinEstimate   = 2 * time.Second  // Minimum round-trip time to target for download requests
 	rttMaxEstimate   = 20 * time.Second // Maximum rount-trip time to target for download requests
 	rttMinConfidence = 0.1              // Worse confidence factor in our estimated RTT value
@@ -64,7 +59,6 @@ var (
 	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
 	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

-	maxQueuedHashes   = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
 	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
 	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
 	maxResultsProcess = 2048      // Number of content download results to import at once into the chain
@@ -84,16 +78,13 @@ var (
 	errStallingPeer      = errors.New("peer is stalling")
 	errNoPeers           = errors.New("no peers to keep download active")
 	errTimeout           = errors.New("timeout")
-	errEmptyHashSet      = errors.New("empty hash set by peer")
 	errEmptyHeaderSet    = errors.New("empty header set by peer")
 	errPeersUnavailable  = errors.New("no peers available or all tried for download")
-	errAlreadyInPool     = errors.New("hash already in pool")
 	errInvalidAncestor   = errors.New("retrieved ancestor is invalid")
 	errInvalidChain      = errors.New("retrieved hash chain is invalid")
 	errInvalidBlock      = errors.New("retrieved block is invalid")
 	errInvalidBody       = errors.New("retrieved block body is invalid")
 	errInvalidReceipt    = errors.New("retrieved receipt is invalid")
-	errCancelHashFetch   = errors.New("hash download canceled (requested)")
 	errCancelBlockFetch  = errors.New("block download canceled (requested)")
 	errCancelHeaderFetch = errors.New("block header download canceled (requested)")
 	errCancelBodyFetch   = errors.New("block body download canceled (requested)")
@@ -102,6 +93,7 @@ var (
 	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
 	errCancelContentProcessing = errors.New("content processing canceled (requested)")
 	errNoSyncActive            = errors.New("no sync active")
+	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
 )

 type Downloader struct {
@@ -146,20 +138,19 @@ type Downloader struct {

 	// Channels
 	newPeerCh chan *peer
-	hashCh    chan dataPack // [eth/61] Channel receiving inbound hashes
-	blockCh   chan dataPack // [eth/61] Channel receiving inbound blocks
 	headerCh  chan dataPack // [eth/62] Channel receiving inbound block headers
 	bodyCh    chan dataPack // [eth/62] Channel receiving inbound block bodies
 	receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts
 	stateCh   chan dataPack // [eth/63] Channel receiving inbound node state data
-	blockWakeCh   chan bool // [eth/61] Channel to signal the block fetcher of new tasks
 	bodyWakeCh    chan bool // [eth/62] Channel to signal the block body fetcher of new tasks
 	receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks
 	stateWakeCh   chan bool // [eth/63] Channel to signal the state fetcher of new tasks
 	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

+	// Cancellation and termination
+	cancelPeer string        // Identifier of the peer currently being used as the master (cancel on drop)
 	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
-	cancelLock sync.RWMutex  // Lock to protect the cancel channel in delivers
+	cancelLock sync.RWMutex  // Lock to protect the cancel channel and peer in delivers

 	quitCh   chan struct{} // Quit channel to signal termination
 	quitLock sync.RWMutex  // Lock to prevent double closes
@@ -199,13 +190,10 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha
 		rollback:      rollback,
 		dropPeer:      dropPeer,
 		newPeerCh:     make(chan *peer, 1),
-		hashCh:        make(chan dataPack, 1),
-		blockCh:       make(chan dataPack, 1),
 		headerCh:      make(chan dataPack, 1),
 		bodyCh:        make(chan dataPack, 1),
 		receiptCh:     make(chan dataPack, 1),
 		stateCh:       make(chan dataPack, 1),
-		blockWakeCh:   make(chan bool, 1),
 		bodyWakeCh:    make(chan bool, 1),
 		receiptWakeCh: make(chan bool, 1),
 		stateWakeCh:   make(chan bool, 1),
@@ -250,13 +238,12 @@ func (d *Downloader) Synchronising() bool {

 // RegisterPeer injects a new download peer into the set of block source to be
 // used for fetching hashes and blocks from.
-func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
+func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHeadRetrievalFn,
-	getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {

 	glog.V(logger.Detail).Infoln("Registering peer", id)
-	if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
+	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
 		glog.V(logger.Error).Infoln("Register failed:", err)
 		return err
 	}
@@ -269,12 +256,22 @@ func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
 // the specified peer. An effort is also made to return any pending fetches into
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
+	// Unregister the peer from the active peer set and revoke any fetch tasks
 	glog.V(logger.Detail).Infoln("Unregistering peer", id)
 	if err := d.peers.Unregister(id); err != nil {
 		glog.V(logger.Error).Infoln("Unregister failed:", err)
 		return err
 	}
 	d.queue.Revoke(id)

+	// If this peer was the master peer, abort sync immediately
+	d.cancelLock.RLock()
+	master := id == d.cancelPeer
+	d.cancelLock.RUnlock()
+
+	if master {
+		d.cancel()
+	}
 	return nil
 }

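The hunk above makes UnregisterPeer abort an in-flight sync when the dropped peer is the one the sync was started against; as shown further down, synchronise records that peer id under cancelLock when it creates the cancel channel. A minimal sketch of the same guarded bookkeeping, with names invented for the example rather than taken from the downloader:

// Sketch of the master-peer check pattern used above.
package main

import (
	"fmt"
	"sync"
)

type syncer struct {
	mu         sync.RWMutex
	masterPeer string
	cancelled  bool
}

func (s *syncer) start(id string) {
	s.mu.Lock()
	s.masterPeer = id
	s.cancelled = false
	s.mu.Unlock()
}

func (s *syncer) unregister(id string) {
	s.mu.RLock()
	master := id == s.masterPeer
	s.mu.RUnlock()

	if master {
		s.cancelled = true // the real code closes the cancel channel here
	}
}

func main() {
	s := &syncer{}
	s.start("peer-1")
	s.unregister("peer-2")
	fmt.Println(s.cancelled) // false
	s.unregister("peer-1")
	fmt.Println(s.cancelled) // true
}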
@@ -291,7 +288,9 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
 	case errBusy:
 		glog.V(logger.Detail).Infof("Synchronisation already in progress")

-	case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
+	case errTimeout, errBadPeer, errStallingPeer,
+		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
+		errInvalidAncestor, errInvalidChain:
 		glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
 		d.dropPeer(id)

@@ -323,13 +322,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 	d.queue.Reset()
 	d.peers.Reset()

-	for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
+	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
 		select {
 		case <-ch:
 		default:
 		}
 	}
-	for _, ch := range []chan dataPack{d.hashCh, d.blockCh, d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
+	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
 		for empty := false; !empty; {
 			select {
 			case <-ch:
@@ -345,9 +344,10 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 				empty = true
 			}
 		}
-	// Create cancel channel for aborting mid-flight
+	// Create cancel channel for aborting mid-flight and mark the master peer
 	d.cancelLock.Lock()
 	d.cancelCh = make(chan struct{})
+	d.cancelPeer = id
 	d.cancelLock.Unlock()

 	defer d.cancel() // No matter what, we can't leave the cancel channel open
@@ -377,41 +377,15 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
       d.mux.Post(DoneEvent{})
     }
   }()
+  if p.version < 62 {
+    return errTooOld
+  }

   glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
   defer func(start time.Time) {
     glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
   }(time.Now())

-  switch {
-  case p.version == 61:
-    // Look up the sync boundaries: the common ancestor and the target block
-    latest, err := d.fetchHeight61(p)
-    if err != nil {
-      return err
-    }
-    origin, err := d.findAncestor61(p, latest)
-    if err != nil {
-      return err
-    }
-    d.syncStatsLock.Lock()
-    if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
-      d.syncStatsChainOrigin = origin
-    }
-    d.syncStatsChainHeight = latest
-    d.syncStatsLock.Unlock()
-
-    // Initiate the sync using a concurrent hash and block retrieval algorithm
-    d.queue.Prepare(origin+1, d.mode, 0, nil)
-    if d.syncInitHook != nil {
-      d.syncInitHook(origin, latest)
-    }
-    return d.spawnSync(origin+1,
-      func() error { return d.fetchHashes61(p, td, origin+1) },
-      func() error { return d.fetchBlocks61(origin + 1) },
-    )
-
-  case p.version >= 62:
   // Look up the sync boundaries: the common ancestor and the target block
   latest, err := d.fetchHeight(p)
   if err != nil {
@@ -470,12 +444,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
     func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
     func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync
   )
-
-  default:
-    // Something very wrong, stop right here
-    glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
-    return errBadPeer
-  }
 }

 // spawnSync runs d.process and all given fetcher functions to completion in
@@ -540,459 +508,14 @@ func (d *Downloader) Terminate() {
   d.cancel()
 }

-// fetchHeight61 retrieves the head block of the remote peer to aid in estimating
-// the total time a pending synchronisation would take.
-func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
-  glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
-
-  // Request the advertised remote head block and wait for the response
-  go p.getBlocks([]common.Hash{p.head})
-
-  timeout := time.After(hashTTL)
-  for {
-    select {
-    case <-d.cancelCh:
-      return 0, errCancelBlockFetch
-
-    case packet := <-d.blockCh:
-      // Discard anything not from the origin peer
-      if packet.PeerId() != p.id {
-        glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", packet.PeerId())
-        break
-      }
-      // Make sure the peer actually gave something valid
-      blocks := packet.(*blockPack).blocks
-      if len(blocks) != 1 {
-        glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks))
-        return 0, errBadPeer
-      }
-      return blocks[0].NumberU64(), nil
-
-    case <-timeout:
-      glog.V(logger.Debug).Infof("%v: head block timeout", p)
-      return 0, errTimeout
-
-    case <-d.hashCh:
-      // Out of bounds hashes received, ignore them
-
-    case <-d.headerCh:
-    case <-d.bodyCh:
-    case <-d.stateCh:
-    case <-d.receiptCh:
-      // Ignore eth/{62,63} packets because this is eth/61.
-      // These can arrive as a late delivery from a previous sync.
-    }
-  }
-}
-
-// findAncestor61 tries to locate the common ancestor block of the local chain and
-// a remote peers blockchain. In the general case when our node was in sync and
-// on the correct chain, checking the top N blocks should already get us a match.
-// In the rare scenario when we ended up on a long reorganisation (i.e. none of
-// the head blocks match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
-  glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
-
-  // Figure out the valid ancestor range to prevent rewrite attacks
-  floor, ceil := int64(-1), d.headBlock().NumberU64()
-  if ceil >= MaxForkAncestry {
-    floor = int64(ceil - MaxForkAncestry)
-  }
-  // Request the topmost blocks to short circuit binary ancestor lookup
-  head := ceil
-  if head > height {
-    head = height
-  }
-  from := int64(head) - int64(MaxHashFetch) + 1
-  if from < 0 {
-    from = 0
-  }
-  go p.getAbsHashes(uint64(from), MaxHashFetch)
-
-  // Wait for the remote response to the head fetch
-  number, hash := uint64(0), common.Hash{}
-  timeout := time.After(hashTTL)
-
-  for finished := false; !finished; {
-    select {
-    case <-d.cancelCh:
-      return 0, errCancelHashFetch
-
-    case packet := <-d.hashCh:
-      // Discard anything not from the origin peer
-      if packet.PeerId() != p.id {
-        glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
-        break
-      }
-      // Make sure the peer actually gave something valid
-      hashes := packet.(*hashPack).hashes
-      if len(hashes) == 0 {
-        glog.V(logger.Debug).Infof("%v: empty head hash set", p)
-        return 0, errEmptyHashSet
-      }
-      // Check if a common ancestor was found
-      finished = true
-      for i := len(hashes) - 1; i >= 0; i-- {
-        // Skip any headers that underflow/overflow our requested set
-        header := d.getHeader(hashes[i])
-        if header == nil || header.Number.Int64() < from || header.Number.Uint64() > head {
-          continue
-        }
-        // Otherwise check if we already know the header or not
-        if d.hasBlockAndState(hashes[i]) {
-          number, hash = header.Number.Uint64(), header.Hash()
-          break
-        }
-      }
-
-    case <-timeout:
-      glog.V(logger.Debug).Infof("%v: head hash timeout", p)
-      return 0, errTimeout
-
-    case <-d.blockCh:
-      // Out of bounds blocks received, ignore them
-
-    case <-d.headerCh:
-    case <-d.bodyCh:
-    case <-d.stateCh:
-    case <-d.receiptCh:
-      // Ignore eth/{62,63} packets because this is eth/61.
-      // These can arrive as a late delivery from a previous sync.
-    }
-  }
-  // If the head fetch already found an ancestor, return
-  if !common.EmptyHash(hash) {
-    if int64(number) <= floor {
-      glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
-      return 0, errInvalidAncestor
-    }
-    glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
-    return number, nil
-  }
-  // Ancestor not found, we need to binary search over our chain
-  start, end := uint64(0), head
-  if floor > 0 {
-    start = uint64(floor)
-  }
-  for start+1 < end {
-    // Split our chain interval in two, and request the hash to cross check
-    check := (start + end) / 2
-
-    timeout := time.After(hashTTL)
-    go p.getAbsHashes(uint64(check), 1)
-
-    // Wait until a reply arrives to this request
-    for arrived := false; !arrived; {
-      select {
-      case <-d.cancelCh:
-        return 0, errCancelHashFetch
-
-      case packet := <-d.hashCh:
-        // Discard anything not from the origin peer
-        if packet.PeerId() != p.id {
-          glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
-          break
-        }
-        // Make sure the peer actually gave something valid
-        hashes := packet.(*hashPack).hashes
-        if len(hashes) != 1 {
-          glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
-          return 0, errBadPeer
-        }
-        arrived = true
-
-        // Modify the search interval based on the response
-        if !d.hasBlockAndState(hashes[0]) {
-          end = check
-          break
-        }
-        block := d.getBlock(hashes[0]) // this doesn't check state, hence the above explicit check
-        if block.NumberU64() != check {
-          glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
-          return 0, errBadPeer
-        }
-        start = check
-
-      case <-timeout:
-        glog.V(logger.Debug).Infof("%v: search hash timeout", p)
-        return 0, errTimeout
-
-      case <-d.blockCh:
-        // Out of bounds blocks received, ignore them
-
-      case <-d.headerCh:
-      case <-d.bodyCh:
-      case <-d.stateCh:
-      case <-d.receiptCh:
-        // Ignore eth/{62,63} packets because this is eth/61.
-        // These can arrive as a late delivery from a previous sync.
-      }
-    }
-  }
-  // Ensure valid ancestry and return
-  if int64(start) <= floor {
-    glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
-    return 0, errInvalidAncestor
-  }
-  glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
-  return start, nil
-}
-
-// fetchHashes61 keeps retrieving hashes from the requested number, until no more
-// are returned, potentially throttling on the way.
-func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
-  glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
-
-  // Create a timeout timer, and the associated hash fetcher
-  request := time.Now() // time of the last fetch request
-  timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
-  <-timeout.C // timeout channel should be initially empty
-  defer timeout.Stop()
-
-  getHashes := func(from uint64) {
-    glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
-
-    request = time.Now()
-    timeout.Reset(hashTTL)
-    go p.getAbsHashes(from, MaxHashFetch)
-  }
-  // Start pulling hashes, until all are exhausted
-  getHashes(from)
-  gotHashes := false
-
-  for {
-    select {
-    case <-d.cancelCh:
-      return errCancelHashFetch
-
-    case packet := <-d.hashCh:
-      // Make sure the active peer is giving us the hashes
-      if packet.PeerId() != p.id {
-        glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
-        break
-      }
-      hashReqTimer.UpdateSince(request)
-      timeout.Stop()
-
-      // If no more hashes are inbound, notify the block fetcher and return
-      if packet.Items() == 0 {
-        glog.V(logger.Debug).Infof("%v: no available hashes", p)
-
-        select {
-        case d.blockWakeCh <- false:
-        case <-d.cancelCh:
-        }
-        // If no hashes were retrieved at all, the peer violated it's TD promise that it had a
-        // better chain compared to ours. The only exception is if it's promised blocks were
-        // already imported by other means (e.g. fetcher):
-        //
-        // R <remote peer>, L <local node>: Both at block 10
-        // R: Mine block 11, and propagate it to L
-        // L: Queue block 11 for import
-        // L: Notice that R's head and TD increased compared to ours, start sync
-        // L: Import of block 11 finishes
-        // L: Sync begins, and finds common ancestor at 11
-        // L: Request new hashes up from 11 (R's TD was higher, it must have something)
-        // R: Nothing to give
-        if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
-          return errStallingPeer
-        }
-        return nil
-      }
-      gotHashes = true
-      hashes := packet.(*hashPack).hashes
-
-      // Otherwise insert all the new hashes, aborting in case of junk
-      glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashes), from)
-
-      inserts := d.queue.Schedule61(hashes, true)
-      if len(inserts) != len(hashes) {
-        glog.V(logger.Debug).Infof("%v: stale hashes", p)
-        return errBadPeer
-      }
-      // Notify the block fetcher of new hashes, but stop if queue is full
-      if d.queue.PendingBlocks() < maxQueuedHashes {
-        // We still have hashes to fetch, send continuation wake signal (potential)
-        select {
-        case d.blockWakeCh <- true:
-        default:
-        }
-      } else {
-        // Hash limit reached, send a termination wake signal (enforced)
-        select {
-        case d.blockWakeCh <- false:
-        case <-d.cancelCh:
-        }
-        return nil
-      }
-      // Queue not yet full, fetch the next batch
-      from += uint64(len(hashes))
-      getHashes(from)
-
-    case <-timeout.C:
-      glog.V(logger.Debug).Infof("%v: hash request timed out", p)
-      hashTimeoutMeter.Mark(1)
-      return errTimeout
-
-    case <-d.headerCh:
-    case <-d.bodyCh:
-    case <-d.stateCh:
-    case <-d.receiptCh:
-      // Ignore eth/{62,63} packets because this is eth/61.
-      // These can arrive as a late delivery from a previous sync.
-    }
-  }
-}
-
-// fetchBlocks61 iteratively downloads the scheduled hashes, taking any available
-// peers, reserving a chunk of blocks for each, waiting for delivery and also
-// periodically checking for timeouts.
-func (d *Downloader) fetchBlocks61(from uint64) error {
-  glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
-  defer glog.V(logger.Debug).Infof("Block download terminated")
-
-  // Create a timeout timer for scheduling expiration tasks
-  ticker := time.NewTicker(100 * time.Millisecond)
-  defer ticker.Stop()
-
-  update := make(chan struct{}, 1)
-
-  // Fetch blocks until the hash fetcher's done
-  finished := false
-  for {
-    select {
-    case <-d.cancelCh:
-      return errCancelBlockFetch
-
-    case packet := <-d.blockCh:
-      // If the peer was previously banned and failed to deliver it's pack
-      // in a reasonable time frame, ignore it's message.
-      if peer := d.peers.Peer(packet.PeerId()); peer != nil {
-        blocks := packet.(*blockPack).blocks
-
-        // Deliver the received chunk of blocks and check chain validity
-        accepted, err := d.queue.DeliverBlocks(peer.id, blocks)
-        if err == errInvalidChain {
-          return err
-        }
-        // Unless a peer delivered something completely else than requested (usually
-        // caused by a timed out request which came through in the end), set it to
-        // idle. If the delivery's stale, the peer should have already been idled.
-        if err != errStaleDelivery {
-          peer.SetBlocksIdle(accepted)
-        }
-        // Issue a log to the user to see what's going on
-        switch {
-        case err == nil && len(blocks) == 0:
-          glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
-        case err == nil:
-          glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
-        default:
-          glog.V(logger.Detail).Infof("%s: delivery failed: %v", peer, err)
-        }
-      }
-      // Blocks arrived, try to update the progress
-      select {
-      case update <- struct{}{}:
-      default:
-      }
-
-    case cont := <-d.blockWakeCh:
-      // The hash fetcher sent a continuation flag, check if it's done
-      if !cont {
-        finished = true
-      }
-      // Hashes arrive, try to update the progress
-      select {
-      case update <- struct{}{}:
-      default:
-      }
-
-    case <-ticker.C:
-      // Sanity check update the progress
-      select {
-      case update <- struct{}{}:
-      default:
-      }
-
-    case <-update:
-      // Short circuit if we lost all our peers
-      if d.peers.Len() == 0 {
-        return errNoPeers
-      }
-      // Check for block request timeouts and demote the responsible peers
-      for pid, fails := range d.queue.ExpireBlocks(blockTTL) {
-        if peer := d.peers.Peer(pid); peer != nil {
-          if fails > 1 {
-            glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
-            peer.SetBlocksIdle(0)
-          } else {
-            glog.V(logger.Debug).Infof("%s: stalling block delivery, dropping", peer)
-            d.dropPeer(pid)
-          }
-        }
-      }
-      // If there's nothing more to fetch, wait or terminate
-      if d.queue.PendingBlocks() == 0 {
-        if !d.queue.InFlightBlocks() && finished {
-          glog.V(logger.Debug).Infof("Block fetching completed")
-          return nil
-        }
-        break
-      }
-      // Send a download request to all idle peers, until throttled
-      throttled := false
-      idles, total := d.peers.BlockIdlePeers()
-
-      for _, peer := range idles {
-        // Short circuit if throttling activated
-        if d.queue.ShouldThrottleBlocks() {
-          throttled = true
-          break
-        }
-        // Reserve a chunk of hashes for a peer. A nil can mean either that
-        // no more hashes are available, or that the peer is known not to
-        // have them.
-        request := d.queue.ReserveBlocks(peer, peer.BlockCapacity(blockTargetRTT))
-        if request == nil {
-          continue
-        }
-        if glog.V(logger.Detail) {
-          glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
-        }
-        // Fetch the chunk and make sure any errors return the hashes to the queue
-        if err := peer.Fetch61(request); err != nil {
-          // Although we could try and make an attempt to fix this, this error really
-          // means that we've double allocated a fetch task to a peer. If that is the
-          // case, the internal state of the downloader and the queue is very wrong so
-          // better hard crash and note the error instead of silently accumulating into
-          // a much bigger issue.
-          panic(fmt.Sprintf("%v: fetch assignment failed", peer))
-        }
-      }
-      // Make sure that we have peers available for fetching. If all peers have been tried
-      // and all failed throw an error
-      if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
-        return errPeersUnavailable
-      }
-
-    case <-d.headerCh:
-    case <-d.bodyCh:
-    case <-d.stateCh:
-    case <-d.receiptCh:
-      // Ignore eth/{62,63} packets because this is eth/61.
-      // These can arrive as a late delivery from a previous sync.
-    }
-  }
-}
-
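
The roughly 450 lines deleted above are the eth/61-only copies of logic that survives in the header-based code path: fetch the remote head, scan the most recent blocks for a match, and fall back to a binary search when none of them line up. A compact sketch of that binary-search step, written against an abstract predicate instead of the downloader's peer and chain APIs (all names below are illustrative):

```go
package main

import "fmt"

// findAncestor binary-searches (floor, head] for the highest block number that
// both chains share, given a predicate that reports whether the block at a
// given height is common to both. It mirrors the shape of the deleted
// findAncestor61 loop: start/end bracket the answer and each probe halves the
// interval.
func findAncestor(floor int64, head uint64, shared func(uint64) bool) uint64 {
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		check := (start + end) / 2
		if shared(check) {
			start = check // still on the common prefix, ancestor is at or above check
		} else {
			end = check // already diverged, ancestor is below check
		}
	}
	return start
}

func main() {
	// Toy scenario: the chains agree up to and including block 1234, diverge after.
	common := uint64(1234)
	ancestor := findAncestor(-1, 4096, func(n uint64) bool { return n <= common })
	fmt.Println("common ancestor:", ancestor) // common ancestor: 1234
}
```

The loop keeps the invariant that `start` is always a shared height and `end` is not, so it converges on the highest common block in a logarithmic number of probes.
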
 // fetchHeight retrieves the head header of the remote peer to aid in estimating
 // the total time a pending synchronisation would take.
 func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
   glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)

   // Request the advertised remote head block and wait for the response
-  go p.getRelHeaders(p.head, 1, 0, false)
+  head, _ := p.currentHead()
+  go p.getRelHeaders(head, 1, 0, false)

   timeout := time.After(d.requestTTL())
   for {
@@ -1022,11 +545,6 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
     case <-d.stateCh:
     case <-d.receiptCh:
       // Out of bounds delivery, ignore
-
-    case <-d.hashCh:
-    case <-d.blockCh:
-      // Ignore eth/61 packets because this is eth/62+.
-      // These can arrive as a late delivery from a previous sync.
     }
   }
 }
@@ -1037,7 +555,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 // In the rare scenario when we ended up on a long reorganisation (i.e. none of
 // the head links match), we do a binary search to find the common ancestor.
 func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
-  glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
+  glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height)

   // Figure out the valid ancestor range to prevent rewrite attacks
   floor, ceil := int64(-1), d.headHeader().Number.Uint64()
@@ -1054,11 +572,17 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
   if head > height {
     head = height
   }
-  from := int64(head) - int64(MaxHeaderFetch) + 1
+  from := int64(head) - int64(MaxHeaderFetch)
   if from < 0 {
     from = 0
   }
-  go p.getAbsHeaders(uint64(from), MaxHeaderFetch, 0, false)
+  // Span out with 15 block gaps into the future to catch bad head reports
+  limit := 2 * MaxHeaderFetch / 16
+  count := 1 + int((int64(ceil)-from)/16)
+  if count > limit {
+    count = limit
+  }
+  go p.getAbsHeaders(uint64(from), count, 15, false)

   // Wait for the remote response to the head fetch
   number, hash := uint64(0), common.Hash{}
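
The replacement request no longer asks for a dense run of headers ending at the head; it samples every 16th header starting at `from`, capped at `2*MaxHeaderFetch/16` entries, so the probe stretches toward the local ceiling and can expose a peer whose reported height does not match what it actually knows. A worked example of the request arithmetic, using an illustrative MaxHeaderFetch of 192 (the real constant is defined elsewhere in this package and may differ):

```go
package main

import "fmt"

const maxHeaderFetch = 192 // illustrative stand-in for the package constant

func main() {
	ceil := uint64(100000)  // local chain height
	height := uint64(99900) // chain height reported by the remote peer
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(maxHeaderFetch)
	if from < 0 {
		from = 0
	}
	// Cap the number of samples and space them 16 blocks apart (skip = 15).
	limit := 2 * maxHeaderFetch / 16
	count := 1 + int((int64(ceil)-from)/16)
	if count > limit {
		count = limit
	}
	last := from + int64(count-1)*16
	fmt.Printf("request %d headers starting at #%d, one every 16 blocks\n", count, from)
	fmt.Printf("highest sampled header: #%d (peer reported head #%d)\n", last, height)
}
```

With these inputs the 19 sampled headers reach block 99996, well past the 99900 the peer claimed, which is the kind of overshoot the `number > height` check later in this function is looking for.
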
@@ -1067,7 +591,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
   for finished := false; !finished; {
     select {
     case <-d.cancelCh:
-      return 0, errCancelHashFetch
+      return 0, errCancelHeaderFetch

     case packet := <-d.headerCh:
       // Discard anything not from the origin peer
@@ -1083,12 +607,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
       }
       // Make sure the peer's reply conforms to the request
       for i := 0; i < len(headers); i++ {
-        if number := headers[i].Number.Int64(); number != from+int64(i) {
-          glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i), number)
-          return 0, errInvalidChain
-        }
-        if i > 0 && headers[i-1].Hash() != headers[i].ParentHash {
-          glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ancestry: expected [%x], got [%x]", p, i, headers[i-1].Hash().Bytes()[:4], headers[i].ParentHash[:4])
+        if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
+          glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number)
           return 0, errInvalidChain
         }
       }
@@ -1096,12 +616,18 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
       finished = true
       for i := len(headers) - 1; i >= 0; i-- {
         // Skip any headers that underflow/overflow our requested set
-        if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > head {
+        if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
           continue
         }
         // Otherwise check if we already know the header or not
         if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
           number, hash = headers[i].Number.Uint64(), headers[i].Hash()
+
+          // If every header is known, even future ones, the peer straight out lied about its head
+          if number > height && i == limit-1 {
+            glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number)
+            return 0, errStallingPeer
+          }
           break
         }
       }
@@ -1114,11 +640,6 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
     case <-d.stateCh:
     case <-d.receiptCh:
       // Out of bounds delivery, ignore
-
-    case <-d.hashCh:
-    case <-d.blockCh:
-      // Ignore eth/61 packets because this is eth/62+.
-      // These can arrive as a late delivery from a previous sync.
     }
   }
   // If the head fetch already found an ancestor, return
@@ -1146,7 +667,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
   for arrived := false; !arrived; {
     select {
     case <-d.cancelCh:
-      return 0, errCancelHashFetch
+      return 0, errCancelHeaderFetch

     case packer := <-d.headerCh:
       // Discard anything not from the origin peer
@@ -1182,11 +703,6 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
     case <-d.stateCh:
     case <-d.receiptCh:
       // Out of bounds delivery, ignore
-
-    case <-d.hashCh:
-    case <-d.blockCh:
-      // Ignore eth/61 packets because this is eth/62+.
-      // These can arrive as a late delivery from a previous sync.
     }
   }
 }
@@ -1305,11 +821,6 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
       case <-d.cancelCh:
       }
       return errBadPeer
-
-    case <-d.hashCh:
-    case <-d.blockCh:
-      // Ignore eth/61 packets because this is eth/62+.
-      // These can arrive as a late delivery from a previous sync.
     }
   }
 }
@@ -1555,7 +1066,14 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
       // Check for fetch request timeouts and demote the responsible peers
       for pid, fails := range expire() {
         if peer := d.peers.Peer(pid); peer != nil {
-          if fails > 1 {
+          // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
+          // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times
+          // out that sync wise we need to get rid of the peer.
+          //
+          // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth
+          // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
+          // how response times reacts, to it always requests one more than the minimum (i.e. min 2).
+          if fails > 2 {
             glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
             setIdle(peer, 0)
           } else {
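
The new comment block explains a two-tier reaction to expired requests: a peer whose larger request timed out was probably just overestimated, so its measured throughput is reset, while a peer that cannot deliver even the minimal probe (the downloader always asks for at least two items while estimating bandwidth and latency separately) is dropped. A small sketch of that policy, decoupled from the downloader's peer and queue types (names are illustrative):

```go
package main

import "fmt"

// reactToExpiry mirrors the shape of the fetchParts timeout handling: the
// number of expired items decides between throttling the peer down and
// dropping it outright.
func reactToExpiry(peer string, expired int, setIdle func(string, int), drop func(string)) {
	if expired > 2 {
		// Many items expired: we likely overestimated the peer (or ourselves).
		// Reset its measured throughput to the minimum instead of dropping.
		fmt.Printf("%s: delivery timeout, resetting throughput\n", peer)
		setIdle(peer, 0)
		return
	}
	// Even the minimal request (two items) timed out: the peer is stalling.
	fmt.Printf("%s: stalling delivery, dropping\n", peer)
	drop(peer)
}

func main() {
	setIdle := func(id string, accepted int) { fmt.Printf("  setIdle(%s, %d)\n", id, accepted) }
	drop := func(id string) { fmt.Printf("  drop(%s)\n", id) }

	reactToExpiry("peer-a", 37, setIdle, drop) // big request expired: keep, throttle
	reactToExpiry("peer-b", 2, setIdle, drop)  // minimal request expired: drop
}
```
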
@@ -1623,11 +1141,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
       if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
         return errPeersUnavailable
       }
-
-    case <-d.hashCh:
-    case <-d.blockCh:
-      // Ignore eth/61 packets because this is eth/62+.
-      // These can arrive as a late delivery from a previous sync.
     }
   }
 }
@@ -1859,7 +1372,7 @@ func (d *Downloader) processContent() error {
       }
       if err != nil {
         glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
-        return err
+        return errInvalidChain
       }
       // Shift the results to the next batch
       results = results[items:]
@@ -1867,19 +1380,6 @@ func (d *Downloader) processContent() error {
   }
 }

-// DeliverHashes injects a new batch of hashes received from a remote node into
-// the download schedule. This is usually invoked through the BlockHashesMsg by
-// the protocol handler.
-func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) (err error) {
-  return d.deliver(id, d.hashCh, &hashPack{id, hashes}, hashInMeter, hashDropMeter)
-}
-
-// DeliverBlocks injects a new batch of blocks received from a remote node.
-// This is usually invoked through the BlocksMsg by the protocol handler.
-func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) (err error) {
-  return d.deliver(id, d.blockCh, &blockPack{id, blocks}, blockInMeter, blockDropMeter)
-}
-
 // DeliverHeaders injects a new batch of block headers received from a remote
 // node into the download schedule.
 func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
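
With the hash and block entry points deleted, every remaining delivery (headers, bodies, receipts, node data) funnels through the same idea: push the packet into the matching channel unless the sync has been cancelled. A stripped-down sketch of that pattern; `deliver` here is a stand-in and not the downloader's actual helper:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSyncActive = errors.New("no sync active")

// deliver hands a data packet to whichever goroutine is consuming destCh,
// unless the whole sync has been cancelled in the meantime.
func deliver(destCh chan []string, cancelCh chan struct{}, packet []string) error {
	select {
	case destCh <- packet:
		return nil
	case <-cancelCh:
		return errNoSyncActive
	}
}

func main() {
	headerCh := make(chan []string, 1)
	cancelCh := make(chan struct{})

	fmt.Println("first delivery:", deliver(headerCh, cancelCh, []string{"header-1"}))

	close(cancelCh) // sync aborted: the consumer is gone, deliveries are rejected
	fmt.Println("after cancel:", deliver(headerCh, cancelCh, []string{"header-2"}))

	fmt.Println("queued packet:", <-headerCh)
}
```

Selecting against the cancel channel means a late delivery aimed at a dead sync returns an error instead of blocking the protocol handler.
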
eth/downloader/downloader_test.go

@@ -55,7 +55,7 @@ func init() {
 // reassembly.
 func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
   // Generate the block chain
-  blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
+  blocks, receipts := core.GenerateChain(nil, parent, testdb, n, func(i int, block *core.BlockGen) {
     block.SetCoinbase(common.Address{seed})

     // If a heavy chain is requested, delay blocks to raise difficulty
@@ -399,14 +399,12 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha

   var err error
   switch version {
-  case 61:
-    err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil, nil)
   case 62:
-    err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
+    err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
   case 63:
-    err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+    err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
   case 64:
-    err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+    err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
   }
   if err == nil {
     // Assign the owned hashes, headers and blocks to the peer (deep copy)
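
The updated RegisterPeer calls hand the downloader a callback that reports the peer's current head and total difficulty on demand, instead of a head hash frozen at registration time. A minimal sketch of that callback shape with a toy registry; the signature below is illustrative and not the exact downloader API:

```go
package main

import (
	"fmt"
	"math/big"
)

// currentHeadFn reports a peer's latest head hash and total difficulty on demand.
type currentHeadFn func() (hash string, td *big.Int)

// registry is a toy stand-in for the downloader's peer set.
type registry struct {
	heads map[string]currentHeadFn
}

func (r *registry) register(id string, head currentHeadFn) {
	r.heads[id] = head
}

func main() {
	r := &registry{heads: make(map[string]currentHeadFn)}

	// The peer's view of its own chain, which may advance after registration.
	peerHead, peerTD := "0xabc0", big.NewInt(100)
	r.register("peer-1", func() (string, *big.Int) { return peerHead, new(big.Int).Set(peerTD) })

	h, td := r.heads["peer-1"]()
	fmt.Println("head at registration:", h, "td:", td)

	peerHead, peerTD = "0xabc1", big.NewInt(117) // the peer mined a new block
	h, td = r.heads["peer-1"]()
	fmt.Println("head later:", h, "td:", td)
}
```

Because the closure reads the peer's latest values each time it is invoked, the registered head can advance without re-registering the peer, which mirrors what the `peerCurrentHeadFn` helper in the next hunk does.
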
@@ -465,83 +463,14 @@ func (dl *downloadTester) dropPeer(id string) {
|
|||||||
dl.downloader.UnregisterPeer(id)
|
dl.downloader.UnregisterPeer(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// peerGetRelHashesFn constructs a GetHashes function associated with a specific
|
// peerCurrentHeadFn constructs a function to retrieve a peer's current head hash
|
||||||
// peer in the download tester. The returned function can be used to retrieve
|
// and total difficulty.
|
||||||
// batches of hashes from the particularly requested peer.
|
func (dl *downloadTester) peerCurrentHeadFn(id string) func() (common.Hash, *big.Int) {
|
||||||
func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) func(head common.Hash) error {
|
return func() (common.Hash, *big.Int) {
|
||||||
return func(head common.Hash) error {
|
|
||||||
time.Sleep(delay)
|
|
||||||
|
|
||||||
dl.lock.RLock()
|
dl.lock.RLock()
|
||||||
defer dl.lock.RUnlock()
|
defer dl.lock.RUnlock()
|
||||||
|
|
||||||
// Gather the next batch of hashes
|
return dl.peerHashes[id][0], nil
|
||||||
hashes := dl.peerHashes[id]
|
|
||||||
result := make([]common.Hash, 0, MaxHashFetch)
|
|
||||||
for i, hash := range hashes {
|
|
||||||
if hash == head {
|
|
||||||
i++
|
|
||||||
for len(result) < cap(result) && i < len(hashes) {
|
|
||||||
result = append(result, hashes[i])
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Delay delivery a bit to allow attacks to unfold
|
|
||||||
go func() {
|
|
||||||
time.Sleep(time.Millisecond)
|
|
||||||
dl.downloader.DeliverHashes(id, result)
|
|
||||||
}()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// peerGetAbsHashesFn constructs a GetHashesFromNumber function associated with
|
|
||||||
// a particular peer in the download tester. The returned function can be used to
|
|
||||||
// retrieve batches of hashes from the particularly requested peer.
|
|
||||||
func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) func(uint64, int) error {
|
|
||||||
return func(head uint64, count int) error {
|
|
||||||
time.Sleep(delay)
|
|
||||||
|
|
||||||
dl.lock.RLock()
|
|
||||||
defer dl.lock.RUnlock()
|
|
||||||
|
|
||||||
// Gather the next batch of hashes
|
|
||||||
hashes := dl.peerHashes[id]
|
|
||||||
result := make([]common.Hash, 0, count)
|
|
||||||
for i := 0; i < count && len(hashes)-int(head)-1-i >= 0; i++ {
|
|
||||||
result = append(result, hashes[len(hashes)-int(head)-1-i])
|
|
||||||
}
|
|
||||||
// Delay delivery a bit to allow attacks to unfold
|
|
||||||
go func() {
|
|
||||||
time.Sleep(time.Millisecond)
|
|
||||||
dl.downloader.DeliverHashes(id, result)
|
|
||||||
}()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// peerGetBlocksFn constructs a getBlocks function associated with a particular
|
|
||||||
// peer in the download tester. The returned function can be used to retrieve
|
|
||||||
// batches of blocks from the particularly requested peer.
|
|
||||||
func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
|
|
||||||
return func(hashes []common.Hash) error {
|
|
||||||
time.Sleep(delay)
|
|
||||||
|
|
||||||
dl.lock.RLock()
|
|
||||||
defer dl.lock.RUnlock()
|
|
||||||
|
|
||||||
blocks := dl.peerBlocks[id]
|
|
||||||
result := make([]*types.Block, 0, len(hashes))
|
|
||||||
for _, hash := range hashes {
|
|
||||||
if block, ok := blocks[hash]; ok {
|
|
||||||
result = append(result, block)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
go dl.downloader.DeliverBlocks(id, result)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -730,7 +659,6 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
|
|||||||
// Tests that simple synchronization against a canonical chain works correctly.
|
// Tests that simple synchronization against a canonical chain works correctly.
|
||||||
// In this test common ancestor lookup should be short circuited and not require
|
// In this test common ancestor lookup should be short circuited and not require
|
||||||
// binary searching.
|
// binary searching.
|
||||||
func TestCanonicalSynchronisation61(t *testing.T) { testCanonicalSynchronisation(t, 61, FullSync) }
|
|
||||||
func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) }
|
func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) }
|
||||||
func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
|
func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
|
||||||
func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
|
func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
|
||||||
@@ -759,7 +687,6 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
|
|
||||||
// Tests that if a large batch of blocks are being downloaded, it is throttled
|
// Tests that if a large batch of blocks are being downloaded, it is throttled
|
||||||
// until the cached blocks are retrieved.
|
// until the cached blocks are retrieved.
|
||||||
func TestThrottling61(t *testing.T) { testThrottling(t, 61, FullSync) }
|
|
||||||
func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) }
|
func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) }
|
||||||
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
|
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
|
||||||
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
|
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
|
||||||
@@ -845,7 +772,6 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
// Tests that simple synchronization against a forked chain works correctly. In
|
// Tests that simple synchronization against a forked chain works correctly. In
|
||||||
// this test common ancestor lookup should *not* be short circuited, and a full
|
// this test common ancestor lookup should *not* be short circuited, and a full
|
||||||
// binary search should be executed.
|
// binary search should be executed.
|
||||||
func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
|
|
||||||
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
|
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
|
||||||
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
|
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
|
||||||
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
|
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
|
||||||
@@ -881,7 +807,6 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
|
|
||||||
// Tests that synchronising against a much shorter but much heavyer fork works
|
// Tests that synchronising against a much shorter but much heavyer fork works
|
||||||
// corrently and is not dropped.
|
// corrently and is not dropped.
|
||||||
func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
|
|
||||||
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
|
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
|
||||||
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
|
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
|
||||||
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
|
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
|
||||||
@@ -915,24 +840,9 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
|
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that an inactive downloader will not accept incoming hashes and blocks.
|
|
||||||
func TestInactiveDownloader61(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
tester := newTester()
|
|
||||||
|
|
||||||
// Check that neither hashes nor blocks are accepted
|
|
||||||
if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
|
|
||||||
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
|
|
||||||
}
|
|
||||||
if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
|
|
||||||
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that chain forks are contained within a certain interval of the current
|
// Tests that chain forks are contained within a certain interval of the current
|
||||||
// chain head, ensuring that malicious peers cannot waste resources by feeding
|
// chain head, ensuring that malicious peers cannot waste resources by feeding
|
||||||
// long dead chains.
|
// long dead chains.
|
||||||
func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
|
|
||||||
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
|
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
|
||||||
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
|
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
|
||||||
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
|
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
|
||||||
@@ -968,7 +878,6 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
// Tests that chain forks are contained within a certain interval of the current
|
// Tests that chain forks are contained within a certain interval of the current
|
||||||
// chain head for short but heavy forks too. These are a bit special because they
|
// chain head for short but heavy forks too. These are a bit special because they
|
||||||
// take different ancestor lookup paths.
|
// take different ancestor lookup paths.
|
||||||
func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
|
|
||||||
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
|
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
|
||||||
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
|
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
|
||||||
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
|
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
|
||||||
@@ -1039,7 +948,6 @@ func TestInactiveDownloader63(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Tests that a canceled download wipes all previously accumulated state.
|
// Tests that a canceled download wipes all previously accumulated state.
|
||||||
func TestCancel61(t *testing.T) { testCancel(t, 61, FullSync) }
|
|
||||||
func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
|
func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
|
||||||
func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
|
func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
|
||||||
func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
|
func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
|
||||||
@@ -1081,7 +989,6 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
|
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
|
||||||
func TestMultiSynchronisation61(t *testing.T) { testMultiSynchronisation(t, 61, FullSync) }
|
|
||||||
func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
|
func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
|
||||||
func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
|
func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
|
||||||
func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
|
func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
|
||||||
@@ -1112,7 +1019,6 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
|
|
||||||
// Tests that synchronisations behave well in multi-version protocol environments
|
// Tests that synchronisations behave well in multi-version protocol environments
|
||||||
// and not wreak havoc on other nodes in the network.
|
// and not wreak havoc on other nodes in the network.
|
||||||
func TestMultiProtoSynchronisation61(t *testing.T) { testMultiProtoSync(t, 61, FullSync) }
|
|
||||||
func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
|
func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
|
||||||
func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
|
func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
|
||||||
func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
|
func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
|
||||||
@@ -1131,7 +1037,6 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
tester := newTester()
|
tester := newTester()
|
||||||
defer tester.terminate()
|
defer tester.terminate()
|
||||||
|
|
||||||
tester.newPeer("peer 61", 61, hashes, nil, blocks, nil)
|
|
||||||
tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
|
tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
|
||||||
tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
|
tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
|
||||||
tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
|
tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
|
||||||
@@ -1143,7 +1048,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
assertOwnChain(t, tester, targetBlocks+1)
|
assertOwnChain(t, tester, targetBlocks+1)
|
||||||
|
|
||||||
// Check that no peers have been dropped off
|
// Check that no peers have been dropped off
|
||||||
for _, version := range []int{61, 62, 63, 64} {
|
for _, version := range []int{62, 63, 64} {
|
||||||
peer := fmt.Sprintf("peer %d", version)
|
peer := fmt.Sprintf("peer %d", version)
|
||||||
if _, ok := tester.peerHashes[peer]; !ok {
|
if _, ok := tester.peerHashes[peer]; !ok {
|
||||||
t.Errorf("%s dropped", peer)
|
t.Errorf("%s dropped", peer)
|
||||||
@@ -1368,7 +1273,6 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
|
|
||||||
// Tests that a peer advertising an high TD doesn't get to stall the downloader
|
// Tests that a peer advertising an high TD doesn't get to stall the downloader
|
||||||
// afterwards by not sending any useful hashes.
|
// afterwards by not sending any useful hashes.
|
||||||
func TestHighTDStarvationAttack61(t *testing.T) { testHighTDStarvationAttack(t, 61, FullSync) }
|
|
||||||
func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
|
func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
|
||||||
func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
|
func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
|
||||||
func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
|
func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
|
||||||
@@ -1391,7 +1295,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
|
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
|
||||||
func TestBlockHeaderAttackerDropping61(t *testing.T) { testBlockHeaderAttackerDropping(t, 61) }
|
|
||||||
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
|
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
|
||||||
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
|
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
|
||||||
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
|
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
|
||||||
@@ -1409,7 +1312,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
|
|||||||
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
|
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
|
||||||
{errNoPeers, false}, // No peers to download from, soft race, no issue
|
{errNoPeers, false}, // No peers to download from, soft race, no issue
|
||||||
{errTimeout, true}, // No hashes received in due time, drop the peer
|
{errTimeout, true}, // No hashes received in due time, drop the peer
|
||||||
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
|
|
||||||
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
|
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
|
||||||
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
|
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
|
||||||
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
|
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
|
||||||
@@ -1417,7 +1319,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
|
|||||||
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
|
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
|
||||||
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
|
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
|
||||||
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
|
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
|
||||||
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
|
||||||
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||||
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||||
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||||
@@ -1450,7 +1351,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
|
|||||||
|
|
||||||
// Tests that synchronisation progress (origin block number, current block number
|
// Tests that synchronisation progress (origin block number, current block number
|
||||||
// and highest block number) is tracked and updated correctly.
|
// and highest block number) is tracked and updated correctly.
|
||||||
func TestSyncProgress61(t *testing.T) { testSyncProgress(t, 61, FullSync) }
|
|
||||||
func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
|
func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
|
||||||
func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
|
func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
|
||||||
func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
|
func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
|
||||||
@@ -1524,7 +1424,6 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 // Tests that synchronisation progress (origin block number and highest block
 // number) is tracked and updated correctly in case of a fork (or manual head
 // revertal).
-func TestForkedSyncProgress61(t *testing.T)     { testForkedSyncProgress(t, 61, FullSync) }
 func TestForkedSyncProgress62(t *testing.T)     { testForkedSyncProgress(t, 62, FullSync) }
 func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) }
 func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) }
@@ -1601,7 +1500,6 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 // Tests that if synchronisation is aborted due to some failure, then the progress
 // origin is not updated in the next sync cycle, as it should be considered the
 // continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress61(t *testing.T)     { testFailedSyncProgress(t, 61, FullSync) }
 func TestFailedSyncProgress62(t *testing.T)     { testFailedSyncProgress(t, 62, FullSync) }
 func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
 func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
@@ -1679,7 +1577,6 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

 // Tests that if an attacker fakes a chain height, after the attack is detected,
 // the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress61(t *testing.T)     { testFakedSyncProgress(t, 61, FullSync) }
 func TestFakedSyncProgress62(t *testing.T)     { testFakedSyncProgress(t, 62, FullSync) }
 func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
 func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
@@ -1824,13 +1721,15 @@ func testFastCriticalRestarts(t *testing.T, protocol int) {
    for i := 0; i < fsPivotInterval; i++ {
        tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
    }
+   tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
+
    // Synchronise with the peer a few times and make sure they fail until the retry limit
    for i := 0; i < fsCriticalTrials; i++ {
        // Attempt a sync and ensure it fails properly
        if err := tester.sync("peer", nil, FastSync); err == nil {
            t.Fatalf("failing fast sync succeeded: %v", err)
        }
-       time.Sleep(500 * time.Millisecond) // Make sure no in-flight requests remain
+       time.Sleep(100 * time.Millisecond) // Make sure no in-flight requests remain

        // If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
        if i == 0 {
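The testFastCriticalRestarts hunk injects a no-op dropPeer callback so the same deliberately faulty peer can be reused across retries instead of being disconnected after the first failure. The sketch below shows that injection pattern in isolation; fakeDownloader and its fields are hypothetical, not the tester used in this diff.

package main

import "fmt"

// fakeDownloader mimics the pattern of exposing the peer-drop action as a
// swappable function field, so tests can neutralise it.
type fakeDownloader struct {
	dropPeer func(id string)
}

func (d *fakeDownloader) fail(id string) {
	// On a sync failure the downloader would normally drop the peer.
	d.dropPeer(id)
}

func main() {
	d := &fakeDownloader{dropPeer: func(id string) { fmt.Println("dropped", id) }}
	// In a test that reuses one deliberately faulty peer, the callback can be
	// replaced with a no-op, mirroring the hunk above.
	d.dropPeer = func(id string) {}
	d.fail("peer")
	fmt.Println("peer kept for the next retry")
}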
@@ -23,16 +23,6 @@ import (
 )

 var (
-   hashInMeter      = metrics.NewMeter("eth/downloader/hashes/in")
-   hashReqTimer     = metrics.NewTimer("eth/downloader/hashes/req")
-   hashDropMeter    = metrics.NewMeter("eth/downloader/hashes/drop")
-   hashTimeoutMeter = metrics.NewMeter("eth/downloader/hashes/timeout")
-
-   blockInMeter      = metrics.NewMeter("eth/downloader/blocks/in")
-   blockReqTimer     = metrics.NewTimer("eth/downloader/blocks/req")
-   blockDropMeter    = metrics.NewMeter("eth/downloader/blocks/drop")
-   blockTimeoutMeter = metrics.NewMeter("eth/downloader/blocks/timeout")
-
    headerInMeter    = metrics.NewMeter("eth/downloader/headers/in")
    headerReqTimer   = metrics.NewTimer("eth/downloader/headers/req")
    headerDropMeter  = metrics.NewMeter("eth/downloader/headers/drop")
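The hunk above deletes the hash and block meters that only the eth/61 code paths updated, keeping the header-oriented instrumentation. As a rough illustration of what such counters track, here is a dependency-free sketch; fetchMetrics and deliver are made-up names, and the real code uses the metrics package rather than raw atomics.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// fetchMetrics is a toy stand-in for a meter/timer pair: items delivered,
// items rejected, and total time spent waiting on requests.
type fetchMetrics struct {
	in, drop int64
	reqNanos int64
}

func (m *fetchMetrics) deliver(start time.Time, items, rejected int) {
	atomic.AddInt64(&m.reqNanos, int64(time.Since(start)))
	atomic.AddInt64(&m.in, int64(items))
	atomic.AddInt64(&m.drop, int64(rejected))
}

func main() {
	var headers fetchMetrics
	start := time.Now()
	// Pretend a peer returned 190 headers of which 2 were not requested.
	headers.deliver(start, 190, 2)
	fmt.Printf("in=%d drop=%d req=%v\n", headers.in, headers.drop, time.Duration(headers.reqNanos))
}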
@@ -23,6 +23,7 @@ import (
    "errors"
    "fmt"
    "math"
+   "math/big"
    "sort"
    "strings"
    "sync"
@@ -37,10 +38,8 @@ const (
    measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
 )

-// Hash and block fetchers belonging to eth/61 and below
-type relativeHashFetcherFn func(common.Hash) error
-type absoluteHashFetcherFn func(uint64, int) error
-type blockFetcherFn func([]common.Hash) error
+// Head hash and total difficulty retriever for
+type currentHeadRetrievalFn func() (common.Hash, *big.Int)

 // Block header and body fetchers belonging to eth/62 and above
 type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
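The replacement type above swaps the eth/61 hash fetchers for a callback that reports a peer's current head hash and total difficulty on demand. A minimal sketch of that callback shape follows, with Hash standing in for common.Hash; the variable names are illustrative only.

package main

import (
	"fmt"
	"math/big"
)

// Hash is a stand-in for common.Hash.
type Hash [32]byte

// currentHeadRetrievalFn mirrors the callback added in this hunk: instead of
// caching a peer's head hash at registration time, the downloader asks for
// the current head (and total difficulty) whenever it needs it.
type currentHeadRetrievalFn func() (Hash, *big.Int)

func main() {
	var head Hash
	head[0] = 0xde
	td := big.NewInt(131072)

	var fetchHead currentHeadRetrievalFn = func() (Hash, *big.Int) {
		return head, td
	}

	h, d := fetchHead()
	fmt.Printf("head=%x... td=%v\n", h[:4], d)
}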
@@ -58,7 +57,6 @@ var (
 // peer represents an active peer from which hashes and blocks are retrieved.
 type peer struct {
    id   string      // Unique identifier of the peer
-   head common.Hash // Hash of the peers latest known block

    headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1)
    blockIdle  int32 // Current block activity state of the peer (idle = 0, active = 1)
@@ -79,9 +77,7 @@ type peer struct {

    lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

-   getRelHashes relativeHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an origin hash
-   getAbsHashes absoluteHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an absolute position
-   getBlocks    blockFetcherFn        // [eth/61] Method to retrieve a batch of blocks
+   currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer

    getRelHeaders relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
    getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
@@ -96,19 +92,14 @@ type peer struct {

 // newPeer create a new downloader peer, with specific hash and block retrieval
 // mechanisms.
-func newPeer(id string, version int, head common.Hash,
-   getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
+func newPeer(id string, version int, currentHead currentHeadRetrievalFn,
    getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
    getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
    return &peer{
        id:      id,
-       head:    head,
        lacking: make(map[common.Hash]struct{}),

-       getRelHashes: getRelHashes,
-       getAbsHashes: getAbsHashes,
-       getBlocks:    getBlocks,
-
+       currentHead:    currentHead,
        getRelHeaders:  getRelHeaders,
        getAbsHeaders:  getAbsHeaders,
        getBlockBodies: getBlockBodies,
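newPeer now takes the head-retrieval callback instead of a static head hash, and the eth/61 fetcher parameters disappear entirely. Below is a stripped-down sketch of that constructor shape with a single header fetcher; all names here are illustrative rather than the downloader's real ones.

package main

import (
	"fmt"
	"math/big"
)

type Hash [32]byte

type headRetrievalFn func() (Hash, *big.Int)
type headerFetcherFn func(from uint64, count int) error

// peer sketches the reduced struct after the eth/61 callbacks are gone: one
// head-retrieval hook plus the eth/62+ fetchers (only one shown here).
type peer struct {
	id          string
	version     int
	currentHead headRetrievalFn
	getHeaders  headerFetcherFn
}

func newPeer(id string, version int, currentHead headRetrievalFn, getHeaders headerFetcherFn) *peer {
	return &peer{id: id, version: version, currentHead: currentHead, getHeaders: getHeaders}
}

func main() {
	p := newPeer("peer-1", 63,
		func() (Hash, *big.Int) { return Hash{}, big.NewInt(1) },
		func(from uint64, count int) error { fmt.Println("fetch", count, "headers from", from); return nil },
	)
	head, td := p.currentHead()
	fmt.Printf("%s head=%x td=%v\n", p.id, head[:2], td)
	_ = p.getHeaders(1, 192)
}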
@@ -138,28 +129,6 @@ func (p *peer) Reset() {
    p.lacking = make(map[common.Hash]struct{})
 }

-// Fetch61 sends a block retrieval request to the remote peer.
-func (p *peer) Fetch61(request *fetchRequest) error {
-   // Sanity check the protocol version
-   if p.version != 61 {
-       panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", p.version))
-   }
-   // Short circuit if the peer is already fetching
-   if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
-       return errAlreadyFetching
-   }
-   p.blockStarted = time.Now()
-
-   // Convert the hash set to a retrievable slice
-   hashes := make([]common.Hash, 0, len(request.Hashes))
-   for hash, _ := range request.Hashes {
-       hashes = append(hashes, hash)
-   }
-   go p.getBlocks(hashes)
-
-   return nil
-}
-
 // FetchHeaders sends a header retrieval request to the remote peer.
 func (p *peer) FetchHeaders(from uint64, count int) error {
    // Sanity check the protocol version
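The deleted Fetch61 guarded against concurrent requests with an atomic CompareAndSwap on a per-peer idle flag, the same pattern the surviving FetchHeaders keeps using. A standalone sketch of that guard is below; fetchGuard is a made-up name, not a type from this codebase.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errAlreadyFetching = errors.New("already fetching blocks from peer")

// fetchGuard shows the idle-flag pattern: an atomic 0/1 flag flipped with
// CompareAndSwap so only one request per peer and data type is in flight.
type fetchGuard struct {
	idle int32 // 0 = idle, 1 = active
}

func (g *fetchGuard) start() error {
	if !atomic.CompareAndSwapInt32(&g.idle, 0, 1) {
		return errAlreadyFetching
	}
	return nil
}

func (g *fetchGuard) done() { atomic.StoreInt32(&g.idle, 0) }

func main() {
	var g fetchGuard
	fmt.Println(g.start()) // <nil>: request dispatched
	fmt.Println(g.start()) // already fetching
	g.done()
	fmt.Println(g.start()) // <nil> again after the previous fetch completed
}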
@@ -481,20 +450,6 @@ func (ps *peerSet) AllPeers() []*peer {
    return list
 }

-// BlockIdlePeers retrieves a flat list of all the currently idle peers within the
-// active peer set, ordered by their reputation.
-func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
-   idle := func(p *peer) bool {
-       return atomic.LoadInt32(&p.blockIdle) == 0
-   }
-   throughput := func(p *peer) float64 {
-       p.lock.RLock()
-       defer p.lock.RUnlock()
-       return p.blockThroughput
-   }
-   return ps.idlePeers(61, 61, idle, throughput)
-}
-
 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
 // within the active peer set, ordered by their reputation.
 func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
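BlockIdlePeers goes away with eth/61, but the remaining *IdlePeers helpers all follow the same recipe: filter the peers whose idle flag is clear, then order them by measured throughput so the best peers are handed work first. A self-contained approximation follows; peerStat and idlePeers are illustrative names, not the peer set's API.

package main

import (
	"fmt"
	"sort"
)

type peerStat struct {
	id         string
	idle       bool
	throughput float64
}

// idlePeers filters the idle peers and sorts them by throughput, highest first.
func idlePeers(peers []peerStat) []peerStat {
	idle := make([]peerStat, 0, len(peers))
	for _, p := range peers {
		if p.idle {
			idle = append(idle, p)
		}
	}
	sort.Slice(idle, func(i, j int) bool { return idle[i].throughput > idle[j].throughput })
	return idle
}

func main() {
	peers := []peerStat{{"a", true, 120.5}, {"b", false, 300.0}, {"c", true, 240.1}}
	for _, p := range idlePeers(peers) {
		fmt.Println(p.id, p.throughput)
	}
}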
@@ -45,7 +45,6 @@ var (

 var (
    errNoFetchesPending = errors.New("no fetches pending")
-   errStateSyncPending = errors.New("state trie sync already scheduled")
    errStaleDelivery    = errors.New("stale delivery")
 )

@@ -74,10 +73,6 @@ type queue struct {
    mode          SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
    fastSyncPivot uint64   // Block number where the fast sync pivots into archive synchronisation mode

-   hashPool    map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority)
-   hashQueue   *prque.Prque        // [eth/61] Priority queue of the block hashes to fetch
-   hashCounter int                 // [eth/61] Counter indexing the added hashes to ensure retrieval order
-
    headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

    // Headers are "special", they download in batches, supported by a skeleton chain
@@ -85,7 +80,6 @@ type queue struct {
    headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
    headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
    headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
-   headerDonePool  map[uint64]struct{}            // [eth/62] Set of the completed header fetches
    headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
    headerProced    int                            // [eth/62] Number of headers already processed from the results
    headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
@@ -124,8 +118,6 @@ type queue struct {
 func newQueue(stateDb ethdb.Database) *queue {
    lock := new(sync.Mutex)
    return &queue{
-       hashPool:       make(map[common.Hash]int),
-       hashQueue:      prque.New(),
        headerPendPool: make(map[string]*fetchRequest),
        headerContCh:   make(chan bool),
        blockTaskPool:  make(map[common.Hash]*types.Header),
@@ -158,10 +150,6 @@ func (q *queue) Reset() {
    q.mode = FullSync
    q.fastSyncPivot = 0

-   q.hashPool = make(map[common.Hash]int)
-   q.hashQueue.Reset()
-   q.hashCounter = 0
-
    q.headerHead = common.Hash{}

    q.headerPendPool = make(map[string]*fetchRequest)
@@ -208,7 +196,7 @@ func (q *queue) PendingBlocks() int {
    q.lock.Lock()
    defer q.lock.Unlock()

-   return q.hashQueue.Size() + q.blockTaskQueue.Size()
+   return q.blockTaskQueue.Size()
 }

 // PendingReceipts retrieves the number of block receipts pending for retrieval.
@@ -272,7 +260,7 @@ func (q *queue) Idle() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

-   queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
+   queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
    pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
    cached := len(q.blockDonePool) + len(q.receiptDonePool)

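With the hash queue gone, PendingBlocks and Idle derive their answers purely from the header-driven task pools. The toy below mirrors that bookkeeping: the queue counts as idle only when nothing is queued, in flight, or cached awaiting import; queueState is a made-up type, not part of the downloader.

package main

import "fmt"

// queueState is a toy view of the counters Idle() inspects: scheduled tasks,
// in-flight requests, and completed-but-unimported results.
type queueState struct {
	queued, pending, cached int
}

func (q queueState) idle() bool {
	return q.queued+q.pending+q.cached == 0
}

func main() {
	fmt.Println(queueState{}.idle())                      // true
	fmt.Println(queueState{queued: 3, pending: 1}.idle()) // false
}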
@@ -323,34 +311,6 @@ func (q *queue) ShouldThrottleReceipts() bool {
    return pending >= len(q.resultCache)-len(q.receiptDonePool)
 }

-// Schedule61 adds a set of hashes for the download queue for scheduling, returning
-// the new hashes encountered.
-func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
-   q.lock.Lock()
-   defer q.lock.Unlock()
-
-   // Insert all the hashes prioritised in the arrival order
-   inserts := make([]common.Hash, 0, len(hashes))
-   for _, hash := range hashes {
-       // Skip anything we already have
-       if old, ok := q.hashPool[hash]; ok {
-           glog.V(logger.Warn).Infof("Hash %x already scheduled at index %v", hash, old)
-           continue
-       }
-       // Update the counters and insert the hash
-       q.hashCounter = q.hashCounter + 1
-       inserts = append(inserts, hash)
-
-       q.hashPool[hash] = q.hashCounter
-       if fifo {
-           q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets schedules first
-       } else {
-           q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets schedules first
-       }
-   }
-   return inserts
-}
-
 // ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
 // up an already retrieved header skeleton.
 func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
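Schedule61 pushed raw hashes into a priority queue keyed by arrival order; the surviving scheduling paths queue header-derived tasks by block number so the lowest blocks are fetched first. Here is a rough equivalent using the standard library's container/heap instead of the prque package; task and taskQueue are illustrative names.

package main

import (
	"container/heap"
	"fmt"
)

// task is a pending fetch keyed by block number; the queue pops the lowest
// number first, matching how body/receipt tasks are prioritised (the real
// code pushes with priority -number into a max-priority prque).
type task struct{ number uint64 }

type taskQueue []task

func (q taskQueue) Len() int            { return len(q) }
func (q taskQueue) Less(i, j int) bool  { return q[i].number < q[j].number }
func (q taskQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *taskQueue) Push(x interface{}) { *q = append(*q, x.(task)) }
func (q *taskQueue) Pop() interface{} {
	old := *q
	n := len(old)
	item := old[n-1]
	*q = old[:n-1]
	return item
}

func main() {
	q := &taskQueue{}
	heap.Init(q)
	for _, n := range []uint64{42, 7, 19} {
		heap.Push(q, task{number: n})
	}
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q).(task).number) // 7, 19, 42
	}
}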
@@ -550,15 +510,6 @@ func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
    return request
 }

-// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
-// previously failed download.
-func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
-   q.lock.Lock()
-   defer q.lock.Unlock()
-
-   return q.reserveHashes(p, count, q.hashQueue, nil, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool))
-}
-
 // ReserveNodeData reserves a set of node data hashes for the given peer, skipping
 // any previously failed download.
 func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
@@ -753,11 +704,6 @@ func (q *queue) CancelHeaders(request *fetchRequest) {
    q.cancel(request, q.headerTaskQueue, q.headerPendPool)
 }

-// CancelBlocks aborts a fetch request, returning all pending hashes to the queue.
-func (q *queue) CancelBlocks(request *fetchRequest) {
-   q.cancel(request, q.hashQueue, q.blockPendPool)
-}
-
 // CancelBodies aborts a body fetch request, returning all pending headers to the
 // task queue.
 func (q *queue) CancelBodies(request *fetchRequest) {
@@ -801,9 +747,6 @@ func (q *queue) Revoke(peerId string) {
    defer q.lock.Unlock()

    if request, ok := q.blockPendPool[peerId]; ok {
-       for hash, index := range request.Hashes {
-           q.hashQueue.Push(hash, float32(index))
-       }
        for _, header := range request.Headers {
            q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
        }
@@ -832,15 +775,6 @@ func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
    return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
 }

-// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
-// canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
-   q.lock.Lock()
-   defer q.lock.Unlock()
-
-   return q.expire(timeout, q.blockPendPool, q.hashQueue, blockTimeoutMeter)
-}
-
 // ExpireBodies checks for in flight block body requests that exceeded a timeout
 // allowance, canceling them and returning the responsible peers for penalisation.
 func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
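ExpireBlocks disappears, but the shared expire helper it wrapped stays: walk the in-flight requests, cancel anything older than the allowance, and report the offending peers for penalisation. A minimal, dependency-free version of that idea is sketched below; request and expire are illustrative names.

package main

import (
	"fmt"
	"time"
)

// request is a toy in-flight fetch: which peer holds it, when it was sent,
// and how many items it covers.
type request struct {
	peer  string
	sent  time.Time
	items int
}

// expire cancels every request older than the timeout and returns, per peer,
// the number of items that were held up.
func expire(pending map[string]*request, timeout time.Duration) map[string]int {
	expiries := make(map[string]int)
	for id, req := range pending {
		if time.Since(req.sent) > timeout {
			expiries[id] = req.items
			delete(pending, id)
		}
	}
	return expiries
}

func main() {
	pending := map[string]*request{
		"slow": {peer: "slow", sent: time.Now().Add(-10 * time.Second), items: 128},
		"fast": {peer: "fast", sent: time.Now(), items: 64},
	}
	fmt.Println(expire(pending, 5*time.Second)) // map[slow:128]
}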
@@ -907,74 +841,6 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
    return expiries
 }

-// DeliverBlocks injects a block retrieval response into the download queue. The
-// method returns the number of blocks accepted from the delivery and also wakes
-// any threads waiting for data delivery.
-func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
-   q.lock.Lock()
-   defer q.lock.Unlock()
-
-   // Short circuit if the blocks were never requested
-   request := q.blockPendPool[id]
-   if request == nil {
-       return 0, errNoFetchesPending
-   }
-   blockReqTimer.UpdateSince(request.Time)
-   delete(q.blockPendPool, id)
-
-   // If no blocks were retrieved, mark them as unavailable for the origin peer
-   if len(blocks) == 0 {
-       for hash, _ := range request.Hashes {
-           request.Peer.MarkLacking(hash)
-       }
-   }
-   // Iterate over the downloaded blocks and add each of them
-   accepted, errs := 0, make([]error, 0)
-   for _, block := range blocks {
-       // Skip any blocks that were not requested
-       hash := block.Hash()
-       if _, ok := request.Hashes[hash]; !ok {
-           errs = append(errs, fmt.Errorf("non-requested block %x", hash))
-           continue
-       }
-       // Reconstruct the next result if contents match up
-       index := int(block.Number().Int64() - int64(q.resultOffset))
-       if index >= len(q.resultCache) || index < 0 {
-           errs = []error{errInvalidChain}
-           break
-       }
-       q.resultCache[index] = &fetchResult{
-           Header:       block.Header(),
-           Transactions: block.Transactions(),
-           Uncles:       block.Uncles(),
-       }
-       q.blockDonePool[block.Hash()] = struct{}{}
-
-       delete(request.Hashes, hash)
-       delete(q.hashPool, hash)
-       accepted++
-   }
-   // Return all failed or missing fetches to the queue
-   for hash, index := range request.Hashes {
-       q.hashQueue.Push(hash, float32(index))
-   }
-   // Wake up WaitResults
-   if accepted > 0 {
-       q.active.Signal()
-   }
-   // If none of the blocks were good, it's a stale delivery
-   switch {
-   case len(errs) == 0:
-       return accepted, nil
-   case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBlock):
-       return accepted, errs[0]
-   case len(errs) == len(blocks):
-       return accepted, errStaleDelivery
-   default:
-       return accepted, fmt.Errorf("multiple failures: %v", errs)
-   }
-}
-
 // DeliverHeaders injects a header retrieval response into the header results
 // cache. This method either accepts all headers it received, or none of them
 // if they do not map correctly to the skeleton.
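The deleted DeliverBlocks shows the delivery bookkeeping the remaining Deliver* methods share: accept only what was actually requested, and if nothing in the response was requested, treat it as a stale delivery. A compact sketch of just that accounting follows; accept and the maps are illustrative, not the queue's real API.

package main

import (
	"errors"
	"fmt"
)

var errStaleDelivery = errors.New("stale delivery")

// accept counts how many delivered items were actually requested; if none
// were (but something was delivered), the response is reported as stale.
func accept(requested map[uint64]bool, delivered []uint64) (int, error) {
	accepted := 0
	for _, n := range delivered {
		if requested[n] {
			accepted++
			delete(requested, n)
		}
	}
	if accepted == 0 && len(delivered) > 0 {
		return 0, errStaleDelivery
	}
	return accepted, nil
}

func main() {
	requested := map[uint64]bool{100: true, 101: true, 102: true}
	n, err := accept(requested, []uint64{100, 101, 999})
	fmt.Println(n, err) // 2 <nil>
	n, err = accept(requested, []uint64{500})
	fmt.Println(n, err) // 0 stale delivery
}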
Some files were not shown because too many files have changed in this diff.