Compare commits
2 commits

Author | SHA1       | Date
-------|------------|-----
       | fd512fa12c |
       | dc3fb69dce |
Godeps/Godeps.json — 15 changes (generated)

@@ -5,6 +5,11 @@
         "./..."
     ],
     "Deps": [
+        {
+            "ImportPath": "code.google.com/p/go-uuid/uuid",
+            "Comment": "null-12",
+            "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
+        },
         {
             "ImportPath": "github.com/codegangsta/cli",
             "Comment": "1.2.0-95-g9b2bd2b",
@@ -16,8 +21,8 @@
         },
         {
             "ImportPath": "github.com/ethereum/ethash",
-            "Comment": "v23.1-234-g062e40a",
-            "Rev": "062e40a1a1671f5a5102862b56e4c56f68a732f5"
+            "Comment": "v23.1-227-g8f6ccaa",
+            "Rev": "8f6ccaaef9b418553807a73a95cb5f49cd3ea39f"
         },
         {
             "ImportPath": "github.com/fatih/color",
@@ -46,7 +51,7 @@
         },
         {
             "ImportPath": "github.com/mattn/go-isatty",
-            "Rev": "7fcbc72f853b92b5720db4a6b8482be612daef24"
+            "Rev": "fdbe02a1b44e75977b2690062b83cf507d70c013"
         },
         {
             "ImportPath": "github.com/mattn/go-runewidth",
@@ -57,10 +62,6 @@
             "ImportPath": "github.com/nsf/termbox-go",
             "Rev": "675ffd907b7401b8a709a5ef2249978af5616bb2"
         },
-        {
-            "ImportPath": "github.com/pborman/uuid",
-            "Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
-        },
         {
             "ImportPath": "github.com/peterh/liner",
             "Rev": "29f6a646557d83e2b6e9ba05c45fbea9c006dbe8"
@@ -1,4 +1,4 @@
-Copyright (c) 2009,2014 Google Inc. All rights reserved.
+Copyright (c) 2009 Google Inc. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -40,15 +40,15 @@ func (t Time) UnixTime() (sec, nsec int64) {
 }
 
 // GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
-// clock sequence as well as adjusting the clock sequence as needed. An error
-// is returned if the current time cannot be determined.
-func GetTime() (Time, uint16, error) {
+// adjusts the clock sequence as needed. An error is returned if the current
+// time cannot be determined.
+func GetTime() (Time, error) {
    defer mu.Unlock()
    mu.Lock()
    return getTime()
 }
 
-func getTime() (Time, uint16, error) {
+func getTime() (Time, error) {
    t := timeNow()
 
    // If we don't have a clock sequence already, set one.
@@ -63,7 +63,7 @@ func getTime() {
        clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
    }
    lasttime = now
-   return Time(now), clock_seq, nil
+   return Time(now), nil
 }
 
 // ClockSequence returns the current clock sequence, generating one if not
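The signature change above is the visible difference between the two vendored uuid packages: pborman/uuid returns the timestamp and clock sequence together, the older go-uuid only the timestamp. The rule both enforce is the same: when the clock does not move forward between calls, the 14-bit clock sequence is bumped so that (timestamp, clock sequence) pairs never repeat. A minimal, self-contained sketch of that rule (names are mine, not the library's):

```go
package main

import "fmt"

var (
	lastTime uint64
	clockSeq uint16 = 0x8000 // variant bits 10xx pre-set, as in RFC 4122
)

// next returns a (timestamp, clock sequence) pair that never repeats: if
// now is not strictly greater than the last timestamp handed out, the
// 14-bit clock sequence is incremented instead, mirroring the
// ((clock_seq + 1) & 0x3fff) | 0x8000 line in the hunk above.
func next(now uint64) (uint64, uint16) {
	if now <= lastTime {
		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
	}
	lastTime = now
	return now, clockSeq
}

func main() {
	fmt.Println(next(100)) // 100 32768
	fmt.Println(next(100)) // same tick: clock sequence bumps to 32769
	fmt.Println(next(101)) // 101 32769
}
```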
@@ -19,7 +19,7 @@ func NewUUID() UUID {
        SetNodeInterface("")
    }
 
-   now, seq, err := GetTime()
+   now, err := GetTime()
    if err != nil {
        return nil
    }
@@ -34,7 +34,7 @@ func NewUUID() UUID {
    binary.BigEndian.PutUint32(uuid[0:], time_low)
    binary.BigEndian.PutUint16(uuid[4:], time_mid)
    binary.BigEndian.PutUint16(uuid[6:], time_hi)
-   binary.BigEndian.PutUint16(uuid[8:], seq)
+   binary.BigEndian.PutUint16(uuid[8:], clock_seq)
    copy(uuid[10:], nodeID)
 
    return uuid
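The PutUint calls above spell out the version-1 wire layout: bytes 0-7 carry the 60-bit timestamp split into low/mid/hi fields, bytes 8-9 the clock sequence, bytes 10-15 the node ID. A standalone sketch of the same packing (illustrative values, not library code):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var uuid [16]byte
	ts := uint64(0x1234567890abcde) // 60-bit count of 100ns ticks since 15 Oct 1582
	timeLow := uint32(ts & 0xffffffff)
	timeMid := uint16((ts >> 32) & 0xffff)
	timeHi := uint16((ts>>48)&0x0fff) | 0x1000 // version 1 in the top nibble

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], 0x8001)                // clock sequence, variant bits 10xx
	copy(uuid[10:], []byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01}) // node ID

	fmt.Printf("%x-%x-%x-%x-%x\n", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
}
```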
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h — 8 changes (generated, vendored)

@@ -35,14 +35,10 @@
 #elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
 #define ethash_swap_u32(input_) bswap32(input_)
 #define ethash_swap_u64(input_) bswap64(input_)
-#elif defined(__OpenBSD__)
-#include <endian.h>
-#define ethash_swap_u32(input_) swap32(input_)
-#define ethash_swap_u64(input_) swap64(input_)
 #else // posix
 #include <byteswap.h>
-#define ethash_swap_u32(input_) bswap_32(input_)
-#define ethash_swap_u64(input_) bswap_64(input_)
+#define ethash_swap_u32(input_) __bswap_32(input_)
+#define ethash_swap_u64(input_) __bswap_64(input_)
 #endif
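These macros exist because libethash needs explicit 32/64-bit byte swaps on big-endian hosts, and each platform spells the intrinsic differently. For comparison, Go (the language of the rest of this diff) expresses the same operation portably in the standard library; a small sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	// Direct byte reversal, the moral equivalent of bswap_32/__bswap_32:
	fmt.Printf("%#08x\n", bits.ReverseBytes32(0x11223344)) // 0x44332211

	// The same swap expressed as an encode/decode round trip:
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], 0x11223344)
	fmt.Printf("%#08x\n", binary.LittleEndian.Uint32(buf[:])) // 0x44332211
}
```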
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/fnv.h — 4 changes (generated, vendored)

@@ -29,10 +29,6 @@ extern "C" {
 
 #define FNV_PRIME 0x01000193
 
-/* The FNV-1 spec multiplies the prime with the input one byte (octet) in turn.
-   We instead multiply it with the full 32-bit input.
-   This gives a different result compared to a canonical FNV-1 implementation.
-*/
 static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
 {
     return x * FNV_PRIME ^ y;
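The removed comment is the only place documenting how ethash's fnv_hash deviates from canonical FNV-1: the whole 32-bit first word is multiplied by the prime in one step instead of folding in one octet at a time. A one-function Go sketch of the same combine step:

```go
package main

import "fmt"

const fnvPrime = 0x01000193

// fnv mirrors libethash's fnv_hash: multiply the full 32-bit input by the
// FNV prime, then XOR in the second word. This is deliberately not
// byte-at-a-time FNV-1, so its output differs from a canonical implementation.
func fnv(x, y uint32) uint32 {
	return x*fnvPrime ^ y
}

func main() {
	fmt.Printf("%#08x\n", fnv(0xdeadbeef, 0xcafebabe))
}
```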
Godeps/_workspace/src/github.com/mattn/go-isatty/LICENSE — 9 changes (generated, vendored)

@@ -1,9 +0,0 @@ (file removed)
-Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
-
-MIT License (Expat)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Godeps/_workspace/src/github.com/mattn/go-isatty/isatty_bsd.go — 2 changes (generated, vendored)

@@ -1,4 +1,4 @@
-// +build darwin freebsd openbsd netbsd
+// +build darwin freebsd
 
 package isatty
Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS — 1 change (generated, vendored)

@@ -1 +0,0 @@ (file removed)
-Paul Borman <borman@google.com>
Godeps/_workspace/src/github.com/pborman/uuid/json.go — 30 changes (generated, vendored)

@@ -1,30 +0,0 @@ (file removed)
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import "errors"
-
-func (u UUID) MarshalJSON() ([]byte, error) {
-    if len(u) == 0 {
-        return []byte(`""`), nil
-    }
-    return []byte(`"` + u.String() + `"`), nil
-}
-
-func (u *UUID) UnmarshalJSON(data []byte) error {
-    if len(data) == 0 || string(data) == `""` {
-        return nil
-    }
-    if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
-        return errors.New("invalid UUID format")
-    }
-    data = data[1 : len(data)-1]
-    uu := Parse(string(data))
-    if uu == nil {
-        return errors.New("invalid UUID format")
-    }
-    *u = uu
-    return nil
-}
Godeps/_workspace/src/github.com/pborman/uuid/json_test.go — 32 changes (generated, vendored)

@@ -1,32 +0,0 @@ (file removed)
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
-    "encoding/json"
-    "reflect"
-    "testing"
-)
-
-var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
-
-func TestJSON(t *testing.T) {
-    type S struct {
-        ID1 UUID
-        ID2 UUID
-    }
-    s1 := S{ID1: testUUID}
-    data, err := json.Marshal(&s1)
-    if err != nil {
-        t.Fatal(err)
-    }
-    var s2 S
-    if err := json.Unmarshal(data, &s2); err != nil {
-        t.Fatal(err)
-    }
-    if !reflect.DeepEqual(&s1, &s2) {
-        t.Errorf("got %#v, want %#v", s2, s1)
-    }
-}
Godeps/_workspace/src/github.com/pborman/uuid/seq_test.go — 66 changes (generated, vendored)

@@ -1,66 +0,0 @@ (file removed)
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
-    "flag"
-    "runtime"
-    "testing"
-    "time"
-)
-
-// This test is only run when --regressions is passed on the go test line.
-var regressions = flag.Bool("regressions", false, "run uuid regression tests")
-
-// TestClockSeqRace tests for a particular race condition of returning two
-// identical Version1 UUIDs. The duration of 1 minute was chosen as the race
-// condition, before being fixed, nearly always occured in under 30 seconds.
-func TestClockSeqRace(t *testing.T) {
-    if !*regressions {
-        t.Skip("skipping regression tests")
-    }
-    duration := time.Minute
-
-    done := make(chan struct{})
-    defer close(done)
-
-    ch := make(chan UUID, 10000)
-    ncpu := runtime.NumCPU()
-    switch ncpu {
-    case 0, 1:
-        // We can't run the test effectively.
-        t.Skip("skipping race test, only one CPU detected")
-        return
-    default:
-        runtime.GOMAXPROCS(ncpu)
-    }
-    for i := 0; i < ncpu; i++ {
-        go func() {
-            for {
-                select {
-                case <-done:
-                    return
-                case ch <- NewUUID():
-                }
-            }
-        }()
-    }
-
-    uuids := make(map[string]bool)
-    cnt := 0
-    start := time.Now()
-    for u := range ch {
-        s := u.String()
-        if uuids[s] {
-            t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
-            return
-        }
-        uuids[s] = true
-        if time.Since(start) > duration {
-            return
-        }
-        cnt++
-    }
-}
Godeps/_workspace/src/github.com/pborman/uuid/sql.go — 40 changes (generated, vendored)

@@ -1,40 +0,0 @@ (file removed)
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
-    "errors"
-    "fmt"
-)
-
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently
-// Currently, database types that map to string and []byte are supported. Please
-// consult database-specific driver documentation for matching types.
-func (uuid *UUID) Scan(src interface{}) error {
-    switch src.(type) {
-    case string:
-        // see uuid.Parse for required string format
-        parsed := Parse(src.(string))
-
-        if parsed == nil {
-            return errors.New("Scan: invalid UUID format")
-        }
-
-        *uuid = parsed
-    case []byte:
-        // assumes a simple slice of bytes, just check validity and store
-        u := UUID(src.([]byte))
-
-        if u.Variant() == Invalid {
-            return errors.New("Scan: invalid UUID format")
-        }
-
-        *uuid = u
-    default:
-        return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
-    }
-
-    return nil
-}
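The Scan method above is what lets a UUID be read straight out of a database/sql query. A usage sketch; the sqlite3 driver choice here is illustrative, not part of this diff:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // hypothetical driver pick for the demo
	"github.com/pborman/uuid"       // package whose sql.go is removed above
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var id uuid.UUID
	// The driver hands Scan a string (or []byte); both branches of the
	// deleted Scan method accept it and validate the format.
	err = db.QueryRow(`SELECT 'f47ac10b-58cc-0372-8567-0e02b2c3d479'`).Scan(&id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id.String())
}
```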
Godeps/_workspace/src/github.com/pborman/uuid/sql_test.go — 53 changes (generated, vendored)

@@ -1,53 +0,0 @@ (file removed)
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
-    "strings"
-    "testing"
-)
-
-func TestScan(t *testing.T) {
-    var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
-    var byteTest []byte = Parse(stringTest)
-    var badTypeTest int = 6
-    var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4"
-    var invalidByteTest []byte = Parse(invalidTest)
-
-    var uuid UUID
-    err := (&uuid).Scan(stringTest)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    err = (&uuid).Scan(byteTest)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    err = (&uuid).Scan(badTypeTest)
-    if err == nil {
-        t.Error("int correctly parsed and shouldn't have")
-    }
-    if !strings.Contains(err.Error(), "unable to scan type") {
-        t.Error("attempting to parse an int returned an incorrect error message")
-    }
-
-    err = (&uuid).Scan(invalidTest)
-    if err == nil {
-        t.Error("invalid uuid was parsed without error")
-    }
-    if !strings.Contains(err.Error(), "invalid UUID") {
-        t.Error("attempting to parse an invalid UUID returned an incorrect error message")
-    }
-
-    err = (&uuid).Scan(invalidByteTest)
-    if err == nil {
-        t.Error("invalid byte uuid was parsed without error")
-    }
-    if !strings.Contains(err.Error(), "invalid UUID") {
-        t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
-    }
-}
Makefile — 27 changes

@@ -10,30 +10,6 @@ geth:
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."
 
-geth-cross: geth-linux geth-darwin geth-windows geth-android
-	@echo "Full cross compilation done:"
-	@ls -l $(GOBIN)/geth-*
-
-geth-linux: xgo
-	build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=linux/* -v ./cmd/geth
-	@echo "Linux cross compilation done:"
-	@ls -l $(GOBIN)/geth-linux-*
-
-geth-darwin: xgo
-	build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=darwin/* -v ./cmd/geth
-	@echo "Darwin cross compilation done:"
-	@ls -l $(GOBIN)/geth-darwin-*
-
-geth-windows: xgo
-	build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=windows/* -v ./cmd/geth
-	@echo "Windows cross compilation done:"
-	@ls -l $(GOBIN)/geth-windows-*
-
-geth-android: xgo
-	build/env.sh $(GOBIN)/xgo --dest=$(GOBIN) --deps=https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2 --targets=android-16/*,android-21/* -v ./cmd/geth
-	@echo "Android cross compilation done:"
-	@ls -l $(GOBIN)/geth-android-*
-
 evm:
 	build/env.sh $(GOROOT)/bin/go install -v $(shell build/ldflags.sh) ./cmd/evm
 	@echo "Done building."
@@ -52,8 +28,5 @@ test: all
 travis-test-with-coverage: all
 	build/env.sh build/test-global-coverage.sh
 
-xgo:
-	build/env.sh go get github.com/karalabe/xgo
-
 clean:
 	rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*
@@ -7,12 +7,7 @@ if [ ! -f "build/env.sh" ]; then
 	exit 2
 fi
 
-# Since Go 1.5, the separator char for link time assignments
-# is '=' and using ' ' prints a warning. However, Go < 1.5 does
-# not support using '='.
-sep=$(go version | awk '{ if ($3 >= "go1.5" || index($3, "devel")) print "="; else print " "; }' -)
-
 # set gitCommit when running from a Git checkout.
 if [ -f ".git/HEAD" ]; then
-	echo "-ldflags '-X main.gitCommit$sep$(git rev-parse HEAD)'"
+	echo "-ldflags '-X main.gitCommit $(git rev-parse HEAD)'"
 fi
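Both variants of the script emit a -X linker assignment, which stamps a value into a package-level string variable at link time; the removed lines only auto-select the separator ('=' for Go >= 1.5, ' ' before that). A minimal sketch of the receiving side:

```go
// Build with either syntax to see the variable populated:
//   go build -ldflags "-X main.gitCommit=abc123"   (Go >= 1.5)
//   go build -ldflags "-X main.gitCommit abc123"   (pre-1.5)
package main

import "fmt"

// gitCommit is set via the linker flag above; it stays empty when the
// binary is built without -ldflags.
var gitCommit string

func main() {
	fmt.Println("commit:", gitCommit)
}
```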
@@ -22,6 +22,7 @@ import (
 
 	"github.com/codegangsta/cli"
 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/tests"
@@ -91,6 +92,7 @@ func runBlockTest(ctx *cli.Context) {
 	if err != nil {
 		utils.Fatalf("%v", err)
 	}
+	defer ethereum.Stop()
 	if rpc {
 		fmt.Println("Block Test post state validated, starting RPC interface.")
 		startEth(ctx, ethereum)
@@ -101,31 +103,34 @@ func runBlockTest(ctx *cli.Context) {
 
 func runOneBlockTest(ctx *cli.Context, test *tests.BlockTest) (*eth.Ethereum, error) {
 	cfg := utils.MakeEthConfig(ClientIdentifier, Version, ctx)
-	cfg.NewDB = func(path string) (ethdb.Database, error) { return ethdb.NewMemDatabase() }
+	cfg.NewDB = func(path string) (common.Database, error) { return ethdb.NewMemDatabase() }
 	cfg.MaxPeers = 0 // disable network
 	cfg.Shh = false  // disable whisper
 	cfg.NAT = nil    // disable port mapping
 
 	ethereum, err := eth.New(cfg)
 	if err != nil {
 		return nil, err
 	}
+	// if err := ethereum.Start(); err != nil {
+	// 	return nil, err
+	// }
 
 	// import the genesis block
 	ethereum.ResetWithGenesisBlock(test.Genesis)
 
 	// import pre accounts
-	_, err = test.InsertPreState(ethereum)
+	statedb, err := test.InsertPreState(ethereum)
 	if err != nil {
 		return ethereum, fmt.Errorf("InsertPreState: %v", err)
 	}
 
-	cm := ethereum.ChainManager()
-	validBlocks, err := test.TryBlocksInsert(cm)
-	if err != nil {
+	if err := test.TryBlocksInsert(ethereum.ChainManager()); err != nil {
 		return ethereum, fmt.Errorf("Block Test load error: %v", err)
 	}
-	newDB := cm.State()
-	if err := test.ValidatePostState(newDB); err != nil {
+
+	if err := test.ValidatePostState(statedb); err != nil {
 		return ethereum, fmt.Errorf("post state validation failed: %v", err)
 	}
-	return ethereum, test.ValidateImportedHeaders(cm, validBlocks)
+	return ethereum, nil
 }
@@ -29,7 +29,6 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/logger/glog"
 )
 
@@ -192,7 +191,7 @@ func hashish(x string) bool {
 	return err != nil
 }
 
-func closeAll(dbs ...ethdb.Database) {
+func closeAll(dbs ...common.Database) {
 	for _, db := range dbs {
 		db.Close()
 	}
@@ -121,7 +121,7 @@ func keywordCompleter(line string) []string {
 }
 
 func apiWordCompleter(line string, pos int) (head string, completions []string, tail string) {
-	if len(line) == 0 || pos == 0 {
+	if len(line) == 0 {
 		return "", nil, ""
 	}
@@ -92,7 +92,7 @@ func testREPL
 
 	db, _ := ethdb.NewMemDatabase()
 
-	core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
+	core.WriteGenesisBlockForTesting(db, common.HexToAddress(testAddress), common.String2Big(testBalance))
 	ks := crypto.NewKeyStorePlain(filepath.Join(tmp, "keystore"))
 	am := accounts.NewManager(ks)
 	conf := &eth.Config{
@@ -103,7 +103,7 @@ func testREPL
 		Name:     "test",
 		SolcPath: testSolcPath,
 		PowTest:  true,
-		NewDB:    func(path string) (ethdb.Database, error) { return db, nil },
+		NewDB:    func(path string) (common.Database, error) { return db, nil },
 	}
 	if config != nil {
 		config(conf)
@@ -48,21 +48,16 @@ import (
 const (
 	ClientIdentifier = "Geth"
-	Version          = "1.2.2"
+	Version          = "1.1.0"
 	VersionMajor     = 1
-	VersionMinor     = 2
-	VersionPatch     = 2
+	VersionMinor     = 1
+	VersionPatch     = 0
 )
 
 var (
 	gitCommit       string // set via linker flagg
 	nodeNameVersion string
 	app             *cli.App
-
-	ExtraDataFlag = cli.StringFlag{
-		Name:  "extradata",
-		Usage: "Extra data for the miner",
-	}
 )
 
 func init() {
@@ -288,7 +283,6 @@
 		utils.DataDirFlag,
 		utils.BlockchainVersionFlag,
 		utils.OlympicFlag,
-		utils.EthVersionFlag,
 		utils.CacheFlag,
 		utils.JSpathFlag,
 		utils.ListenPortFlag,
@@ -313,7 +307,6 @@
 		utils.IPCPathFlag,
 		utils.ExecFlag,
 		utils.WhisperEnabledFlag,
-		utils.DevModeFlag,
 		utils.VMDebugFlag,
 		utils.VMForceJitFlag,
 		utils.VMJitCacheFlag,
@@ -336,12 +329,10 @@
 		utils.GpobaseStepDownFlag,
 		utils.GpobaseStepUpFlag,
 		utils.GpobaseCorrectionFactorFlag,
-		ExtraDataFlag,
 	}
 	app.Before = func(ctx *cli.Context) error {
 		utils.SetupLogger(ctx)
 		utils.SetupVM(ctx)
-		utils.SetupEth(ctx)
 		if ctx.GlobalBool(utils.PProfEanbledFlag.Name) {
 			utils.StartPProf(ctx)
 		}
@@ -360,14 +351,6 @@ func main() {
 	}
 }
 
-// makeExtra resolves extradata for the miner from a flag or returns a default.
-func makeExtra(ctx *cli.Context) []byte {
-	if ctx.GlobalIsSet(ExtraDataFlag.Name) {
-		return []byte(ctx.GlobalString(ExtraDataFlag.Name))
-	}
-	return makeDefaultExtra()
-}
-
 func makeDefaultExtra() []byte {
 	var clientInfo = struct {
 		Version uint
@@ -396,7 +379,7 @@ func run(ctx *cli.Context) {
 	}
 
 	cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
-	cfg.ExtraData = makeExtra(ctx)
+	cfg.ExtraData = makeDefaultExtra()
 
 	ethereum, err := eth.New(cfg)
 	if err != nil {
@@ -417,7 +400,7 @@ func attach(ctx *cli.Context) {
 		client, err = comms.ClientFromEndpoint(ctx.Args().First(), codec.JSON)
 	} else {
 		cfg := comms.IpcConfig{
-			Endpoint: utils.IpcSocketPath(ctx),
+			Endpoint: ctx.GlobalString(utils.IPCPathFlag.Name),
 		}
 		client, err = comms.NewIpcClient(cfg, codec.JSON)
 	}
@@ -444,8 +427,6 @@ func console(ctx *cli.Context) {
 	utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))
 
 	cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
-	cfg.ExtraData = makeExtra(ctx)
-
 	ethereum, err := eth.New(cfg)
 	if err != nil {
 		utils.Fatalf("%v", err)
@@ -544,16 +525,17 @@ func blockRecovery(ctx *cli.Context) {
 
 	var block *types.Block
 	if arg[0] == '#' {
-		block = core.GetBlock(blockDb, core.GetCanonicalHash(blockDb, common.String2Big(arg[1:]).Uint64()))
+		block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
 	} else {
-		block = core.GetBlock(blockDb, common.HexToHash(arg))
+		block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
 	}
 
 	if block == nil {
 		glog.Fatalln("block not found. Recovery failed")
 	}
 
-	if err = core.WriteHeadBlockHash(blockDb, block.Hash()); err != nil {
+	err = core.WriteHead(blockDb, block)
+	if err != nil {
 		glog.Fatalln("block write err", err)
 	}
 	glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
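The deleted makeExtra helper is a small flag-or-default resolver: use the miner extradata from the command line when the flag was set, otherwise fall back to a generated default. A standalone sketch of the same pattern using the standard flag package rather than the cli package geth uses (so the set/unset test is simplified to an emptiness check):

```go
package main

import (
	"flag"
	"fmt"
)

var extraData = flag.String("extradata", "", "Extra data for the miner")

// makeExtra returns the user-supplied extra data when given, and a
// built-in default otherwise.
func makeExtra() []byte {
	if *extraData != "" {
		return []byte(*extraData)
	}
	return makeDefaultExtra()
}

func makeDefaultExtra() []byte {
	return []byte("Geth/v1.x/linux/go1.x") // placeholder default, not geth's real encoding
}

func main() {
	flag.Parse()
	fmt.Printf("%s\n", makeExtra())
}
```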
@@ -289,7 +289,7 @@ func updateChart(metric string, data []float64, base *int, chart *termui.LineChart) {
 		}
 	}
 	unit, scale := 0, 1.0
-	for high >= 1000 && unit+1 < len(dataUnits) {
+	for high >= 1000 {
 		high, unit, scale = high/1000, unit+1, scale*1000
 	}
 	// If the unit changes, re-create the chart (hack to set max height...)
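The guard being removed here is a bounds check: without `unit+1 < len(dataUnits)`, a large enough sample would walk `unit` past the end of the units table and a later `dataUnits[unit]` lookup would panic. A self-contained sketch of the scaling loop with the check in place:

```go
package main

import "fmt"

var dataUnits = []string{"", "K", "M", "G", "T"}

// scale divides v by 1000 until it drops below 1000, tracking which unit
// it landed on; the unit+1 bound stops at the last table entry instead of
// indexing past it.
func scale(v float64) (float64, string) {
	unit := 0
	for v >= 1000 && unit+1 < len(dataUnits) {
		v, unit = v/1000, unit+1
	}
	return v, dataUnits[unit]
}

func main() {
	v, u := scale(1234567)
	fmt.Printf("%.2f%s\n", v, u) // 1.23M
}
```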
@@ -121,10 +121,6 @@ var (
 		Name:  "genesis",
 		Usage: "Inserts/Overwrites the genesis block (json format)",
 	}
-	DevModeFlag = cli.BoolFlag{
-		Name:  "dev",
-		Usage: "Developer mode. This mode creates a private network and sets several debugging flags",
-	}
 	IdentityFlag = cli.StringFlag{
 		Name:  "identity",
 		Usage: "Custom node name",
@@ -142,11 +138,6 @@ var (
 		Name:  "olympic",
 		Usage: "Use olympic style protocol",
 	}
-	EthVersionFlag = cli.IntFlag{
-		Name:  "eth",
-		Value: 62,
-		Usage: "Highest eth protocol to advertise (temporary, dev option)",
-	}
 
 	// miner settings
 	MinerThreadsFlag = cli.IntFlag{
@@ -414,7 +405,7 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
 		glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
 	}
 
-	cfg := &eth.Config{
+	return &eth.Config{
 		Name:         common.MakeName(clientID, version),
 		DataDir:      ctx.GlobalString(DataDirFlag.Name),
 		GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name),
@@ -451,33 +442,6 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
 		SolcPath: ctx.GlobalString(SolcPathFlag.Name),
 		AutoDAG:  ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
 	}
-
-	if ctx.GlobalBool(DevModeFlag.Name) {
-		if !ctx.GlobalIsSet(VMDebugFlag.Name) {
-			cfg.VmDebug = true
-		}
-		if !ctx.GlobalIsSet(MaxPeersFlag.Name) {
-			cfg.MaxPeers = 0
-		}
-		if !ctx.GlobalIsSet(GasPriceFlag.Name) {
-			cfg.GasPrice = new(big.Int)
-		}
-		if !ctx.GlobalIsSet(ListenPortFlag.Name) {
-			cfg.Port = "0" // auto port
-		}
-		if !ctx.GlobalIsSet(WhisperEnabledFlag.Name) {
-			cfg.Shh = true
-		}
-		if !ctx.GlobalIsSet(DataDirFlag.Name) {
-			cfg.DataDir = os.TempDir() + "/ethereum_dev_mode"
-		}
-		cfg.PowTest = true
-		cfg.DevMode = true
-
-		glog.V(logger.Info).Infoln("dev mode enabled")
-	}
-
-	return cfg
 }
 
 // SetupLogger configures glog from the logging-related command line flags.
@@ -495,20 +459,8 @@ func SetupVM(ctx *cli.Context) {
 	vm.SetJITCacheSize(ctx.GlobalInt(VMJitCacheFlag.Name))
 }
 
-// SetupEth configures the eth packages global settings
-func SetupEth(ctx *cli.Context) {
-	version := ctx.GlobalInt(EthVersionFlag.Name)
-	for len(eth.ProtocolVersions) > 0 && eth.ProtocolVersions[0] > uint(version) {
-		eth.ProtocolVersions = eth.ProtocolVersions[1:]
-		eth.ProtocolLengths = eth.ProtocolLengths[1:]
-	}
-	if len(eth.ProtocolVersions) == 0 {
-		Fatalf("No valid eth protocols remaining")
-	}
-}
-
 // MakeChain creates a chain manager from set command line flags.
-func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb ethdb.Database) {
+func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb common.Database) {
 	datadir := ctx.GlobalString(DataDirFlag.Name)
 	cache := ctx.GlobalInt(CacheFlag.Name)
@@ -1,50 +1,49 @@
-# common
+# ethutil
 
 [Build Status](https://travis-ci.org/ethereum/go-ethereum)
 
-The common package contains the ethereum utility library.
+The ethutil package contains the ethereum utility library.
 
 # Installation
 
-As a subdirectory the main go-ethereum repository, you get it with
-`go get github.com/ethereum/go-ethereum`.
+`go get github.com/ethereum/ethutil-go`
 
 # Usage
 
 ## RLP (Recursive Linear Prefix) Encoding
 
-RLP Encoding is an encoding scheme used by the Ethereum project. It
-encodes any native value or list to a string.
+RLP Encoding is an encoding scheme utilized by the Ethereum project. It
+encodes any native value or list to string.
 
-More in depth information about the encoding scheme see the
-[Wiki](http://wiki.ethereum.org/index.php/RLP) article.
+More in depth information about the Encoding scheme see the [Wiki](http://wiki.ethereum.org/index.php/RLP)
+article.
 
 ```go
-rlp := common.Encode("doge")
+rlp := ethutil.Encode("doge")
 fmt.Printf("%q\n", rlp) // => "\0x83dog"
 
-rlp = common.Encode([]interface{}{"dog", "cat"})
+rlp = ethutil.Encode([]interface{}{"dog", "cat"})
 fmt.Printf("%q\n", rlp) // => "\0xc8\0x83dog\0x83cat"
-decoded := common.Decode(rlp)
+decoded := ethutil.Decode(rlp)
 fmt.Println(decoded) // => ["dog" "cat"]
 ```
 
 ## Patricia Trie
 
-Patricie Tree is a merkle trie used by the Ethereum project.
+Patricie Tree is a merkle trie utilized by the Ethereum project.
 
 More in depth information about the (modified) Patricia Trie can be
 found on the [Wiki](http://wiki.ethereum.org/index.php/Patricia_Tree).
 
 The patricia trie uses a db as backend and could be anything as long as
-it satisfies the Database interface found in `common/db.go`.
+it satisfies the Database interface found in `ethutil/db.go`.
 
 ```go
 db := NewDatabase()
 
 // db, root
-trie := common.NewTrie(db, "")
+trie := ethutil.NewTrie(db, "")
 
 trie.Put("puppy", "dog")
 trie.Put("horse", "stallion")
@@ -66,7 +65,7 @@ all (key, value) bindings.
 // ... Create db/trie
 
 // Note that RLP uses interface slices as list
-value := common.Encode([]interface{}{"one", 2, "three", []interface{}{42}})
+value := ethutil.Encode([]interface{}{"one", 2, "three", []interface{}{42}})
 // Store the RLP encoded value of the list
 trie.Put("mykey", value)
 ```
@@ -90,7 +89,7 @@ type (e.g. `Slice()` returns []interface{}, `Uint()` return 0, etc).
 `Append(v)` appends the value (v) to the current value/list.
 
 ```go
-val := common.NewEmptyValue().Append(1).Append("2")
+val := ethutil.NewEmptyValue().Append(1).Append("2")
 val.AppendList().Append(3)
 ```
@@ -111,7 +110,7 @@ val.AppendList().Append(3)
 `Byte()` returns the value as a single byte.
 
 ```go
-val := common.NewValue([]interface{}{1,"2",[]interface{}{3}})
+val := ethutil.NewValue([]interface{}{1,"2",[]interface{}{3}})
 val.Get(0).Uint() // => 1
 val.Get(1).Str()  // => "2"
 s := val.Get(2)   // => Value([]interface{}{3})
@@ -123,7 +122,7 @@ s.Get(0).Uint() // => 3
 Decoding streams of RLP data is simplified
 
 ```go
-val := common.NewValueFromBytes(rlpData)
+val := ethutil.NewValueFromBytes(rlpData)
 val.Get(0).Uint()
 ```
@@ -133,7 +132,7 @@ Encoding from Value to RLP is done with the `Encode` method. The
 underlying value can be anything RLP can encode (int, str, lists, bytes)
 
 ```go
-val := common.NewValue([]interface{}{1,"2",[]interface{}{3}})
+val := ethutil.NewValue([]interface{}{1,"2",[]interface{}{3}})
 rlp := val.Encode()
 // Store the rlp data
 Store(rlp)
@@ -19,7 +19,6 @@ package compiler
 import (
 	"bytes"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -34,10 +33,15 @@ import (
 	"github.com/ethereum/go-ethereum/logger/glog"
 )
 
+const (
+	// flair = "Christian <c@ethdev.com> and Lefteris <lefteris@ethdev.com> (c) 2014-2015"
+	flair           = ""
+	languageVersion = "0"
+)
+
 var (
-	versionRegexp = regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+")
-	legacyRegexp  = regexp.MustCompile("0\\.(9\\..*|1\\.[01])")
-	paramsLegacy  = []string{
+	versionRegExp = regexp.MustCompile("[0-9]+.[0-9]+.[0-9]+")
+	params        = []string{
 		"--binary",   // Request to output the contract in binary (hexadecimal).
 		"file",       //
 		"--json-abi", // Request to output the contract's JSON ABI interface.
@@ -49,15 +53,6 @@ var (
 		"--add-std",
 		"1",
 	}
-	paramsNew = []string{
-		"--bin",      // Request to output the contract in binary (hexadecimal).
-		"--abi",      // Request to output the contract's JSON ABI interface.
-		"--userdoc",  // Request to output the contract's Natspec user documentation.
-		"--devdoc",   // Request to output the contract's Natspec developer documentation.
-		"--add-std",  // include standard lib contracts
-		"--optimize", // code optimizer switched on
-		"-o",         // output directory
-	}
 )
 
 type Contract struct {
@@ -70,17 +65,14 @@ type ContractInfo struct {
 	Language        string      `json:"language"`
 	LanguageVersion string      `json:"languageVersion"`
 	CompilerVersion string      `json:"compilerVersion"`
-	CompilerOptions string      `json:"compilerOptions"`
 	AbiDefinition   interface{} `json:"abiDefinition"`
 	UserDoc         interface{} `json:"userDoc"`
 	DeveloperDoc    interface{} `json:"developerDoc"`
 }
 
 type Solidity struct {
-	solcPath    string
-	version     string
-	fullVersion string
-	legacy      bool
+	solcPath string
+	version  string
 }
 
 func New(solcPath string) (sol *Solidity, err error) {
@@ -101,118 +93,112 @@ func New(solcPath string) (sol *Solidity, err error) {
 		return
 	}
 
-	fullVersion := out.String()
-	version := versionRegexp.FindString(fullVersion)
-	legacy := legacyRegexp.MatchString(version)
-
+	version := versionRegExp.FindString(out.String())
 	sol = &Solidity{
-		solcPath:    solcPath,
-		version:     version,
-		fullVersion: fullVersion,
-		legacy:      legacy,
+		solcPath: solcPath,
+		version:  version,
 	}
 	glog.V(logger.Info).Infoln(sol.Info())
 	return
 }
 
 func (sol *Solidity) Info() string {
-	return fmt.Sprintf("%s\npath: %s", sol.fullVersion, sol.solcPath)
+	return fmt.Sprintf("solc v%s\nSolidity Compiler: %s\n%s", sol.version, sol.solcPath, flair)
 }
 
 func (sol *Solidity) Version() string {
 	return sol.version
 }
 
-// Compile builds and returns all the contracts contained within a source string.
-func (sol *Solidity) Compile(source string) (map[string]*Contract, error) {
-	// Short circuit if no source code was specified
+func (sol *Solidity) Compile(source string) (contracts map[string]*Contract, err error) {
+
 	if len(source) == 0 {
-		return nil, errors.New("solc: empty source string")
+		err = fmt.Errorf("empty source")
+		return
 	}
-	// Create a safe place to dump compilation output
+
 	wd, err := ioutil.TempDir("", "solc")
 	if err != nil {
-		return nil, fmt.Errorf("solc: failed to create temporary build folder: %v", err)
+		return
 	}
 	defer os.RemoveAll(wd)
 
-	// Assemble the compiler command, change to the temp folder and capture any errors
-	stderr := new(bytes.Buffer)
-
-	var params []string
-	if sol.legacy {
-		params = paramsLegacy
-	} else {
-		params = paramsNew
-		params = append(params, wd)
-	}
-	compilerOptions := strings.Join(params, " ")
-
+	in := strings.NewReader(source)
+	var out bytes.Buffer
+	// cwd set to temp dir
 	cmd := exec.Command(sol.solcPath, params...)
 	cmd.Dir = wd
-	cmd.Stdin = strings.NewReader(source)
-	cmd.Stderr = stderr
-
-	if err := cmd.Run(); err != nil {
-		return nil, fmt.Errorf("solc: %v\n%s", err, string(stderr.Bytes()))
-	}
-	// Sanity check that something was actually built
-	matches, _ := filepath.Glob(wd + "/*\\.bin*")
+	cmd.Stdin = in
+	cmd.Stdout = &out
+	err = cmd.Run()
+	if err != nil {
+		err = fmt.Errorf("solc error: %v", err)
+		return
+	}
+
+	matches, _ := filepath.Glob(wd + "/*.binary")
 	if len(matches) < 1 {
-		return nil, fmt.Errorf("solc: no build results found")
+		err = fmt.Errorf("solc error: missing code output")
+		return
 	}
-	// Compilation succeeded, assemble and return the contracts
-	contracts := make(map[string]*Contract)
+
+	contracts = make(map[string]*Contract)
 	for _, path := range matches {
 		_, file := filepath.Split(path)
 		base := strings.Split(file, ".")[0]
 
-		// Parse the individual compilation results (code binary, ABI definitions, user and dev docs)
-		var binary []byte
-		binext := ".bin"
-		if sol.legacy {
-			binext = ".binary"
-		}
-		if binary, err = ioutil.ReadFile(filepath.Join(wd, base+binext)); err != nil {
-			return nil, fmt.Errorf("solc: error reading compiler output for code: %v", err)
-		}
+		codeFile := filepath.Join(wd, base+".binary")
+		abiDefinitionFile := filepath.Join(wd, base+".abi")
+		userDocFile := filepath.Join(wd, base+".docuser")
+		developerDocFile := filepath.Join(wd, base+".docdev")
 
-		var abi interface{}
-		if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".abi")); err != nil {
-			return nil, fmt.Errorf("solc: error reading abi definition: %v", err)
-		} else if err = json.Unmarshal(blob, &abi); err != nil {
-			return nil, fmt.Errorf("solc: error parsing abi definition: %v", err)
-		}
+		var code, abiDefinitionJson, userDocJson, developerDocJson []byte
+		code, err = ioutil.ReadFile(codeFile)
+		if err != nil {
+			err = fmt.Errorf("error reading compiler output for code: %v", err)
+			return
+		}
+		abiDefinitionJson, err = ioutil.ReadFile(abiDefinitionFile)
+		if err != nil {
+			err = fmt.Errorf("error reading compiler output for abiDefinition: %v", err)
+			return
+		}
+		var abiDefinition interface{}
+		err = json.Unmarshal(abiDefinitionJson, &abiDefinition)
 
-		var userdoc interface{}
-		if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".docuser")); err != nil {
-			return nil, fmt.Errorf("solc: error reading user doc: %v", err)
-		} else if err = json.Unmarshal(blob, &userdoc); err != nil {
-			return nil, fmt.Errorf("solc: error parsing user doc: %v", err)
-		}
+		userDocJson, err = ioutil.ReadFile(userDocFile)
+		if err != nil {
+			err = fmt.Errorf("error reading compiler output for userDoc: %v", err)
+			return
+		}
+		var userDoc interface{}
+		err = json.Unmarshal(userDocJson, &userDoc)
 
-		var devdoc interface{}
-		if blob, err := ioutil.ReadFile(filepath.Join(wd, base+".docdev")); err != nil {
-			return nil, fmt.Errorf("solc: error reading dev doc: %v", err)
-		} else if err = json.Unmarshal(blob, &devdoc); err != nil {
-			return nil, fmt.Errorf("solc: error parsing dev doc: %v", err)
-		}
+		developerDocJson, err = ioutil.ReadFile(developerDocFile)
+		if err != nil {
+			err = fmt.Errorf("error reading compiler output for developerDoc: %v", err)
+			return
		}
+		var developerDoc interface{}
+		err = json.Unmarshal(developerDocJson, &developerDoc)
 
-		// Assemble the final contract
-		contracts[base] = &Contract{
-			Code: "0x" + string(binary),
+		contract := &Contract{
+			Code: "0x" + string(code),
 			Info: ContractInfo{
 				Source:          source,
 				Language:        "Solidity",
-				LanguageVersion: sol.version,
+				LanguageVersion: languageVersion,
 				CompilerVersion: sol.version,
-				CompilerOptions: compilerOptions,
-				AbiDefinition:   abi,
-				UserDoc:         userdoc,
-				DeveloperDoc:    devdoc,
+				AbiDefinition:   abiDefinition,
+				UserDoc:         userDoc,
+				DeveloperDoc:    developerDoc,
 			},
 		}
+
+		contracts[base] = contract
 	}
-	return contracts, nil
+
+	return
 }
 
 func SaveInfo(info *ContractInfo, filename string) (contenthash common.Hash, err error) {
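A usage sketch for the wrapper in either form (the call shape is the same on both sides of the diff): New("") falls back to locating solc on $PATH, which is exactly what the tests below rely on.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/compiler"
)

func main() {
	sol, err := compiler.New("") // empty path: locate solc via $PATH
	if err != nil {
		log.Fatalf("solc not found: %v", err)
	}
	contracts, err := sol.Compile(`contract test { function multiply(uint a) returns(uint d) { return a * 7; } }`)
	if err != nil {
		log.Fatalf("compile failed: %v", err)
	}
	for name, contract := range contracts {
		fmt.Printf("%s: %d bytes of code, compiler v%s\n", name, len(contract.Code), sol.Version())
	}
}
```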
@@ -26,7 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 )
 
-const solcVersion = "0.1.1"
+const solcVersion = "0.9.23"
 
 var (
 	source = `
@@ -37,18 +37,18 @@ contract test {
 	}
 }
 `
-	code = "0x6060604052606d8060116000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa1146037576035565b005b6046600480359060200150605c565b6040518082815260200191505060405180910390f35b60006007820290506068565b91905056"
-	info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0.1.1","compilerVersion":"0.1.1","compilerOptions":"--binary file --json-abi file --natspec-user file --natspec-dev file --add-std 1","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
+	code = "0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"
+	info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0","compilerVersion":"0.9.23","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
 
-	infohash = common.HexToHash("0x9f3803735e7f16120c5a140ab3f02121fd3533a9655c69b33a10e78752cc49b0")
+	infohash = common.HexToHash("0xea782f674eb898e477c20e8a7cf11c2c28b09fa68b5278732104f7a101aed255")
 )
 
 func TestCompiler(t *testing.T) {
 	sol, err := New("")
 	if err != nil {
-		t.Skipf("solc not found: %v", err)
+		t.Skip("solc not found: skip")
 	} else if sol.Version() != solcVersion {
-		t.Skipf("WARNING: a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
+		t.Skip("WARNING: skipping due to a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
 	}
 	contracts, err := sol.Compile(source)
 	if err != nil {
@@ -83,7 +83,7 @@ func TestCompileError(t *testing.T) {
 func TestNoCompiler(t *testing.T) {
 	_, err := New("/path/to/solc")
 	if err != nil {
-		t.Logf("solidity quits with error: %v", err)
+		t.Log("solidity quits with error: %v", err)
 	} else {
 		t.Errorf("no solc installed, but got no error")
 	}
@@ -111,4 +111,4 @@ func TestSaveInfo(t *testing.T) {
 	if cinfohash != infohash {
 		t.Errorf("content hash for info is incorrect. expected %v, got %v", infohash.Hex(), cinfohash.Hex())
 	}
 }
@@ -14,17 +14,13 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package ethdb
+package common
 
 // Database interface
 type Database interface {
 	Put(key []byte, value []byte) error
 	Get(key []byte) ([]byte, error)
 	Delete(key []byte) error
 	Close()
-	NewBatch() Batch
-}
-
-type Batch interface {
-	Put(key, value []byte) error
-	Write() error
+	Flush() error
 }
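This package move is the refactor threading through the whole comparison: every `ethdb.Database` on the '-' side corresponds to a `common.Database` on the '+' side, and the batch-oriented method set gives way to a Flush-based one. A minimal in-memory implementation of the older, Flush-based interface shape, as a sketch (geth's real equivalent is ethdb.MemDatabase):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// MemDatabase satisfies the common.Database interface shown above with a
// mutex-guarded map; Close and Flush are no-ops for an in-memory store.
type MemDatabase struct {
	mu sync.RWMutex
	kv map[string][]byte
}

func NewMemDatabase() *MemDatabase { return &MemDatabase{kv: make(map[string][]byte)} }

func (db *MemDatabase) Put(key []byte, value []byte) error {
	db.mu.Lock()
	defer db.mu.Unlock()
	db.kv[string(key)] = value
	return nil
}

func (db *MemDatabase) Get(key []byte) ([]byte, error) {
	db.mu.RLock()
	defer db.mu.RUnlock()
	if v, ok := db.kv[string(key)]; ok {
		return v, nil
	}
	return nil, errors.New("not found")
}

func (db *MemDatabase) Delete(key []byte) error {
	db.mu.Lock()
	defer db.mu.Unlock()
	delete(db.kv, string(key))
	return nil
}

func (db *MemDatabase) Close()       {}
func (db *MemDatabase) Flush() error { return nil }

func main() {
	db := NewMemDatabase()
	db.Put([]byte("k"), []byte("v"))
	v, _ := db.Get([]byte("k"))
	fmt.Printf("%s\n", v)
}
```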
@@ -134,7 +134,7 @@ func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
 
 	db, _ := ethdb.NewMemDatabase()
 	// set up mock genesis with balance on the testAddress
-	core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
+	core.WriteGenesisBlockForTesting(db, common.HexToAddress(testAddress), common.String2Big(testBalance))
 
 	// only use minimalistic stack with no networking
 	ethereum, err = eth.New(&eth.Config{
@@ -143,7 +143,7 @@ func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
 		MaxPeers:  0,
 		PowTest:   true,
 		Etherbase: common.HexToAddress(testAddress),
-		NewDB:     func(path string) (ethdb.Database, error) { return db, nil },
+		NewDB:     func(path string) (common.Database, error) { return db, nil },
 	})
 
 	if err != nil {
@@ -144,7 +144,7 @@ func genUncles(i int, gen *BlockGen) {
 
 func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Create the database in memory or in a temporary directory.
-	var db ethdb.Database
+	var db common.Database
 	if !disk {
 		db, _ = ethdb.NewMemDatabase()
 	} else {
@@ -162,7 +162,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 
 	// Generate a chain of b.N blocks using the supplied block
 	// generator function.
-	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
+	genesis := WriteGenesisBlockForTesting(db, benchRootAddr, benchRootFunds)
 	chain := GenerateChain(genesis, db, b.N, gen)
 
 	// Time the insertion of the new chain.
core/block_cache.go — new file, 120 lines

@@ -0,0 +1,120 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// BlockCache implements a caching mechanism specifically for blocks and uses FILO to pop
type BlockCache struct {
    size int

    hashes []common.Hash
    blocks map[common.Hash]*types.Block

    mu sync.RWMutex
}

// Creates and returns a `BlockCache` with `size`. If `size` is smaller than 1 it will panic
func NewBlockCache(size int) *BlockCache {
    if size < 1 {
        panic("block cache size not allowed to be smaller than 1")
    }

    bc := &BlockCache{size: size}
    bc.Clear()
    return bc
}

func (bc *BlockCache) Clear() {
    bc.blocks = make(map[common.Hash]*types.Block)
    bc.hashes = nil
}

func (bc *BlockCache) Push(block *types.Block) {
    bc.mu.Lock()
    defer bc.mu.Unlock()

    if len(bc.hashes) == bc.size {
        delete(bc.blocks, bc.hashes[0])

        // XXX There are a few other options on solving this
        // 1) use a poller / GC like mechanism to clean up untracked objects
        // 2) copy as below
        // re-use the slice and remove the reference to bc.hashes[0]
        // this will allow the element to be garbage collected.
        copy(bc.hashes, bc.hashes[1:])
    } else {
        bc.hashes = append(bc.hashes, common.Hash{})
    }

    hash := block.Hash()
    bc.blocks[hash] = block
    bc.hashes[len(bc.hashes)-1] = hash
}

func (bc *BlockCache) Delete(hash common.Hash) {
    bc.mu.Lock()
    defer bc.mu.Unlock()

    if _, ok := bc.blocks[hash]; ok {
        delete(bc.blocks, hash)
        for i, h := range bc.hashes {
            if hash == h {
                bc.hashes = bc.hashes[:i+copy(bc.hashes[i:], bc.hashes[i+1:])]
                // or ? => bc.hashes = append(bc.hashes[:i], bc.hashes[i+1]...)

                break
            }
        }
    }
}

func (bc *BlockCache) Get(hash common.Hash) *types.Block {
    bc.mu.RLock()
    defer bc.mu.RUnlock()

    if block, haz := bc.blocks[hash]; haz {
        return block
    }

    return nil
}

func (bc *BlockCache) Has(hash common.Hash) bool {
    bc.mu.RLock()
    defer bc.mu.RUnlock()

    _, ok := bc.blocks[hash]
    return ok
}

func (bc *BlockCache) Each(cb func(int, *types.Block)) {
    bc.mu.Lock()
    defer bc.mu.Unlock()

    i := 0
    for _, block := range bc.blocks {
        cb(i, block)
        i++
    }
}
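The eviction bookkeeping in Push is the subtle part of this new file: an ordered slice of hashes decides who gets evicted while the map gives O(1) lookup. The same pattern reduced to plain strings, as a runnable sketch (names are mine):

```go
package main

import "fmt"

// boundedCache mirrors BlockCache's bookkeeping: keys preserves insertion
// order for eviction, values gives constant-time lookup.
type boundedCache struct {
	size   int
	keys   []string
	values map[string]string
}

func newBoundedCache(size int) *boundedCache {
	return &boundedCache{size: size, values: make(map[string]string)}
}

func (c *boundedCache) Push(k, v string) {
	if len(c.keys) == c.size {
		delete(c.values, c.keys[0])
		// Shift left in place: the slice is reused and the evicted key
		// loses its reference, exactly as BlockCache.Push does above.
		copy(c.keys, c.keys[1:])
	} else {
		c.keys = append(c.keys, "")
	}
	c.values[k] = v
	c.keys[len(c.keys)-1] = k
}

func main() {
	c := newBoundedCache(2)
	c.Push("a", "1")
	c.Push("b", "2")
	c.Push("c", "3") // evicts "a", the oldest entry
	_, ok := c.values["a"]
	fmt.Println("a still cached:", ok) // false
}
```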
core/block_cache_test.go — new file, 76 lines

@@ -0,0 +1,76 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
    "math/big"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

func newChain(size int) (chain []*types.Block) {
    var parentHash common.Hash
    for i := 0; i < size; i++ {
        head := &types.Header{ParentHash: parentHash, Number: big.NewInt(int64(i))}
        block := types.NewBlock(head, nil, nil, nil)
        chain = append(chain, block)
        parentHash = block.Hash()
    }
    return chain
}

func insertChainCache(cache *BlockCache, chain []*types.Block) {
    for _, block := range chain {
        cache.Push(block)
    }
}

func TestNewBlockCache(t *testing.T) {
    chain := newChain(3)
    cache := NewBlockCache(2)
    insertChainCache(cache, chain)

    if cache.hashes[0] != chain[1].Hash() {
        t.Error("oldest block incorrect")
    }
}

func TestInclusion(t *testing.T) {
    chain := newChain(3)
    cache := NewBlockCache(3)
    insertChainCache(cache, chain)

    for _, block := range chain {
        if b := cache.Get(block.Hash()); b == nil {
            t.Errorf("getting %x failed", block.Hash())
        }
    }
}

func TestDeletion(t *testing.T) {
    chain := newChain(3)
    cache := NewBlockCache(3)
    insertChainCache(cache, chain)

    cache.Delete(chain[1].Hash())

    if cache.Has(chain[1].Hash()) {
        t.Errorf("expected %x not to be included")
    }
}
core/block_processor.go

@@ -26,7 +26,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
@@ -42,7 +41,7 @@ const (
 )
 
 type BlockProcessor struct {
-	chainDb ethdb.Database
+	chainDb common.Database
 	// Mutex for locking the block processor. Blocks can only be handled one at a time
 	mutex sync.Mutex
 	// Canonical block chain
@@ -57,19 +56,7 @@ type BlockProcessor struct {
 	eventMux *event.TypeMux
 }
 
-// TODO: type GasPool big.Int
-//
-// GasPool is implemented by state.StateObject. This is a historical
-// coincidence. Gas tracking should move out of StateObject.
-
-// GasPool tracks the amount of gas available during
-// execution of the transactions in a block.
-type GasPool interface {
-	AddGas(gas, price *big.Int)
-	SubGas(gas, price *big.Int) error
-}
-
-func NewBlockProcessor(db ethdb.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
+func NewBlockProcessor(db common.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
 	sm := &BlockProcessor{
 		chainDb: db,
 		mem:     make(map[string]*big.Int),
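The GasPool interface on the removed side abstracts a per-block gas budget that every transaction draws from (in the older code on the '+' side, the coinbase StateObject plays this role directly). A self-contained sketch of the idea; the big.Int-backed type here is illustrative, not geth's implementation:

```go
package main

import (
	"errors"
	"fmt"
	"math/big"
)

// gasPool tracks the gas still available while applying the transactions
// of one block; its methods match the removed GasPool interface.
type gasPool struct{ gas big.Int }

func (p *gasPool) AddGas(gas, price *big.Int) {
	p.gas.Add(&p.gas, gas)
}

func (p *gasPool) SubGas(gas, price *big.Int) error {
	if p.gas.Cmp(gas) < 0 {
		return errors.New("gas limit reached")
	}
	p.gas.Sub(&p.gas, gas)
	return nil
}

func main() {
	pool := new(gasPool)
	pool.AddGas(big.NewInt(3141592), big.NewInt(1)) // block gas limit
	if err := pool.SubGas(big.NewInt(21000), big.NewInt(1)); err != nil {
		fmt.Println("tx rejected:", err)
		return
	}
	fmt.Println("gas left:", pool.gas.String()) // 3120592
}
```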
@@ -77,15 +64,16 @@ func NewBlockProcessor
 		bc:       chainManager,
 		eventMux: eventMux,
 	}
+
 	return sm
 }
 
 func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block *types.Block, transientProcess bool) (receipts types.Receipts, err error) {
-	gp := statedb.GetOrNewStateObject(block.Coinbase())
-	gp.SetGasLimit(block.GasLimit())
+	coinbase := statedb.GetOrNewStateObject(block.Coinbase())
+	coinbase.SetGasLimit(block.GasLimit())
 
 	// Process the transactions on to parent state
-	receipts, err = sm.ApplyTransactions(gp, statedb, block, block.Transactions(), transientProcess)
+	receipts, err = sm.ApplyTransactions(coinbase, statedb, block, block.Transactions(), transientProcess)
 	if err != nil {
 		return nil, err
 	}
@@ -93,8 +81,9 @@
 	return receipts, nil
 }
 
-func (self *BlockProcessor) ApplyTransaction(gp GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
-	_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, gp)
+func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
+	cb := statedb.GetStateObject(coinbase.Address())
+	_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, cb)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -129,7 +118,7 @@ func (self *BlockProcessor) ChainManager() *ChainManager {
 	return self.bc
 }
 
-func (self *BlockProcessor) ApplyTransactions(gp GasPool, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
+func (self *BlockProcessor) ApplyTransactions(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
 	var (
 		receipts     types.Receipts
 		totalUsedGas = big.NewInt(0)
@@ -141,7 +130,7 @@
 	for i, tx := range txs {
 		statedb.StartRecord(tx.Hash(), block.Hash(), i)
 
-		receipt, txGas, err := self.ApplyTransaction(gp, statedb, header, tx, totalUsedGas, transientProcess)
+		receipt, txGas, err := self.ApplyTransaction(coinbase, statedb, header, tx, totalUsedGas, transientProcess)
 		if err != nil {
 			return nil, err
 		}
@@ -214,7 +203,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs state.Logs, err error) {
 	txs := block.Transactions()
 
 	// Block validation
-	if err = ValidateHeader(sm.Pow, header, parent.Header(), false, false); err != nil {
+	if err = ValidateHeader(sm.Pow, header, parent, false, false); err != nil {
 		return
 	}
@@ -338,7 +327,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
 			return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
 		}
 
-		if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
+		if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash], true, true); err != nil {
 			return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
 		}
 	}
@@ -368,50 +357,52 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err error) {
 }
 
 // See YP section 4.3.4. "Block Header Validity"
-// Validates a header. Returns an error if the header is invalid.
-func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
-	if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
-		return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
+// Validates a block. Returns an error if the block is invalid.
+func ValidateHeader(pow pow.PoW, block *types.Header, parent *types.Block, checkPow, uncle bool) error {
+	if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
+		return fmt.Errorf("Block extra data too long (%d)", len(block.Extra))
 	}
 
 	if uncle {
-		if header.Time.Cmp(common.MaxBig) == 1 {
+		if block.Time.Cmp(common.MaxBig) == 1 {
 			return BlockTSTooBigErr
 		}
 	} else {
-		if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
+		if block.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
 			return BlockFutureErr
 		}
 	}
-	if header.Time.Cmp(parent.Time) != 1 {
+	if block.Time.Cmp(parent.Time()) != 1 {
 		return BlockEqualTSErr
 	}
 
-	expd := CalcDifficulty(header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
-	if expd.Cmp(header.Difficulty) != 0 {
-		return fmt.Errorf("Difficulty check failed for header %v, %v", header.Difficulty, expd)
+	expd := CalcDifficulty(block.Time.Uint64(), parent.Time().Uint64(), parent.Number(), parent.Difficulty())
+	if expd.Cmp(block.Difficulty) != 0 {
+		return fmt.Errorf("Difficulty check failed for block %v, %v", block.Difficulty, expd)
 	}
 
-	a := new(big.Int).Set(parent.GasLimit)
-	a = a.Sub(a, header.GasLimit)
+	var a, b *big.Int
+	a = parent.GasLimit()
+	a = a.Sub(a, block.GasLimit)
 	a.Abs(a)
-	b := new(big.Int).Set(parent.GasLimit)
|
||||
b = parent.GasLimit()
|
||||
b = b.Div(b, params.GasLimitBoundDivisor)
|
||||
if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
|
||||
return fmt.Errorf("GasLimit check failed for header %v (%v > %v)", header.GasLimit, a, b)
|
||||
if !(a.Cmp(b) < 0) || (block.GasLimit.Cmp(params.MinGasLimit) == -1) {
|
||||
return fmt.Errorf("GasLimit check failed for block %v (%v > %v)", block.GasLimit, a, b)
|
||||
}
|
||||
|
||||
num := new(big.Int).Set(parent.Number)
|
||||
num.Sub(header.Number, num)
|
||||
num := parent.Number()
|
||||
num.Sub(block.Number, num)
|
||||
if num.Cmp(big.NewInt(1)) != 0 {
|
||||
return BlockNumberErr
|
||||
}
|
||||
|
||||
if checkPow {
|
||||
// Verify the nonce of the header. Return an error if it's not valid
|
||||
if !pow.Verify(types.NewBlockWithHeader(header)) {
|
||||
return ValidationError("Header's nonce is invalid (= %x)", header.Nonce)
|
||||
// Verify the nonce of the block. Return an error if it's not valid
|
||||
if !pow.Verify(types.NewBlockWithHeader(block)) {
|
||||
return ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
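The gas limit rule ValidateHeader enforces can be restated as a small pure helper: the new gas limit may move at most parent.GasLimit / params.GasLimitBoundDivisor away from the parent's, and may never drop under params.MinGasLimit. A self-contained sketch (the helper name is ours; the bounds are passed in rather than read from params). Note that the replacement code above reuses the *big.Int returned by parent.GasLimit() for both a and b and subtracts in place; if that accessor hands back the header's own pointer, the values are mutated under the parent, which is why this sketch, like the removed code, copies into a fresh big.Int first:

```go
package core

import "math/big"

// gasLimitOK reports whether a child gas limit is within the allowed band
// around its parent's gas limit and above the protocol minimum.
func gasLimitOK(parentGasLimit, gasLimit, boundDivisor, minGasLimit *big.Int) bool {
	diff := new(big.Int).Sub(parentGasLimit, gasLimit) // copy, do not mutate inputs
	diff.Abs(diff)
	bound := new(big.Int).Div(parentGasLimit, boundDivisor)
	return diff.Cmp(bound) < 0 && gasLimit.Cmp(minGasLimit) >= 0
}
```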
@ -48,13 +48,13 @@ func TestNumber(t *testing.T) {
	statedb := state.New(chain.Genesis().Root(), chain.chainDb)
	header := makeHeader(chain.Genesis(), statedb)
	header.Number = big.NewInt(3)
	err := ValidateHeader(pow, header, chain.Genesis().Header(), false, false)
	err := ValidateHeader(pow, header, chain.Genesis(), false, false)
	if err != BlockNumberErr {
		t.Errorf("expected block number error, got %q", err)
	}

	header = makeHeader(chain.Genesis(), statedb)
	err = ValidateHeader(pow, header, chain.Genesis().Header(), false, false)
	err = ValidateHeader(pow, header, chain.Genesis(), false, false)
	if err == BlockNumberErr {
		t.Errorf("didn't expect block number error")
	}

@ -22,7 +22,6 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/pow"
)
@ -131,17 +130,6 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
	return b.chain[index]
}

// OffsetTime modifies the time instance of a block, implicitly changing its
// associated difficulty. It's useful to test scenarios where forking is not
// tied to chain length directly.
func (b *BlockGen) OffsetTime(seconds int64) {
	b.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))
	if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
		panic("block time out of range")
	}
	b.header.Difficulty = CalcDifficulty(b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
}

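OffsetTime recomputes the block difficulty through CalcDifficulty after shifting the timestamp, which is what lets tests simulate a weaker fork by slowing blocks down. As a reminder of what that adjustment does, a sketch under Frontier-era assumptions (13 second threshold, /2048 adjustment step; the minimum-difficulty clamp is omitted here):

```go
package core

import "math/big"

// frontierDifficulty sketches the adjustment CalcDifficulty applies: nudge the
// parent difficulty up when blocks arrive quickly, down when they arrive slowly.
func frontierDifficulty(time, parentTime uint64, parentDiff *big.Int) *big.Int {
	adjust := new(big.Int).Div(parentDiff, big.NewInt(2048))
	diff := new(big.Int).Set(parentDiff)
	if time-parentTime < 13 {
		return diff.Add(diff, adjust)
	}
	return diff.Sub(diff, adjust)
}
```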
// GenerateChain creates a chain of n blocks. The first block's
// parent will be the provided parent. db is used to store
// intermediate states and should contain the parent's state trie.
@ -154,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
// Blocks created by GenerateChain do not contain valid proof of work
// values. Inserting them into ChainManager requires use of FakePow or
// a similar non-validating proof of work implementation.
func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block {
func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int, *BlockGen)) []*types.Block {
	statedb := state.New(parent.Root(), db)
	blocks := make(types.Blocks, n)
	genblock := func(i int, h *types.Header) *types.Block {
@ -170,6 +158,7 @@ func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int,
	for i := 0; i < n; i++ {
		header := makeHeader(parent, statedb)
		block := genblock(i, header)
		block.Td = CalcTD(block, parent)
		blocks[i] = block
		parent = block
	}
@ -197,7 +186,7 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {

// newCanonical creates a new deterministic canonical chain by running
// InsertChain on the result of makeChain.
func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
func newCanonical(n int, db common.Database) (*BlockProcessor, error) {
	evmux := &event.TypeMux{}

	WriteTestNetGenesisBlock(db, 0)
@ -213,7 +202,7 @@ func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
	return bman, err
}

func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
func makeChain(parent *types.Block, n int, db common.Database, seed int) []*types.Block {
	return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
	})

@ -42,7 +42,7 @@ func ExampleGenerateChain() {
	)

	// Ensure that key1 has some funds in the genesis block.
	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr1, big.NewInt(1000000)})
	genesis := WriteGenesisBlockForTesting(db, addr1, big.NewInt(1000000))

	// This call generates a chain of 5 blocks. The function runs for
	// each block and adds different features to gen based on the

@ -22,6 +22,7 @@ import (
	"fmt"
	"io"
	"math/big"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
@ -29,13 +30,11 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/pow"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/hashicorp/golang-lru"
)

@ -49,9 +48,6 @@ var (
)

const (
	headerCacheLimit = 512
	bodyCacheLimit = 256
	tdCacheLimit = 1024
	blockCacheLimit = 256
	maxFutureBlocks = 256
	maxTimeFutureBlocks = 30
@ -60,7 +56,7 @@ const (

type ChainManager struct {
	//eth EthManager
	chainDb ethdb.Database
	chainDb common.Database
	processor types.BlockProcessor
	eventMux *event.TypeMux
	genesisBlock *types.Block
@ -72,13 +68,10 @@ type ChainManager struct {
	checkpoint int // checkpoint counts towards the new checkpoint
	td *big.Int
	currentBlock *types.Block
	lastBlockHash common.Hash
	currentGasLimit *big.Int

	headerCache *lru.Cache // Cache for the most recent block headers
	bodyCache *lru.Cache // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
	tdCache *lru.Cache // Cache for the most recent block total difficulties
	blockCache *lru.Cache // Cache for the most recent entire blocks
	cache *lru.Cache // cache is the LRU caching
	futureBlocks *lru.Cache // future blocks are blocks added for later processing

	quit chan struct{}
@ -90,25 +83,14 @@ type ChainManager struct {
	pow pow.PoW
}

func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)

func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
	cache, _ := lru.New(blockCacheLimit)
	bc := &ChainManager{
		chainDb: chainDb,
		eventMux: mux,
		quit: make(chan struct{}),
		headerCache: headerCache,
		bodyCache: bodyCache,
		bodyRLPCache: bodyRLPCache,
		tdCache: tdCache,
		blockCache: blockCache,
		futureBlocks: futureBlocks,
		pow: pow,
		chainDb: chainDb,
		eventMux: mux,
		quit: make(chan struct{}),
		cache: cache,
		pow: pow,
	}

	bc.genesisBlock = bc.GetBlockByNumber(0)
@ -123,9 +105,11 @@ func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*
	}
	glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
	}

	if err := bc.setLastState(); err != nil {
		return nil, err
	}

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash, _ := range BadHashes {
		if block := bc.GetBlock(hash); block != nil {
@ -139,8 +123,14 @@ func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
|
||||
}
|
||||
}
|
||||
|
||||
// Take ownership of this particular state
|
||||
|
||||
bc.futureBlocks, _ = lru.New(maxFutureBlocks)
|
||||
bc.makeCache()
|
||||
|
||||
go bc.update()
|
||||
|
||||
return bc, nil
|
||||
}
|
||||
|
||||
@ -149,16 +139,14 @@ func (bc *ChainManager) SetHead(head *types.Block) {
|
||||
defer bc.mu.Unlock()
|
||||
|
||||
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
|
||||
DeleteBlock(bc.chainDb, block.Hash())
|
||||
bc.removeBlock(block)
|
||||
}
|
||||
bc.headerCache.Purge()
|
||||
bc.bodyCache.Purge()
|
||||
bc.bodyRLPCache.Purge()
|
||||
bc.blockCache.Purge()
|
||||
bc.futureBlocks.Purge()
|
||||
|
||||
bc.cache, _ = lru.New(blockCacheLimit)
|
||||
bc.currentBlock = head
|
||||
bc.setTotalDifficulty(bc.GetTd(head.Hash()))
|
||||
bc.makeCache()
|
||||
|
||||
bc.setTotalDifficulty(head.Td)
|
||||
bc.insert(head)
|
||||
bc.setLastState()
|
||||
}
|
||||
@ -181,7 +169,7 @@ func (self *ChainManager) LastBlockHash() common.Hash {
|
||||
self.mu.RLock()
|
||||
defer self.mu.RUnlock()
|
||||
|
||||
return self.currentBlock.Hash()
|
||||
return self.lastBlockHash
|
||||
}
|
||||
|
||||
func (self *ChainManager) CurrentBlock() *types.Block {
|
||||
@ -211,13 +199,13 @@ func (bc *ChainManager) recover() bool {
|
||||
if len(data) != 0 {
|
||||
block := bc.GetBlock(common.BytesToHash(data))
|
||||
if block != nil {
|
||||
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
||||
glog.Fatalf("failed to write database head number: %v", err)
|
||||
}
|
||||
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
||||
glog.Fatalf("failed to write database head hash: %v", err)
|
||||
err := bc.chainDb.Put([]byte("LastBlock"), block.Hash().Bytes())
|
||||
if err != nil {
|
||||
glog.Fatalln("db write err:", err)
|
||||
}
|
||||
|
||||
bc.currentBlock = block
|
||||
bc.lastBlockHash = block.Hash()
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -225,13 +213,14 @@ func (bc *ChainManager) recover() bool {
|
||||
}
|
||||
|
||||
func (bc *ChainManager) setLastState() error {
|
||||
head := GetHeadBlockHash(bc.chainDb)
|
||||
if head != (common.Hash{}) {
|
||||
block := bc.GetBlock(head)
|
||||
data, _ := bc.chainDb.Get([]byte("LastBlock"))
|
||||
if len(data) != 0 {
|
||||
block := bc.GetBlock(common.BytesToHash(data))
|
||||
if block != nil {
|
||||
bc.currentBlock = block
|
||||
bc.lastBlockHash = block.Hash()
|
||||
} else {
|
||||
glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
|
||||
glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
|
||||
if bc.recover() {
|
||||
glog.Infof("Recover successful")
|
||||
} else {
|
||||
@ -241,7 +230,7 @@ func (bc *ChainManager) setLastState() error {
|
||||
} else {
|
||||
bc.Reset()
|
||||
}
|
||||
bc.td = bc.GetTd(bc.currentBlock.Hash())
|
||||
bc.td = bc.currentBlock.Td
|
||||
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
|
||||
|
||||
if glog.V(logger.Info) {
|
||||
@ -251,38 +240,63 @@ func (bc *ChainManager) setLastState() error {
|
||||
return nil
|
||||
}
|
||||
|
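Both recover and setLastState treat the head pointer as a raw "LastBlock" key in the chain database. The round-trip, isolated as a sketch (the function names are ours; common.Database is the interface this change reintroduces, and the lookup parameter stands in for ChainManager.GetBlock):

```go
package core

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// writeLastBlock persists the head block's hash under the "LastBlock" key.
func writeLastBlock(db common.Database, head *types.Block) error {
	return db.Put([]byte("LastBlock"), head.Hash().Bytes())
}

// readLastBlock resolves the persisted head hash back into a block using the
// supplied lookup; nil means no head was stored.
func readLastBlock(db common.Database, lookup func(common.Hash) *types.Block) *types.Block {
	data, _ := db.Get([]byte("LastBlock"))
	if len(data) == 0 {
		return nil
	}
	return lookup(common.BytesToHash(data))
}
```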
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *ChainManager) Reset() {
	bc.ResetWithGenesisBlock(bc.genesisBlock)
func (bc *ChainManager) makeCache() {
	bc.cache, _ = lru.New(blockCacheLimit)
	// load in last `blockCacheLimit` - 1 blocks. Last block is the current.
	bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
	for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
		bc.cache.Add(block.Hash(), block)
	}
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) {
func (bc *ChainManager) Reset() {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Dump the entire block chain and purge the caches
	for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
		DeleteBlock(bc.chainDb, block.Hash())
		bc.removeBlock(block)
	}
	bc.headerCache.Purge()
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Prepare the genesis block and reinitialize the chain
	if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
		glog.Fatalf("failed to write genesis block TD: %v", err)
	bc.cache, _ = lru.New(blockCacheLimit)

	// Prepare the genesis block
	err := WriteBlock(bc.chainDb, bc.genesisBlock)
	if err != nil {
		glog.Fatalln("db err:", err)
	}
	if err := WriteBlock(bc.chainDb, genesis); err != nil {
		glog.Fatalf("failed to write genesis block: %v", err)
	}
	bc.genesisBlock = genesis

	bc.insert(bc.genesisBlock)
	bc.currentBlock = bc.genesisBlock
	bc.setTotalDifficulty(genesis.Difficulty())
	bc.makeCache()

	bc.setTotalDifficulty(common.Big("0"))
}

func (bc *ChainManager) removeBlock(block *types.Block) {
	bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
}

func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
		bc.removeBlock(block)
	}

	// Prepare the genesis block
	gb.Td = gb.Difficulty()
	bc.genesisBlock = gb

	err := WriteBlock(bc.chainDb, bc.genesisBlock)
	if err != nil {
		glog.Fatalln("db err:", err)
	}

	bc.insert(bc.genesisBlock)
	bc.currentBlock = bc.genesisBlock
	bc.makeCache()
	bc.td = gb.Difficulty()
}

// Export writes the active chain to the given writer.
@ -321,23 +335,23 @@ func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error
// insert injects a block into the current block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
	// Add the block to the canonical chain number scheme and mark as the head
	if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
		glog.Fatalf("failed to insert block number: %v", err)
	err := WriteHead(bc.chainDb, block)
	if err != nil {
		glog.Fatal("db write fail:", err)
	}
	if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
		glog.Fatalf("failed to insert block number: %v", err)
	}
	// Add a new restore point if we reached some limit

	bc.checkpoint++
	if bc.checkpoint > checkpointLimit {
		if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil {
			glog.Fatalf("failed to create checkpoint: %v", err)
		err = bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes())
		if err != nil {
			glog.Fatal("db write fail:", err)
		}

		bc.checkpoint = 0
	}
	// Update the internal state with the head block

	bc.currentBlock = block
	bc.lastBlockHash = block.Hash()
}

// Accessors
@ -345,141 +359,61 @@ func (bc *ChainManager) Genesis() *types.Block {
	return bc.genesisBlock
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *ChainManager) HasHeader(hash common.Hash) bool {
	return bc.GetHeader(hash) != nil
}

// GetHeader retrieves a block header from the database by hash, caching it if
// found.
func (self *ChainManager) GetHeader(hash common.Hash) *types.Header {
	// Short circuit if the header's already in the cache, retrieve otherwise
	if header, ok := self.headerCache.Get(hash); ok {
		return header.(*types.Header)
	}
	header := GetHeader(self.chainDb, hash)
	if header == nil {
		return nil
	}
	// Cache the found header for next time and return
	self.headerCache.Add(header.Hash(), header)
	return header
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header {
	hash := GetCanonicalHash(self.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return self.GetHeader(hash)
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (self *ChainManager) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := self.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	body := GetBody(self.chainDb, hash)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	self.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := self.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	body := GetBodyRLP(self.chainDb, hash)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	self.bodyRLPCache.Add(hash, body)
	return body
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *ChainManager) GetTd(hash common.Hash) *big.Int {
	// Short circuit if the td's already in the cache, retrieve otherwise
	if cached, ok := self.tdCache.Get(hash); ok {
		return cached.(*big.Int)
	}
	td := GetTd(self.chainDb, hash)
	if td == nil {
		return nil
	}
	// Cache the found body for next time and return
	self.tdCache.Add(hash, td)
	return td
}

// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
// Block fetching methods
func (bc *ChainManager) HasBlock(hash common.Hash) bool {
	return bc.GetBlock(hash) != nil
	if bc.cache.Contains(hash) {
		return true
	}

	data, _ := bc.chainDb.Get(append(blockHashPre, hash[:]...))
	return len(data) != 0
}

func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
	block := self.GetBlock(hash)
	if block == nil {
		return
	}
	// XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
	for i := uint64(0); i < max; i++ {
		block = self.GetBlock(block.ParentHash())
		if block == nil {
			break
		}

		chain = append(chain, block.Hash())
		if block.Number().Cmp(common.Big0) <= 0 {
			break
		}
	}

	return
}

// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := self.blockCache.Get(hash); ok {
	if block, ok := self.cache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := GetBlock(self.chainDb, hash)

	block := GetBlockByHash(self.chainDb, hash)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	self.blockCache.Add(block.Hash(), block)
	return block

	// Add the block to the cache
	self.cache.Add(hash, (*types.Block)(block))

	return (*types.Block)(block)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block {
	hash := GetCanonicalHash(self.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return self.GetBlock(hash)
func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
	self.mu.RLock()
	defer self.mu.RUnlock()

	return self.getBlockByNumber(num)

}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	// Get the origin header from which to fetch
	header := self.GetHeader(hash)
	if header == nil {
		return nil
	}
	// Iterate the headers until enough is collected or the genesis reached
	chain := make([]common.Hash, 0, max)
	for i := uint64(0); i < max; i++ {
		if header = self.GetHeader(header.ParentHash); header == nil {
			break
		}
		chain = append(chain, header.Hash())
		if header.Number.Cmp(common.Big0) == 0 {
			break
		}
	}
	return chain
}

// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	for i := 0; i < n; i++ {
@ -493,6 +427,11 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*
	return
}

// non blocking version
func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
	return GetBlockByNumber(self.chainDb, num)
}

func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
@ -548,48 +487,39 @@ const (
	SideStatTy
)

// WriteBlock writes the block to the chain.
func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) {
// WriteBlock writes the block to the chain (or pending queue)
func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	// Calculate the total difficulty of the block
	ptd := self.GetTd(block.ParentHash())
	if ptd == nil {
		return NonStatTy, ParentError(block.ParentHash())
	}
	td := new(big.Int).Add(block.Difficulty(), ptd)

	self.mu.RLock()
	cblock := self.currentBlock
	self.mu.RUnlock()

	// Compare the TD of the last known block in the canonical chain to make sure it's greater.
	// At this point it's possible that a different chain (fork) becomes the new canonical chain.
	if td.Cmp(self.Td()) > 0 {
	if block.Td.Cmp(self.Td()) > 0 {
		// chain fork
		if block.ParentHash() != cblock.Hash() {
			// during split we merge two different chains and create the new canonical chain
			err := self.reorg(cblock, block)
			err := self.merge(cblock, block)
			if err != nil {
				return NonStatTy, err
			}

			status = SplitStatTy
		}
		status = CanonStatTy

		self.mu.Lock()
		self.setTotalDifficulty(td)
		self.setTotalDifficulty(block.Td)
		self.insert(block)
		self.mu.Unlock()

		status = CanonStatTy
	} else {
		status = SideStatTy
	}

	if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
		glog.Fatalf("failed to write block total difficulty: %v", err)
	}
	if err := WriteBlock(self.chainDb, block); err != nil {
		glog.Fatalf("failed to write block contents: %v", err)
	err = WriteBlock(self.chainDb, block)
	if err != nil {
		glog.Fatalln("db err:", err)
	}
	// Delete from future blocks
	self.futureBlocks.Remove(block.Hash())
|
||||
stats struct{ queued, processed, ignored int }
|
||||
tstart = time.Now()
|
||||
|
||||
nonceDone = make(chan nonceResult, len(chain))
|
||||
nonceQuit = make(chan struct{})
|
||||
nonceChecked = make([]bool, len(chain))
|
||||
)
|
||||
|
||||
// Start the parallel nonce verifier.
|
||||
nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
|
||||
defer close(nonceAbort)
|
||||
go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
|
||||
defer close(nonceQuit)
|
||||
|
||||
txcount := 0
|
||||
for i, block := range chain {
|
||||
@ -633,19 +565,24 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
// Wait for block i's nonce to be verified before processing
|
||||
// its state transition.
|
||||
for !nonceChecked[i] {
|
||||
r := <-nonceResults
|
||||
nonceChecked[r.index] = true
|
||||
r := <-nonceDone
|
||||
nonceChecked[r.i] = true
|
||||
if !r.valid {
|
||||
block := chain[r.index]
|
||||
return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
|
||||
block := chain[r.i]
|
||||
return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
|
||||
}
|
||||
}
|
||||
|
||||
if BadHashes[block.Hash()] {
|
||||
err := BadHashError(block.Hash())
|
||||
err := fmt.Errorf("Found known bad hash in chain %x", block.Hash())
|
||||
blockErr(block, err)
|
||||
return i, err
|
||||
}
|
||||
|
||||
// Setting block.Td regardless of error (known for example) prevents errors down the line
|
||||
// in the protocol handler
|
||||
block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
|
||||
|
||||
// Call in to the block processor and check for errors. It's likely that if one block fails
|
||||
// all others will fail too (unless a known block is returned).
|
||||
logs, receipts, err := self.processor.Process(block)
|
||||
@ -681,13 +618,11 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
|
||||
return i, err
|
||||
}
|
||||
if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
|
||||
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
|
||||
}
|
||||
|
||||
txcount += len(block.Transactions())
|
||||
|
||||
// write the block to the chain and get the status
|
||||
status, err := self.WriteBlock(block)
|
||||
status, err := self.WriteBlock(block, true)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
@ -713,6 +648,10 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
queue[i] = ChainSplitEvent{block, logs}
|
||||
queueEvent.splitCount++
|
||||
}
|
||||
if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
|
||||
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
|
||||
}
|
||||
|
||||
stats.processed++
|
||||
}
|
||||
|
||||
@ -727,26 +666,20 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
|
||||
// to be part of the new canonical chain and accumulates potential missing transactions and post an
|
||||
// event about them
|
||||
func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
|
||||
self.mu.Lock()
|
||||
defer self.mu.Unlock()
|
||||
|
||||
// diff takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
|
||||
// to be part of the new canonical chain.
|
||||
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
|
||||
var (
|
||||
newChain types.Blocks
|
||||
commonBlock *types.Block
|
||||
oldStart = oldBlock
|
||||
newStart = newBlock
|
||||
deletedTxs types.Transactions
|
||||
)
|
||||
|
||||
// first reduce whoever is higher bound
|
||||
if oldBlock.NumberU64() > newBlock.NumberU64() {
|
||||
// reduce old chain
|
||||
for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
|
||||
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
|
||||
}
|
||||
} else {
|
||||
// reduce new chain and append new chain blocks for inserting later on
|
||||
@ -755,10 +688,10 @@ func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
|
||||
}
|
||||
}
|
||||
if oldBlock == nil {
|
||||
return fmt.Errorf("Invalid old chain")
|
||||
return nil, fmt.Errorf("Invalid old chain")
|
||||
}
|
||||
if newBlock == nil {
|
||||
return fmt.Errorf("Invalid new chain")
|
||||
return nil, fmt.Errorf("Invalid new chain")
|
||||
}
|
||||
|
||||
numSplit := newBlock.Number()
|
||||
@ -768,14 +701,13 @@ func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
|
||||
break
|
||||
}
|
||||
newChain = append(newChain, newBlock)
|
||||
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
|
||||
|
||||
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
|
||||
if oldBlock == nil {
|
||||
return fmt.Errorf("Invalid old chain")
|
||||
return nil, fmt.Errorf("Invalid old chain")
|
||||
}
|
||||
if newBlock == nil {
|
||||
return fmt.Errorf("Invalid new chain")
|
||||
return nil, fmt.Errorf("Invalid new chain")
|
||||
}
|
||||
}
|
||||
|
||||
@ -784,8 +716,18 @@ func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
|
||||
glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
|
||||
}
|
||||
|
||||
var addedTxs types.Transactions
|
||||
return newChain, nil
|
||||
}
|
||||
|
||||
// merge merges two different chain to the new canonical chain
|
||||
func (self *ChainManager) merge(oldBlock, newBlock *types.Block) error {
|
||||
newChain, err := self.diff(oldBlock, newBlock)
|
||||
if err != nil {
|
||||
return fmt.Errorf("chain reorg failed: %v", err)
|
||||
}
|
||||
|
||||
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
|
||||
self.mu.Lock()
|
||||
for _, block := range newChain {
|
||||
// insert the block in the canonical way, re-writing history
|
||||
self.insert(block)
|
||||
@ -793,20 +735,8 @@ func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
|
||||
PutTransactions(self.chainDb, block, block.Transactions())
|
||||
PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash()))
|
||||
|
||||
addedTxs = append(addedTxs, block.Transactions()...)
|
||||
}
|
||||
|
||||
// calculate the difference between deleted and added transactions
|
||||
diff := types.TxDifference(deletedTxs, addedTxs)
|
||||
// When transactions get deleted from the database that means the
|
||||
// receipts that were created in the fork must also be deleted
|
||||
for _, tx := range diff {
|
||||
DeleteReceipt(self.chainDb, tx.Hash())
|
||||
DeleteTransaction(self.chainDb, tx.Hash())
|
||||
}
|
||||
// Must be posted in a goroutine because of the transaction pool trying
|
||||
// to acquire the chain manager lock
|
||||
go self.eventMux.Post(RemovedTransactionEvent{diff})
|
||||
self.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
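The deleted reorg branch relied on types.TxDifference to work out which transactions exist only on the abandoned fork, so their database entries and receipts can be removed. The set subtraction involved, sketched independently (the helper name is ours):

```go
package core

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// txDifference returns the transactions in a that do not appear in b,
// comparing by transaction hash.
func txDifference(a, b types.Transactions) types.Transactions {
	keep := make(types.Transactions, 0, len(a))
	remove := make(map[common.Hash]struct{}, len(b))
	for _, tx := range b {
		remove[tx.Hash()] = struct{}{}
	}
	for _, tx := range a {
		if _, ok := remove[tx.Hash()]; !ok {
			keep = append(keep, tx)
		}
	}
	return keep
}
```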
@ -825,11 +755,12 @@ out:
		case ChainEvent:
			// We need some control over the mining operation. Acquiring locks and waiting for the miner to create a new block takes too long
			// and in most cases isn't even necessary.
			if self.currentBlock.Hash() == event.Hash {
			if self.lastBlockHash == event.Hash {
				self.currentGasLimit = CalcGasLimit(event.Block)
				self.eventMux.Post(ChainHeadEvent{event.Block})
			}
		}

		self.eventMux.Post(event)
	}
}
@ -847,3 +778,40 @@ func blockErr(block *types.Block, err error) {
	glog.V(logger.Error).Infoln(err)
	glog.V(logger.Debug).Infoln(verifyNonces)
}

type nonceResult struct {
	i int
	valid bool
}

// verifyNonces verifies the nonces of the given blocks in parallel and
// reports each result on the done channel.
func verifyNonces(pow pow.PoW, blocks []*types.Block, quit <-chan struct{}, done chan<- nonceResult) {
	// Spawn a few workers. They listen for blocks on the in channel
	// and send results on done. The workers will exit in the
	// background when in is closed.
	var (
		in = make(chan int)
		nworkers = runtime.GOMAXPROCS(0)
	)
	defer close(in)
	if len(blocks) < nworkers {
		nworkers = len(blocks)
	}
	for i := 0; i < nworkers; i++ {
		go func() {
			for i := range in {
				done <- nonceResult{i: i, valid: pow.Verify(blocks[i])}
			}
		}()
	}
	// Feed block indices to the workers.
	for i := range blocks {
		select {
		case in <- i:
			continue
		case <-quit:
			return
		}
	}
}

@ -30,10 +30,8 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/pow"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/hashicorp/golang-lru"
@ -48,7 +46,7 @@ func thePow() pow.PoW {
	return pow
}

func theChainManager(db ethdb.Database, t *testing.T) *ChainManager {
func theChainManager(db common.Database, t *testing.T) *ChainManager {
	var eventMux event.TypeMux
	WriteTestNetGenesisBlock(db, 0)
	chainMan, err := NewChainManager(db, thePow(), &eventMux)
@ -75,11 +73,10 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big
	if err != nil {
		t.Fatal("could not make new canonical in testFork", err)
	}
	// assert the bmans have the same block at i
	// assert the bmans have the same block at i
	bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
	bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
	if bi1 != bi2 {
		fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
		t.Fatal("chains do not have the same hash at height", i)
	}
	bman2.bc.SetProcessor(bman2)
@ -113,6 +110,7 @@ func printChain(bc *ChainManager) {

// process blocks against a chain
func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
	td := new(big.Int)
	for _, block := range chainB {
		_, _, err := bman.bc.processor.Process(block)
		if err != nil {
@ -121,12 +119,17 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
			}
			return nil, err
		}
		parent := bman.bc.GetBlock(block.ParentHash())
		block.Td = CalcTD(block, parent)
		td = block.Td

		bman.bc.mu.Lock()
		WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
		WriteBlock(bman.bc.chainDb, block)
		{
			WriteBlock(bman.bc.chainDb, block)
		}
		bman.bc.mu.Unlock()
	}
	return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
	return td, nil
}

func loadChain(fn string, t *testing.T) (types.Blocks, error) {
@ -382,14 +385,10 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
	return chain
}

func chm(genesis *types.Block, db ethdb.Database) *ChainManager {
func chm(genesis *types.Block, db common.Database) *ChainManager {
	var eventMux event.TypeMux
	bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
	bc.headerCache, _ = lru.New(100)
	bc.bodyCache, _ = lru.New(100)
	bc.bodyRLPCache, _ = lru.New(100)
	bc.tdCache, _ = lru.New(100)
	bc.blockCache, _ = lru.New(100)
	bc.cache, _ = lru.New(100)
	bc.futureBlocks, _ = lru.New(100)
	bc.processor = bproc{}
	bc.ResetWithGenesisBlock(genesis)
@ -421,59 +420,6 @@ func TestReorgLongest(t *testing.T) {
	}
}

func TestBadHashes(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	genesis, err := WriteTestNetGenesisBlock(db, 0)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	bc := chm(genesis, db)

	chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
	BadHashes[chain[2].Header().Hash()] = true

	_, err = bc.InsertChain(chain)
	if !IsBadHashError(err) {
		t.Errorf("error mismatch: want: BadHashError, have: %v", err)
	}
}

func TestReorgBadHashes(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	genesis, err := WriteTestNetGenesisBlock(db, 0)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	bc := chm(genesis, db)

	chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
	bc.InsertChain(chain)

	if chain[3].Header().Hash() != bc.LastBlockHash() {
		t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash())
	}

	// NewChainManager should check BadHashes when loading its db
	BadHashes[chain[3].Header().Hash()] = true

	var eventMux event.TypeMux
	ncm, err := NewChainManager(db, FakePow{}, &eventMux)
	if err != nil {
		t.Errorf("NewChainManager err: %s", err)
	}

	// check it set head to (valid) parent of bad hash block
	if chain[2].Header().Hash() != ncm.LastBlockHash() {
		t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash())
	}

	if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
		t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit())
	}
}

func TestReorgShortest(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	genesis, err := WriteTestNetGenesisBlock(db, 0)
@ -511,7 +457,7 @@ func TestInsertNonceError(t *testing.T) {

	fail := rand.Int() % len(blocks)
	failblock := blocks[fail]
	bc.pow = failPow{failblock.NumberU64()}
	bc.pow = failpow{failblock.NumberU64()}
	n, err := bc.InsertChain(blocks)

	// Check that the returned error indicates the nonce failure.
@ -538,115 +484,34 @@ func TestInsertNonceError(t *testing.T) {
	}
}

// Tests that chain reorganizations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
	params.MinGasLimit = big.NewInt(125000)      // Minimum the gas limit may ever be.
	params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.

	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
		addr3 = crypto.PubkeyToAddress(key3.PublicKey)
		db, _ = ethdb.NewMemDatabase()
	)
	genesis := WriteGenesisBlockForTesting(db,
		GenesisAccount{addr1, big.NewInt(1000000)},
		GenesisAccount{addr2, big.NewInt(1000000)},
		GenesisAccount{addr3, big.NewInt(1000000)},
	)
	// Create two transactions shared between the chains:
	// - postponed: transaction included at a later block in the forked chain
	// - swapped: transaction included at the same block number in the forked chain
	postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
	swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)

	// Create two transactions that will be dropped by the forked chain:
	// - pastDrop: transaction dropped retroactively from a past block
	// - freshDrop: transaction dropped exactly at the block where the reorg is detected
	var pastDrop, freshDrop *types.Transaction

	// Create three transactions that will be added in the forked chain:
	// - pastAdd: transaction added before the reorganization is detected
	// - freshAdd: transaction added at the exact block the reorg is detected
	// - futureAdd: transaction added after the reorg has already finished
	var pastAdd, freshAdd, futureAdd *types.Transaction

	chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)

			gen.AddTx(pastDrop)  // This transaction will be dropped in the fork from below the split point
			gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork

		case 2:
			freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)

			gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
			gen.AddTx(swapped)   // This transaction will be swapped out at the exact height

			gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
		}
	})
	// Import the chain. This runs all block validation rules.
	evmux := &event.TypeMux{}
	chainman, _ := NewChainManager(db, FakePow{}, evmux)
	chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
	if i, err := chainman.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert original chain[%d]: %v", i, err)
/*
func TestGenesisMismatch(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	var mux event.TypeMux
	genesis := GenesisBlock(0, db)
	_, err := NewChainManager(genesis, db, db, db, thePow(), &mux)
	if err != nil {
		t.Error(err)
	}

	// overwrite the old chain
	chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
			gen.AddTx(pastAdd) // This transaction needs to be injected during reorg

		case 2:
			gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
			gen.AddTx(swapped)   // This transaction was swapped from the exact current spot in the original chain

			freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
			gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time

		case 3:
			futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
			gen.AddTx(futureAdd) // This transaction will be added after a full reorg
		}
	})
	if _, err := chainman.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}

	// removed tx
	for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
		if GetTransaction(db, tx.Hash()) != nil {
			t.Errorf("drop %d: tx found while shouldn't have been", i)
		}
		if GetReceipt(db, tx.Hash()) != nil {
			t.Errorf("drop %d: receipt found while shouldn't have been", i)
		}
	}
	// added tx
	for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
		if GetTransaction(db, tx.Hash()) == nil {
			t.Errorf("add %d: expected tx to be found", i)
		}
		if GetReceipt(db, tx.Hash()) == nil {
			t.Errorf("add %d: expected receipt to be found", i)
		}
	}
	// shared tx
	for i, tx := range (types.Transactions{postponed, swapped}) {
		if GetTransaction(db, tx.Hash()) == nil {
			t.Errorf("share %d: expected tx to be found", i)
		}
		if GetReceipt(db, tx.Hash()) == nil {
			t.Errorf("share %d: expected receipt to be found", i)
		}
	genesis = GenesisBlock(1, db)
	_, err = NewChainManager(genesis, db, db, db, thePow(), &mux)
	if err == nil {
		t.Error("expected genesis mismatch error")
	}
}
*/

// failpow returns false from Verify for a certain block number.
type failpow struct{ num uint64 }

func (pow failpow) Search(pow.Block, <-chan struct{}) (nonce uint64, mixHash []byte) {
	return 0, nil
}
func (pow failpow) Verify(b pow.Block) bool {
	return b.NumberU64() != pow.num
}
func (pow failpow) GetHashrate() int64 {
	return 0
}
func (pow failpow) Turbo(bool) {
}

@ -1,87 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"runtime"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/pow"
)

// nonceCheckResult contains the result of a nonce verification.
type nonceCheckResult struct {
	index int  // Index of the item verified from an input array
	valid bool // Result of the nonce verification
}

// verifyNoncesFromHeaders starts a concurrent header nonce verification,
// returning a quit channel to abort the operations and a results channel
// to retrieve the async verifications.
func verifyNoncesFromHeaders(checker pow.PoW, headers []*types.Header) (chan<- struct{}, <-chan nonceCheckResult) {
	items := make([]pow.Block, len(headers))
	for i, header := range headers {
		items[i] = types.NewBlockWithHeader(header)
	}
	return verifyNonces(checker, items)
}

// verifyNoncesFromBlocks starts a concurrent block nonce verification,
// returning a quit channel to abort the operations and a results channel
// to retrieve the async verifications.
func verifyNoncesFromBlocks(checker pow.PoW, blocks []*types.Block) (chan<- struct{}, <-chan nonceCheckResult) {
	items := make([]pow.Block, len(blocks))
	for i, block := range blocks {
		items[i] = block
	}
	return verifyNonces(checker, items)
}

// verifyNonces starts a concurrent nonce verification, returning a quit channel
// to abort the operations and a results channel to retrieve the async checks.
func verifyNonces(checker pow.PoW, items []pow.Block) (chan<- struct{}, <-chan nonceCheckResult) {
	// Spawn as many workers as allowed threads
	workers := runtime.GOMAXPROCS(0)
	if len(items) < workers {
		workers = len(items)
	}
	// Create a task channel and spawn the verifiers
	tasks := make(chan int, workers)
	results := make(chan nonceCheckResult, len(items)) // Buffered to make sure all workers stop
	for i := 0; i < workers; i++ {
		go func() {
			for index := range tasks {
				results <- nonceCheckResult{index: index, valid: checker.Verify(items[index])}
			}
		}()
	}
	// Feed item indices to the workers until done or aborted
	abort := make(chan struct{})
	go func() {
		defer close(tasks)

		for i := range items {
			select {
			case tasks <- i:
				continue
			case <-abort:
				return
			}
		}
	}()
	return abort, results
}
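How the abort/results pair returned above is meant to be consumed, as a sketch (the wrapper name is ours; the error type matches the one InsertChain constructs):

```go
package core

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/pow"
)

// checkNonces drains one result per block and aborts the remaining work on
// the first invalid nonce.
func checkNonces(checker pow.PoW, blocks []*types.Block) error {
	abort, results := verifyNoncesFromBlocks(checker, blocks)
	defer close(abort)

	for i := 0; i < len(blocks); i++ {
		if r := <-results; !r.valid {
			b := blocks[r.index]
			return &BlockNonceErr{Hash: b.Hash(), Number: b.Number(), Nonce: b.Nonce()}
		}
	}
	return nil
}
```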
@ -1,233 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
// failPow is a non-validating proof of work implementation, that returns true
|
||||
// from Verify for all but one block.
|
||||
type failPow struct {
|
||||
failing uint64
|
||||
}
|
||||
|
||||
func (pow failPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
|
||||
return 0, nil
|
||||
}
|
||||
func (pow failPow) Verify(block pow.Block) bool { return block.NumberU64() != pow.failing }
|
||||
func (pow failPow) GetHashrate() int64 { return 0 }
|
||||
func (pow failPow) Turbo(bool) {}
|
||||
|
||||
// delayedPow is a non-validating proof of work implementation, that returns true
|
||||
// from Verify for all blocks, but delays them the configured amount of time.
|
||||
type delayedPow struct {
|
||||
delay time.Duration
|
||||
}
|
||||
|
||||
func (pow delayedPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
|
||||
return 0, nil
|
||||
}
|
||||
func (pow delayedPow) Verify(block pow.Block) bool { time.Sleep(pow.delay); return true }
|
||||
func (pow delayedPow) GetHashrate() int64 { return 0 }
|
||||
func (pow delayedPow) Turbo(bool) {}
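Both stubs satisfy the same four-method pow.PoW contract (Search, Verify, GetHashrate, Turbo) that the verifier consumes. Any further test double follows the same mold; for instance, a hypothetical counting stub (countPow is not in the source and would additionally need "sync/atomic" imported):

// countPow accepts every block and atomically counts how many Verify calls
// actually ran, which is handy for asserting abort behavior.
type countPow struct {
	calls *int64
}

func (pow countPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
	return 0, nil
}
func (pow countPow) Verify(pow.Block) bool { atomic.AddInt64(pow.calls, 1); return true }
func (pow countPow) GetHashrate() int64    { return 0 }
func (pow countPow) Turbo(bool)            {}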

// Tests that simple POW verification works, for both good and bad blocks.
func TestPowVerification(t *testing.T) {
	// Create a simple chain to verify
	var (
		testdb, _ = ethdb.NewMemDatabase()
		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
		blocks    = GenerateChain(genesis, testdb, 8, nil)
	)
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	// Run the POW checker for blocks one-by-one, checking for both valid and invalid nonces
	for i := 0; i < len(blocks); i++ {
		for j, full := range []bool{true, false} {
			for k, valid := range []bool{true, false} {
				var results <-chan nonceCheckResult

				switch {
				case full && valid:
					_, results = verifyNoncesFromBlocks(FakePow{}, []*types.Block{blocks[i]})
				case full && !valid:
					_, results = verifyNoncesFromBlocks(failPow{blocks[i].NumberU64()}, []*types.Block{blocks[i]})
				case !full && valid:
					_, results = verifyNoncesFromHeaders(FakePow{}, []*types.Header{headers[i]})
				case !full && !valid:
					_, results = verifyNoncesFromHeaders(failPow{headers[i].Number.Uint64()}, []*types.Header{headers[i]})
				}
				// Wait for the verification result
				select {
				case result := <-results:
					if result.index != 0 {
						t.Errorf("test %d.%d.%d: invalid index: have %d, want 0", i, j, k, result.index)
					}
					if result.valid != valid {
						t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, result.valid, valid)
					}
				case <-time.After(time.Second):
					t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
				}
				// Make sure no more data is returned
				select {
				case result := <-results:
					t.Fatalf("test %d.%d.%d: unexpected result returned: %v", i, j, k, result)
				case <-time.After(25 * time.Millisecond):
				}
			}
		}
	}
}

// Tests that concurrent POW verification works, for both good and bad blocks.
func TestPowConcurrentVerification2(t *testing.T)  { testPowConcurrentVerification(t, 2) }
func TestPowConcurrentVerification8(t *testing.T)  { testPowConcurrentVerification(t, 8) }
func TestPowConcurrentVerification32(t *testing.T) { testPowConcurrentVerification(t, 32) }

func testPowConcurrentVerification(t *testing.T, threads int) {
	// Create a simple chain to verify
	var (
		testdb, _ = ethdb.NewMemDatabase()
		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
		blocks    = GenerateChain(genesis, testdb, 8, nil)
	)
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	// Set the number of threads to verify on
	old := runtime.GOMAXPROCS(threads)
	defer runtime.GOMAXPROCS(old)

	// Run the POW checker for the entire block chain at once both for a valid and
	// also an invalid chain (enough if one is invalid, last but one (arbitrary)).
	for i, full := range []bool{true, false} {
		for j, valid := range []bool{true, false} {
			var results <-chan nonceCheckResult

			switch {
			case full && valid:
				_, results = verifyNoncesFromBlocks(FakePow{}, blocks)
			case full && !valid:
				_, results = verifyNoncesFromBlocks(failPow{uint64(len(blocks) - 1)}, blocks)
			case !full && valid:
				_, results = verifyNoncesFromHeaders(FakePow{}, headers)
			case !full && !valid:
				_, results = verifyNoncesFromHeaders(failPow{uint64(len(headers) - 1)}, headers)
			}
			// Wait for all the verification results
			checks := make(map[int]bool)
			for k := 0; k < len(blocks); k++ {
				select {
				case result := <-results:
					if _, ok := checks[result.index]; ok {
						t.Fatalf("test %d.%d.%d: duplicate results for %d", i, j, k, result.index)
					}
					if result.index < 0 || result.index >= len(blocks) {
						t.Fatalf("test %d.%d.%d: result %d out of bounds [%d, %d]", i, j, k, result.index, 0, len(blocks)-1)
					}
					checks[result.index] = result.valid

				case <-time.After(time.Second):
					t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
				}
			}
			// Check nonce check validity
			for k := 0; k < len(blocks); k++ {
				want := valid || (k != len(blocks)-2) // We chose the last but one nonce in the chain to fail
				if checks[k] != want {
					t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, checks[k], want)
				}
			}
			// Make sure no more data is returned
			select {
			case result := <-results:
				t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
			case <-time.After(25 * time.Millisecond):
			}
		}
	}
}

// Tests that aborting a POW validation indeed prevents further checks from being
// run, as well as checks that no left-over goroutines are leaked.
func TestPowConcurrentAbortion2(t *testing.T)  { testPowConcurrentAbortion(t, 2) }
func TestPowConcurrentAbortion8(t *testing.T)  { testPowConcurrentAbortion(t, 8) }
func TestPowConcurrentAbortion32(t *testing.T) { testPowConcurrentAbortion(t, 32) }

func testPowConcurrentAbortion(t *testing.T, threads int) {
	// Create a simple chain to verify
	var (
		testdb, _ = ethdb.NewMemDatabase()
		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
		blocks    = GenerateChain(genesis, testdb, 1024, nil)
	)
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	// Set the number of threads to verify on
	old := runtime.GOMAXPROCS(threads)
	defer runtime.GOMAXPROCS(old)

	// Run the POW checker for the entire block chain at once
	for i, full := range []bool{true, false} {
		var abort chan<- struct{}
		var results <-chan nonceCheckResult

		// Start the verifications and immediately abort
		if full {
			abort, results = verifyNoncesFromBlocks(delayedPow{time.Millisecond}, blocks)
		} else {
			abort, results = verifyNoncesFromHeaders(delayedPow{time.Millisecond}, headers)
		}
		close(abort)

		// Deplete the results channel
		verified := make(map[int]struct{})
		for depleted := false; !depleted; {
			select {
			case result := <-results:
				verified[result.index] = struct{}{}
			case <-time.After(50 * time.Millisecond):
				depleted = true
			}
		}
		// Check that abortion was honored by not processing too many POWs
		if len(verified) > 2*threads {
			t.Errorf("test %d: verification count too large: have %d, want below %d", i, len(verified), 2*threads)
		}
		// Check that there are no gaps in the results
		for j := 0; j < len(verified); j++ {
			if _, ok := verified[j]; !ok {
				t.Errorf("test %d.%d: gap found in verification results", i, j)
			}
		}
	}
}
@ -19,10 +19,10 @@ package core
import (
	"bytes"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/params"
@ -30,18 +30,9 @@ import (
)

var (
	headHeaderKey = []byte("LastHeader")
	headBlockKey  = []byte("LastBlock")

	blockPrefix    = []byte("block-")
	blockNumPrefix = []byte("block-num-")

	headerSuffix = []byte("-header")
	bodySuffix   = []byte("-body")
	tdSuffix     = []byte("-td")

	blockHashPre  = []byte("block-hash-")
	blockNumPre   = []byte("block-num-")
	ExpDiffPeriod = big.NewInt(100000)
	blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
)

// CalcDifficulty is the difficulty adjustment algorithm. It returns
@ -78,6 +69,16 @@ func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int)
	return diff
}

// CalcTD computes the total difficulty of block.
func CalcTD(block, parent *types.Block) *big.Int {
	if parent == nil {
		return block.Difficulty()
	}
	d := block.Difficulty()
	d.Add(d, parent.Td)
	return d
}
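Total difficulty is simply the running sum TD(block) = TD(parent) + difficulty(block). Worth noting: d.Add(d, parent.Td) accumulates into the very *big.Int that block.Difficulty() returned, so if that accessor hands out the block's internal pointer, the block's own difficulty value is silently rewritten. A hedged, aliasing-safe variant (calcTDCopy is illustrative, not in the source):

// calcTDCopy mirrors CalcTD but sums into a fresh big.Int, leaving the
// value returned by block.Difficulty() untouched.
func calcTDCopy(block, parent *types.Block) *big.Int {
	if parent == nil {
		return new(big.Int).Set(block.Difficulty())
	}
	return new(big.Int).Add(block.Difficulty(), parent.Td)
}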

// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
@ -111,230 +112,8 @@ func CalcGasLimit(parent *types.Block) *big.Int {
	return gl
}

// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
	data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// GetHeadHeaderHash retrieves the hash of the current canonical head block's
// header. The difference between this and GetHeadBlockHash is that whereas the
// last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the
// fast synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// GetHeadBlockHash retrieves the hash of the current canonical head block.
func GetHeadBlockHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
	data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...))
	return data
}

// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
func GetHeader(db ethdb.Database, hash common.Hash) *types.Header {
	data := GetHeaderRLP(db, hash)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
		return nil
	}
	return header
}

// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
	data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...))
	return data
}

// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash) *types.Body {
	data := GetBodyRLP(db, hash)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
		return nil
	}
	return body
}

// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
	data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
		return nil
	}
	return td
}

// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
	// Retrieve the block header and body contents
	header := GetHeader(db, hash)
	if header == nil {
		return nil
	}
	body := GetBody(db, hash)
	if body == nil {
		return nil
	}
	// Reassemble the block and return
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}
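All of the readers above derive their keys from the prefix and suffix byte slices declared at the top of the file. A sketch of the layout and one key builder (headerKey is illustrative; the real code inlines the appends):

// Key layout used by the split storage scheme above (sketch):
//   "block-" + hash + "-header"  -> RLP-encoded header
//   "block-" + hash + "-body"    -> RLP-encoded transactions and uncles
//   "block-" + hash + "-td"      -> RLP-encoded total difficulty
//   "block-num-" + number        -> canonical hash for that height
func headerKey(hash common.Hash) []byte {
	return append(append(blockPrefix, hash[:]...), headerSuffix...)
}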

// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
	key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
	if err := db.Put(key, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store number to hash mapping into database: %v", err)
		return err
	}
	return nil
}

// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last header's hash into database: %v", err)
		return err
	}
	return nil
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last block's hash into database: %v", err)
		return err
	}
	return nil
}

// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store header into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
	return nil
}

// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block body into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
	return nil
}

// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block total difficulty into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
	return nil
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error {
	// Store the body first to retain database consistency
	if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
		return err
	}
	// Store the header too, signaling full block ownership
	if err := WriteHeader(db, block.Header()); err != nil {
		return err
	}
	return nil
}
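The writers mirror the readers one-to-one, so a full import reduces to a handful of calls. A usage sketch built only from the functions above (roundTrip is a made-up name; error handling elided for brevity):

// roundTrip persists a block with its metadata, then reads it back through
// the canonical number index.
func roundTrip(db ethdb.Database, block *types.Block, td *big.Int) *types.Block {
	WriteBlock(db, block)                                   // header + body
	WriteTd(db, block.Hash(), td)                           // total difficulty
	WriteCanonicalHash(db, block.Hash(), block.NumberU64()) // number -> hash

	hash := GetCanonicalHash(db, block.NumberU64())
	return GetBlock(db, hash)
}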

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
	db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
}

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash) {
	db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...))
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash) {
	db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...))
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash) {
	db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash) {
	DeleteHeader(db, hash)
	DeleteBody(db, hash)
	DeleteTd(db, hash)
}

// [deprecated by eth/63]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
// GetBlockByHash returns the block corresponding to the hash or nil if not found
func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
	data, _ := db.Get(append(blockHashPre, hash[:]...))
	if len(data) == 0 {
		return nil
@ -346,3 +125,55 @@ func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
	}
	return (*types.Block)(&block)
}

// GetBlockByNumber returns the canonical block by number or nil if not found
func GetBlockByNumber(db common.Database, number uint64) *types.Block {
	key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
	if len(key) == 0 {
		return nil
	}

	return GetBlockByHash(db, common.BytesToHash(key))
}

// WriteCanonNumber writes the canonical hash for the given block
func WriteCanonNumber(db common.Database, block *types.Block) error {
	key := append(blockNumPre, block.Number().Bytes()...)
	err := db.Put(key, block.Hash().Bytes())
	if err != nil {
		return err
	}
	return nil
}

// WriteHead force writes the current head
func WriteHead(db common.Database, block *types.Block) error {
	err := WriteCanonNumber(db, block)
	if err != nil {
		return err
	}
	err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
	if err != nil {
		return err
	}
	return nil
}

// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := db.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
		return err
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}

	return nil
}
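The tail of this hunk is the heart of the revert: the split header/body/td records give way to the old combined representation, where a single record holds the whole block (including its TD, via the StorageBlock encoding) under one key. A sketch of the restored keying (oldBlockKey is an illustrative name, mirroring the append in WriteBlock above):

// oldBlockKey builds the combined-record key restored by this commit:
//   "block-hash-" + hash  -> RLP(StorageBlock)  // header, body and TD in one
//   "block-num-" + number -> canonical hash
func oldBlockKey(hash common.Hash) []byte {
	return append(blockHashPre, hash.Bytes()...)
}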
@ -23,10 +23,6 @@ import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
)

type diffTest struct {
@ -79,242 +75,3 @@ func TestDifficulty(t *testing.T) {
		}
	}
}

// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test header to move around the database and make sure it's really new
	header := &types.Header{Extra: []byte("test header")}
	if entry := GetHeader(db, header.Hash()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	// Write and verify the header in the database
	if err := WriteHeader(db, header); err != nil {
		t.Fatalf("Failed to write header into database: %v", err)
	}
	if entry := GetHeader(db, header.Hash()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != header.Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
	}
	if entry := GetHeaderRLP(db, header.Hash()); entry == nil {
		t.Fatalf("Stored header RLP not found")
	} else {
		hasher := sha3.NewKeccak256()
		hasher.Write(entry)

		if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
			t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
		}
	}
	// Delete the header and verify the execution
	DeleteHeader(db, header.Hash())
	if entry := GetHeader(db, header.Hash()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
}

// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test body to move around the database and make sure it's really new
	body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}

	hasher := sha3.NewKeccak256()
	rlp.Encode(hasher, body)
	hash := common.BytesToHash(hasher.Sum(nil))

	if entry := GetBody(db, hash); entry != nil {
		t.Fatalf("Non existent body returned: %v", entry)
	}
	// Write and verify the body in the database
	if err := WriteBody(db, hash, body); err != nil {
		t.Fatalf("Failed to write body into database: %v", err)
	}
	if entry := GetBody(db, hash); entry == nil {
		t.Fatalf("Stored body not found")
	} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
	}
	if entry := GetBodyRLP(db, hash); entry == nil {
		t.Fatalf("Stored body RLP not found")
	} else {
		hasher := sha3.NewKeccak256()
		hasher.Write(entry)

		if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
			t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
		}
	}
	// Delete the body and verify the execution
	DeleteBody(db, hash)
	if entry := GetBody(db, hash); entry != nil {
		t.Fatalf("Deleted body returned: %v", entry)
	}
}

// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test block to move around the database and make sure it's really new
	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
	if entry := GetBlock(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	if entry := GetHeader(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	if entry := GetBody(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent body returned: %v", entry)
	}
	// Write and verify the block in the database
	if err := WriteBlock(db, block); err != nil {
		t.Fatalf("Failed to write block into database: %v", err)
	}
	if entry := GetBlock(db, block.Hash()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
	if entry := GetHeader(db, block.Hash()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != block.Header().Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
	}
	if entry := GetBody(db, block.Hash()); entry == nil {
		t.Fatalf("Stored body not found")
	} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()})
	}
	// Delete the block and verify the execution
	DeleteBlock(db, block.Hash())
	if entry := GetBlock(db, block.Hash()); entry != nil {
		t.Fatalf("Deleted block returned: %v", entry)
	}
	if entry := GetHeader(db, block.Hash()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
	if entry := GetBody(db, block.Hash()); entry != nil {
		t.Fatalf("Deleted body returned: %v", entry)
	}
}

// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})

	// Store a header and check that it's not recognized as a block
	if err := WriteHeader(db, block.Header()); err != nil {
		t.Fatalf("Failed to write header into database: %v", err)
	}
	if entry := GetBlock(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	DeleteHeader(db, block.Hash())

	// Store a body and check that it's not recognized as a block
	if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
		t.Fatalf("Failed to write body into database: %v", err)
	}
	if entry := GetBlock(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	DeleteBody(db, block.Hash())

	// Store a header and a body separately and check reassembly
	if err := WriteHeader(db, block.Header()); err != nil {
		t.Fatalf("Failed to write header into database: %v", err)
	}
	if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
		t.Fatalf("Failed to write body into database: %v", err)
	}
	if entry := GetBlock(db, block.Hash()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
}

// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test TD to move around the database and make sure it's really new
	hash, td := common.Hash{}, big.NewInt(314)
	if entry := GetTd(db, hash); entry != nil {
		t.Fatalf("Non existent TD returned: %v", entry)
	}
	// Write and verify the TD in the database
	if err := WriteTd(db, hash, td); err != nil {
		t.Fatalf("Failed to write TD into database: %v", err)
	}
	if entry := GetTd(db, hash); entry == nil {
		t.Fatalf("Stored TD not found")
	} else if entry.Cmp(td) != 0 {
		t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
	}
	// Delete the TD and verify the execution
	DeleteTd(db, hash)
	if entry := GetTd(db, hash); entry != nil {
		t.Fatalf("Deleted TD returned: %v", entry)
	}
}

// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test canonical number and assigned hash to move around
	hash, number := common.Hash{0: 0xff}, uint64(314)
	if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Non existent canonical mapping returned: %v", entry)
	}
	// Write and verify the canonical mapping in the database
	if err := WriteCanonicalHash(db, hash, number); err != nil {
		t.Fatalf("Failed to write canonical mapping into database: %v", err)
	}
	if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
		t.Fatalf("Stored canonical mapping not found")
	} else if entry != hash {
		t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
	}
	// Delete the canonical mapping and verify the execution
	DeleteCanonicalHash(db, number)
	if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Deleted canonical mapping returned: %v", entry)
	}
}

// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})

	// Check that no head entries are in a pristine database
	if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non head header entry returned: %v", entry)
	}
	if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non head block entry returned: %v", entry)
	}
	// Assign separate entries for the head header and block
	if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
		t.Fatalf("Failed to write head header hash: %v", err)
	}
	if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
		t.Fatalf("Failed to write head block hash: %v", err)
	}
	// Check that both heads are present, and different (i.e. two heads maintained)
	if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
	}
	if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
	}
}
@ -177,14 +177,3 @@ func IsValueTransferErr(e error) bool {
	_, ok := e.(*ValueTransferError)
	return ok
}

type BadHashError common.Hash

func (h BadHashError) Error() string {
	return fmt.Sprintf("Found known bad hash in chain %x", h[:])
}

func IsBadHashError(err error) bool {
	_, ok := err.(BadHashError)
	return ok
}
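BadHashError rides the ordinary error interface, so call sites only need the predicate. A minimal sketch (illustrative; fmt is already imported in this file):

var err error = BadHashError(common.Hash{0: 0xde, 1: 0xad})
if IsBadHashError(err) {
	fmt.Println(err) // prints "Found known bad hash in chain dead00..." for the hash above
}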
@ -36,9 +36,6 @@ type NewBlockEvent struct{ Block *types.Block }
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }

// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }

// ChainSplit is posted when a new head is detected
type ChainSplitEvent struct {
	Block *types.Block

@ -131,12 +131,12 @@ done:

func includes(addresses []common.Address, a common.Address) bool {
	for _, addr := range addresses {
		if addr == a {
			return true
		if addr != a {
			return false
		}
	}

	return false
	return true
}
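Worth flagging: the two includes variants in this hunk are not equivalent. The removed form answers membership ("is a in the slice?"), while the restored form returns false at the first element that differs, effectively testing that every element equals a (and answering true for an empty slice). A tiny demonstration (illustrative; assumes fmt is imported):

func includesDemo() {
	addrs := []common.Address{{1}, {2}}
	target := common.Address{2}

	// Membership variant (removed): true, since {2} is present.
	// First-mismatch variant (restored): false, since addrs[0] differs.
	fmt.Println(includes(addrs, target))
}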

func (self *Filter) FilterLogs(logs state.Logs) state.Logs {

@ -27,14 +27,13 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/params"
)

// WriteGenesisBlock writes the genesis block to the database as block number 0
func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, error) {
func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block, error) {
	contents, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
@ -83,35 +82,34 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
		Coinbase: common.HexToAddress(genesis.Coinbase),
		Root:     statedb.Root(),
	}, nil, nil, nil)
	block.Td = difficulty

	if block := GetBlock(chainDb, block.Hash()); block != nil {
	if block := GetBlockByHash(chainDb, block.Hash()); block != nil {
		glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
		err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
		err := WriteCanonNumber(chainDb, block)
		if err != nil {
			return nil, err
		}
		return block, nil
	}

	statedb.Sync()

	if err := WriteTd(chainDb, block.Hash(), difficulty); err != nil {
	err = WriteBlock(chainDb, block)
	if err != nil {
		return nil, err
	}
	if err := WriteBlock(chainDb, block); err != nil {
		return nil, err
	}
	if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
		return nil, err
	}
	if err := WriteHeadBlockHash(chainDb, block.Hash()); err != nil {
	err = WriteHead(chainDb, block)
	if err != nil {
		return nil, err
	}

	return block, nil
}

// GenesisBlockForTesting creates a block in which addr has the given wei balance.
// The state trie of the block is written to db.
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
func GenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
	statedb := state.New(common.Hash{}, db)
	obj := statedb.GetOrNewStateObject(addr)
	obj.SetBalance(balance)
@ -122,35 +120,24 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big
		GasLimit: params.GenesisGasLimit,
		Root:     statedb.Root(),
	}, nil, nil, nil)
	block.Td = params.GenesisDifficulty
	return block
}

type GenesisAccount struct {
	Address common.Address
	Balance *big.Int
}

func WriteGenesisBlockForTesting(db ethdb.Database, accounts ...GenesisAccount) *types.Block {
	accountJson := "{"
	for i, account := range accounts {
		if i != 0 {
			accountJson += ","
		}
		accountJson += fmt.Sprintf(`"0x%x":{"balance":"0x%x"}`, account.Address, account.Balance.Bytes())
	}
	accountJson += "}"
func WriteGenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
	testGenesis := fmt.Sprintf(`{
	"nonce":"0x%x",
	"gasLimit":"0x%x",
	"difficulty":"0x%x",
	"alloc": %s
}`, types.EncodeNonce(0), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes(), accountJson)
	"alloc": {
		"0x%x":{"balance":"0x%x"}
	}
}`, types.EncodeNonce(0), params.GenesisGasLimit.Bytes(), params.GenesisDifficulty.Bytes(), addr, balance.Bytes())
	block, _ := WriteGenesisBlock(db, strings.NewReader(testGenesis))
	return block
}

func WriteTestNetGenesisBlock(chainDb ethdb.Database, nonce uint64) (*types.Block, error) {
func WriteTestNetGenesisBlock(chainDb common.Database, nonce uint64) (*types.Block, error) {
	testGenesis := fmt.Sprintf(`{
	"nonce":"0x%x",
	"gasLimit":"0x%x",
@ -22,7 +22,7 @@ import (

	"github.com/ethereum/go-ethereum/core/types"
	// "github.com/ethereum/go-ethereum/crypto"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)
@ -32,7 +32,7 @@ type TestManager struct {
	// stateManager *StateManager
	eventMux *event.TypeMux

	db         ethdb.Database
	db         common.Database
	txPool     *TxPool
	blockChain *ChainManager
	Blocks     []*types.Block
@ -74,7 +74,7 @@ func (tm *TestManager) EventMux() *event.TypeMux {
// 	return nil
// }

func (tm *TestManager) Db() ethdb.Database {
func (tm *TestManager) Db() common.Database {
	return tm.db
}

@ -18,7 +18,7 @@ package core

import (
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/event"
)

@ -28,7 +28,7 @@ type Backend interface {
	BlockProcessor() *BlockProcessor
	ChainManager() *ChainManager
	TxPool() *TxPool
	ChainDb() ethdb.Database
	DappDb() ethdb.Database
	ChainDb() common.Database
	DappDb() common.Database
	EventMux() *event.TypeMux
}
@ -23,7 +23,6 @@ import (

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/rlp"
@ -57,7 +56,7 @@ func (self Storage) Copy() Storage {

type StateObject struct {
	// State database for storing state changes
	db ethdb.Database
	db common.Database
	trie *trie.SecureTrie

	// Address belonging to this account
@ -88,7 +87,11 @@ type StateObject struct {
	dirty bool
}

func NewStateObject(address common.Address, db ethdb.Database) *StateObject {
func (self *StateObject) Reset() {
	self.storage = make(Storage)
}

func NewStateObject(address common.Address, db common.Database) *StateObject {
	object := &StateObject{db: db, address: address, balance: new(big.Int), gasPool: new(big.Int), dirty: true}
	object.trie = trie.NewSecure((common.Hash{}).Bytes(), db)
	object.storage = make(Storage)
@ -97,7 +100,7 @@ func NewStateObject(address common.Address, db ethdb.Database) *StateObject {
	return object
}

func NewStateObjectFromBytes(address common.Address, data []byte, db ethdb.Database) *StateObject {
func NewStateObjectFromBytes(address common.Address, data []byte, db common.Database) *StateObject {
	// TODO clean me up
	var extobject struct {
		Nonce uint64
@ -181,6 +184,14 @@ func (self *StateObject) Update() {
	}
}

func (c *StateObject) GetInstr(pc *big.Int) *common.Value {
	if int64(len(c.code)-1) < pc.Int64() {
		return common.NewValue(0)
	}

	return common.NewValueFromBytes([]byte{c.code[pc.Int64()]})
}

func (c *StateObject) AddBalance(amount *big.Int) {
	c.SetBalance(new(big.Int).Add(c.balance, amount))

@ -252,11 +263,14 @@ func (self *StateObject) Copy() *StateObject {
	stateObject.gasPool.Set(self.gasPool)
	stateObject.remove = self.remove
	stateObject.dirty = self.dirty
	stateObject.deleted = self.deleted

	return stateObject
}

func (self *StateObject) Set(stateObject *StateObject) {
	*self = *stateObject
}

//
// Attribute accessors
//
@ -265,11 +279,20 @@ func (self *StateObject) Balance() *big.Int {
	return self.balance
}

func (c *StateObject) N() *big.Int {
	return big.NewInt(int64(c.nonce))
}

// Returns the address of the contract/account
func (c *StateObject) Address() common.Address {
	return c.address
}

// Returns the initialization Code
func (c *StateObject) Init() Code {
	return c.initCode
}

func (self *StateObject) Trie() *trie.SecureTrie {
	return self.trie
}
@ -287,6 +310,11 @@ func (self *StateObject) SetCode(code []byte) {
	self.dirty = true
}

func (self *StateObject) SetInitCode(code []byte) {
	self.initCode = code
	self.dirty = true
}

func (self *StateObject) SetNonce(nonce uint64) {
	self.nonce = nonce
	self.dirty = true
@ -325,6 +353,19 @@ func (c *StateObject) CodeHash() common.Bytes {
	return crypto.Sha3(c.code)
}

func (c *StateObject) RlpDecode(data []byte) {
	decoder := common.NewValueFromBytes(data)
	c.nonce = decoder.Get(0).Uint()
	c.balance = decoder.Get(1).BigInt()
	c.trie = trie.NewSecure(decoder.Get(2).Bytes(), c.db)
	c.storage = make(map[string]common.Hash)
	c.gasPool = new(big.Int)

	c.codeHash = decoder.Get(3).Bytes()

	c.code, _ = c.db.Get(c.codeHash)
}

// Storage change object. Used by the manifest for notifying changes to
// the sub channels.
type StorageState struct {
@ -17,7 +17,6 @@
package state

import (
	"bytes"
	"math/big"
	"testing"

@ -118,106 +117,3 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {

	c.Assert(data1, checker.DeepEquals, res)
}

// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	state := New(common.Hash{}, db)

	stateobjaddr0 := toAddr([]byte("so0"))
	stateobjaddr1 := toAddr([]byte("so1"))
	var storageaddr common.Hash

	data0 := common.BytesToHash([]byte{17})
	data1 := common.BytesToHash([]byte{18})

	state.SetState(stateobjaddr0, storageaddr, data0)
	state.SetState(stateobjaddr1, storageaddr, data1)

	// db, trie are already non-empty values
	so0 := state.GetStateObject(stateobjaddr0)
	so0.balance = big.NewInt(42)
	so0.nonce = 43
	so0.gasPool = big.NewInt(44)
	so0.code = []byte{'c', 'a', 'f', 'e'}
	so0.codeHash = so0.CodeHash()
	so0.remove = true
	so0.deleted = false
	so0.dirty = false
	state.SetStateObject(so0)

	// and one with deleted == true
	so1 := state.GetStateObject(stateobjaddr1)
	so1.balance = big.NewInt(52)
	so1.nonce = 53
	so1.gasPool = big.NewInt(54)
	so1.code = []byte{'c', 'a', 'f', 'e', '2'}
	so1.codeHash = so1.CodeHash()
	so1.remove = true
	so1.deleted = true
	so1.dirty = true
	state.SetStateObject(so1)

	so1 = state.GetStateObject(stateobjaddr1)
	if so1 != nil {
		t.Fatalf("deleted object not nil when getting")
	}

	snapshot := state.Copy()
	state.Set(snapshot)

	so0Restored := state.GetStateObject(stateobjaddr0)
	so1Restored := state.GetStateObject(stateobjaddr1)
	// non-deleted is equal (restored)
	compareStateObjects(so0Restored, so0, t)
	// deleted should be nil, both before and after restore of state copy
	if so1Restored != nil {
		t.Fatalf("deleted object not nil after restoring snapshot")
	}
}

func compareStateObjects(so0, so1 *StateObject, t *testing.T) {
	if so0.address != so1.address {
		t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
	}
	if so0.balance.Cmp(so1.balance) != 0 {
		t.Fatalf("Balance mismatch: have %v, want %v", so0.balance, so1.balance)
	}
	if so0.nonce != so1.nonce {
		t.Fatalf("Nonce mismatch: have %v, want %v", so0.nonce, so1.nonce)
	}
	if !bytes.Equal(so0.codeHash, so1.codeHash) {
		t.Fatalf("CodeHash mismatch: have %v, want %v", so0.codeHash, so1.codeHash)
	}
	if !bytes.Equal(so0.code, so1.code) {
		t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
	}
	if !bytes.Equal(so0.initCode, so1.initCode) {
		t.Fatalf("InitCode mismatch: have %v, want %v", so0.initCode, so1.initCode)
	}

	for k, v := range so1.storage {
		if so0.storage[k] != v {
			t.Fatalf("Storage key %s mismatch: have %v, want %v", k, so0.storage[k], v)
		}
	}
	for k, v := range so0.storage {
		if so1.storage[k] != v {
			t.Fatalf("Storage key %s mismatch: have %v, want none.", k, v)
		}
	}

	if so0.gasPool.Cmp(so1.gasPool) != 0 {
		t.Fatalf("GasPool mismatch: have %v, want %v", so0.gasPool, so1.gasPool)
	}
	if so0.remove != so1.remove {
		t.Fatalf("Remove mismatch: have %v, want %v", so0.remove, so1.remove)
	}
	if so0.deleted != so1.deleted {
		t.Fatalf("Deleted mismatch: have %v, want %v", so0.deleted, so1.deleted)
	}
	if so0.dirty != so1.dirty {
		t.Fatalf("Dirty mismatch: have %v, want %v", so0.dirty, so1.dirty)
	}
}
@ -18,10 +18,10 @@
package state

import (
	"bytes"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/trie"
@ -33,7 +33,7 @@ import (
// * Contracts
// * Accounts
type StateDB struct {
	db   ethdb.Database
	db   common.Database
	trie *trie.SecureTrie
	root common.Hash

@ -48,7 +48,7 @@ type StateDB struct {
}

// Create a new state from a given trie
func New(root common.Hash, db ethdb.Database) *StateDB {
func New(root common.Hash, db common.Database) *StateDB {
	trie := trie.NewSecure(root[:], db)
	return &StateDB{root: root, db: db, trie: trie, stateObjects: make(map[string]*StateObject), refund: new(big.Int), logs: make(map[common.Hash]Logs)}
}
@ -276,6 +276,10 @@ func (self *StateDB) CreateAccount(addr common.Address) *StateObject {
// Setting, copying of the state methods
//

func (s *StateDB) Cmp(other *StateDB) bool {
	return bytes.Equal(s.trie.Root(), other.trie.Root())
}

func (self *StateDB) Copy() *StateDB {
	state := New(common.Hash{}, self.db)
	state.trie = self.trie
@ -307,6 +311,22 @@ func (s *StateDB) Root() common.Hash {
	return common.BytesToHash(s.trie.Root())
}

func (s *StateDB) Trie() *trie.SecureTrie {
	return s.trie
}

// Resets the trie and all siblings
func (s *StateDB) Reset() {
	s.trie.Reset()

	// Reset all nested states
	for _, stateObject := range s.stateObjects {
		stateObject.Reset()
	}

	s.Empty()
}

// Syncs the trie and all siblings
func (s *StateDB) Sync() {
	// Sync all nested states
@ -45,7 +45,7 @@ import (
 * 6) Derive new state root
 */
type StateTransition struct {
	gp            GasPool
	coinbase      common.Address
	msg           Message
	gas, gasPrice *big.Int
	initialGas    *big.Int
@ -53,6 +53,8 @@ type StateTransition struct {
	data  []byte
	state *state.StateDB

	cb, rec, sen *state.StateObject

	env vm.Environment
}

@ -94,13 +96,13 @@ func IntrinsicGas(data []byte) *big.Int {
	return igas
}

func ApplyMessage(env vm.Environment, msg Message, gp GasPool) ([]byte, *big.Int, error) {
	return NewStateTransition(env, msg, gp).transitionState()
func ApplyMessage(env vm.Environment, msg Message, coinbase *state.StateObject) ([]byte, *big.Int, error) {
	return NewStateTransition(env, msg, coinbase).transitionState()
}

func NewStateTransition(env vm.Environment, msg Message, gp GasPool) *StateTransition {
func NewStateTransition(env vm.Environment, msg Message, coinbase *state.StateObject) *StateTransition {
	return &StateTransition{
		gp:       gp,
		coinbase: coinbase.Address(),
		env:      env,
		msg:      msg,
		gas:      new(big.Int),
@ -109,9 +111,13 @@ func NewStateTransition(env vm.Environment, msg Message, gp GasPool) *StateTrans
		value: msg.Value(),
		data:  msg.Data(),
		state: env.State(),
		cb:    coinbase,
	}
}

func (self *StateTransition) Coinbase() *state.StateObject {
	return self.state.GetOrNewStateObject(self.coinbase)
}
func (self *StateTransition) From() (*state.StateObject, error) {
	f, err := self.msg.From()
	if err != nil {
@ -154,7 +160,7 @@ func (self *StateTransition) BuyGas() error {
	if sender.Balance().Cmp(mgval) < 0 {
		return fmt.Errorf("insufficient ETH for gas (%x). Req %v, has %v", sender.Address().Bytes()[:4], mgval, sender.Balance())
	}
	if err = self.gp.SubGas(mgas, self.gasPrice); err != nil {
	if err = self.Coinbase().SubGas(mgas, self.gasPrice); err != nil {
		return err
	}
	self.AddGas(mgas)
@ -235,12 +241,13 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er
	}

	self.refundGas()
	self.state.AddBalance(self.env.Coinbase(), new(big.Int).Mul(self.gasUsed(), self.gasPrice))
	self.state.AddBalance(self.coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))

	return ret, self.gasUsed(), err
}

func (self *StateTransition) refundGas() {
	coinbase := self.Coinbase()
	sender, _ := self.From() // err already checked
	// Return remaining gas
	remaining := new(big.Int).Mul(self.gas, self.gasPrice)
@ -251,7 +258,7 @@ func (self *StateTransition) refundGas() {
	self.gas.Add(self.gas, refund)
	self.state.AddBalance(sender.Address(), refund.Mul(refund, self.gasPrice))

	self.gp.AddGas(self.gas, self.gasPrice)
	coinbase.AddGas(self.gas, self.gasPrice)
}

func (self *StateTransition) gasUsed() *big.Int {
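The hunks above swap chain-level gas accounting for per-coinbase accounting: BuyGas debits the coinbase StateObject (Coinbase().SubGas) and refundGas credits it back, where the removed code routed the same two calls through a block-wide GasPool (gp.SubGas / gp.AddGas). A toy illustration of the pool-style bookkeeping the removed code relied on (toyPool is not the real GasPool, which lives elsewhere in package core; this is only a sketch of the SubGas/AddGas symmetry):

// toyPool tracks remaining block gas: SubGas reserves, AddGas returns leftovers.
type toyPool struct{ gas *big.Int }

func (p *toyPool) SubGas(amount, price *big.Int) error {
	if p.gas.Cmp(amount) < 0 {
		return fmt.Errorf("gas limit reached")
	}
	p.gas.Sub(p.gas, amount)
	return nil
}

func (p *toyPool) AddGas(amount, price *big.Int) {
	p.gas.Add(p.gas, amount)
}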
@ -81,7 +81,7 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func(
		gasLimit:     gasLimitFn,
		minGasPrice:  new(big.Int),
		pendingState: state.ManageState(currentStateFn()),
		events:       eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
		events:       eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}),
	}
	go pool.eventLoop()

@ -93,18 +93,16 @@ func (pool *TxPool) eventLoop() {
	// we need to know the new state. The new state will help us determine
	// the nonces in the managed state
	for ev := range pool.events.Chan() {
		pool.mu.Lock()

		switch ev := ev.(type) {
		case ChainHeadEvent:
			pool.mu.Lock()
			pool.resetState()
			pool.mu.Unlock()
		case GasPriceChanged:
			pool.mu.Lock()
			pool.minGasPrice = ev.Price
			pool.mu.Unlock()
		case RemovedTransactionEvent:
			pool.AddTransactions(ev.Txs)
		}

		pool.mu.Unlock()
	}
}

@ -123,8 +121,8 @@ func (pool *TxPool) resetState() {
		if addr, err := tx.From(); err == nil {
			// Set the nonce. Transaction nonce can never be lower
			// than the state nonce; validatePool took care of that.
			if pool.pendingState.GetNonce(addr) <= tx.Nonce() {
				pool.pendingState.SetNonce(addr, tx.Nonce()+1)
			if pool.pendingState.GetNonce(addr) < tx.Nonce() {
				pool.pendingState.SetNonce(addr, tx.Nonce())
			}
		}
	}
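The resetState hunk above changes how the pending nonce is rebuilt after a new head: the removed code bumps it past every known transaction, while the restored code only raises it to the highest transaction nonce itself. TestNonceRecovery (removed below) pinned the former behavior, expecting GetNonce == n+1 after a reset. The two update rules side by side (illustrative helper names, not in the source):

// nextNonceRemoved is the rule this commit deletes: point past the tx.
func nextNonceRemoved(pending, txNonce uint64) uint64 {
	if pending <= txNonce {
		return txNonce + 1 // next usable nonce
	}
	return pending
}

// nextNonceRestored is the rule this commit brings back: stop at the tx.
func nextNonceRestored(pending, txNonce uint64) uint64 {
	if pending < txNonce {
		return txNonce // highest nonce seen so far
	}
	return pending
}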
@ -219,34 +219,3 @@ func TestMissingNonce(t *testing.T) {
		t.Error("expected 1 queued transaction, got", len(pool.queue[addr]))
	}
}

func TestNonceRecovery(t *testing.T) {
	const n = 10
	pool, key := setupTxPool()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	pool.currentState().SetNonce(addr, n)
	pool.currentState().AddBalance(addr, big.NewInt(100000000000000))
	pool.resetState()
	tx := transaction(n, big.NewInt(100000), key)
	if err := pool.Add(tx); err != nil {
		t.Error(err)
	}
	// simulate some weird re-order of transactions and missing nonce(s)
	pool.currentState().SetNonce(addr, n-1)
	pool.resetState()
	if fn := pool.pendingState.GetNonce(addr); fn != n+1 {
		t.Errorf("expected nonce to be %d, got %d", n+1, fn)
	}
}

func TestRemovedTxEvent(t *testing.T) {
	pool, key := setupTxPool()
	tx := transaction(0, big.NewInt(1000000), key)
	from, _ := tx.From()
	pool.currentState().AddBalance(from, big.NewInt(1000000000000))
	pool.eventMux.Post(RemovedTransactionEvent{types.Transactions{tx}})
	pool.eventMux.Post(ChainHeadEvent{nil})
	if len(pool.pending) != 1 {
		t.Error("expected 1 pending tx, got", len(pool.pending))
	}
}
@ -32,7 +32,7 @@ var (
)

// PutTransactions stores the transactions in the given database
func PutTransactions(db ethdb.Database, block *types.Block, txs types.Transactions) {
func PutTransactions(db common.Database, block *types.Block, txs types.Transactions) {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)

@ -77,24 +77,8 @@ func PutTransactions(db ethdb.Database, block *types.Block, txs types.Transactio
	}
}

func DeleteTransaction(db ethdb.Database, txHash common.Hash) {
	db.Delete(txHash[:])
}

func GetTransaction(db ethdb.Database, txhash common.Hash) *types.Transaction {
	data, _ := db.Get(txhash[:])
	if len(data) != 0 {
		var tx types.Transaction
		if err := rlp.DecodeBytes(data, &tx); err != nil {
			return nil
		}
		return &tx
	}
	return nil
}

// PutReceipts stores the receipts in the current database
func PutReceipts(db ethdb.Database, receipts types.Receipts) error {
func PutReceipts(db common.Database, receipts types.Receipts) error {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)

@ -123,13 +107,8 @@ func PutReceipts(db ethdb.Database, receipts types.Receipts) error {
	return nil
}

// DeleteReceipt removes a receipt from the database
func DeleteReceipt(db ethdb.Database, txHash common.Hash) {
	db.Delete(append(receiptsPre, txHash[:]...))
}

// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
func GetReceipt(db common.Database, txHash common.Hash) *types.Receipt {
	data, _ := db.Get(append(receiptsPre, txHash[:]...))
	if len(data) == 0 {
		return nil
@ -145,7 +124,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {

// GetBlockReceipts returns the receipts generated by the transactions
// included in block's given hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
func GetBlockReceipts(db common.Database, hash common.Hash) types.Receipts {
	data, _ := db.Get(append(blockReceiptsPre, hash[:]...))
	if len(data) == 0 {
		return nil
@ -162,7 +141,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
// PutBlockReceipts stores the block's transactions associated receipts
// and stores them by block hash in a single slice. This is required for
// forks and chain reorgs
func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
func PutBlockReceipts(db common.Database, block *types.Block, receipts types.Receipts) error {
	rs := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		rs[i] = (*types.ReceiptForStorage)(receipt)
|
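For orientation, a hedged sketch of how these helpers pair up in use: persist a block's transactions, then round-trip one back through its RLP encoding. The db value is anything satisfying the common.Database interface this diff restores; the function name is illustrative.

// Sketch only: illustrative use of the helpers above.
func storeAndFetch(db common.Database, block *types.Block) *types.Transaction {
	// Persist every transaction in the block (batched when the backing
	// store is a *ethdb.LDBDatabase, as the helper's type switch shows).
	PutTransactions(db, block, block.Transactions())

	// Look the first one up again by hash.
	if txs := block.Transactions(); len(txs) > 0 {
		return GetTransaction(db, txs[0].Hash())
	}
	return nil
}
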
@ -117,13 +117,6 @@ func rlpHash(x interface{}) (h common.Hash) {
return h
}

// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
Transactions []*Transaction
Uncles []*Header
}

type Block struct {
header *Header
uncles []*Header
@ -136,20 +129,12 @@ type Block struct {

// Td is used by package core to store the total difficulty
// of the chain up to and including the block.
td *big.Int
Td *big.Int

// ReceivedAt is used by package eth to track block propagation time.
ReceivedAt time.Time
}

// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
// code solely to facilitate upgrading the database from the old format to the
// new, after which it should be deleted. Do not use!
func (b *Block) DeprecatedTd() *big.Int {
return b.td
}

// [deprecated by eth/63]
// StorageBlock defines the RLP encoding of a Block stored in the
// state database. The StorageBlock encoding contains fields that
// would otherwise need to be recomputed.
@ -162,7 +147,6 @@ type extblock struct {
Uncles []*Header
}

// [deprecated by eth/63]
// "storage" block encoding. used for database.
type storageblock struct {
Header *Header
@ -184,7 +168,7 @@ var (
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
b := &Block{header: copyHeader(header), td: new(big.Int)}
b := &Block{header: copyHeader(header), Td: new(big.Int)}

// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
@ -284,16 +268,24 @@ func (b *Block) EncodeRLP(w io.Writer) error {
})
}

// [deprecated by eth/63]
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
var sb storageblock
if err := s.Decode(&sb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD
b.header, b.uncles, b.transactions, b.Td = sb.Header, sb.Uncles, sb.Txs, sb.TD
return nil
}

func (b *StorageBlock) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, storageblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
TD: b.Td,
})
}

// TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
@ -364,23 +356,10 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
transactions: b.transactions,
receipts: b.receipts,
uncles: b.uncles,
Td: b.Td,
}
}

// WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
header: copyHeader(b.header),
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
copy(block.transactions, transactions)
for i := range uncles {
block.uncles[i] = copyHeader(uncles[i])
}
return block
}

// Implement pow.Block

func (b *Block) Hash() common.Hash {
@ -393,7 +372,7 @@ func (b *Block) Hash() common.Hash {
}

func (b *Block) String() string {
str := fmt.Sprintf(`Block(#%v): Size: %v {
str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
MinerHash: %x
%v
Transactions:
@ -401,7 +380,7 @@ Transactions:
Uncles:
%v
}
`, b.Number(), b.Size(), b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
`, b.Number(), b.Size(), b.Td, b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
return str
}
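Renaming td to Td re-exports the total difficulty, so package core can read and write it directly instead of going through the removed DeprecatedTd accessor. A hedged sketch of the resulting access pattern (function name illustrative; the field and Header.Difficulty come from the hunks above):

// Sketch: with the field exported, callers update it in place.
func totalDifficultyAfter(parent, block *types.Block) *big.Int {
	// Td of a block = Td of its parent + the block's own difficulty.
	td := new(big.Int).Add(parent.Td, block.Header().Difficulty)
	block.Td = td
	return td
}
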
@ -33,6 +33,10 @@ import (

var ErrInvalidSig = errors.New("invalid v, r, s values")

func IsContractAddr(addr []byte) bool {
return len(addr) == 0
}

type Transaction struct {
data txdata
// caches
@ -272,36 +276,14 @@ func (tx *Transaction) String() string {
// Transaction slice type for basic sorting.
type Transactions []*Transaction

// Len returns the length of s
func (s Transactions) Len() int { return len(s) }

// Swap swaps the i'th and the j'th element in s
func (s Transactions) Len() int { return len(s) }
func (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// GetRlp implements Rlpable and returns the i'th element of s in rlp
func (s Transactions) GetRlp(i int) []byte {
enc, _ := rlp.EncodeToBytes(s[i])
return enc
}

// Returns a new set t which is the difference between a to b
func TxDifference(a, b Transactions) (keep Transactions) {
keep = make(Transactions, 0, len(a))

remove := make(map[common.Hash]struct{})
for _, tx := range b {
remove[tx.Hash()] = struct{}{}
}

for _, tx := range a {
if _, ok := remove[tx.Hash()]; !ok {
keep = append(keep, tx)
}
}

return keep
}

type TxByNonce struct{ Transactions }

func (s TxByNonce) Less(i, j int) bool {
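TxDifference above is a plain set difference keyed on transaction hash. A small usage sketch, assuming oldChainTxs and newChainTxs are Transactions slices gathered while walking both sides of a reorg (variable names illustrative):

// Sketch: transactions on the old chain but absent from the new one
// must be re-injected into the pool after a reorg.
lost := TxDifference(oldChainTxs, newChainTxs)
pool.AddTransactions(lost)
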
@ -25,3 +25,20 @@ import (

var OutOfGasError = errors.New("Out of gas")
var DepthError = fmt.Errorf("Max call depth exceeded (%d)", params.CallCreateDepth)

type StackError struct {
req, has int
}

func StackErr(req, has int) StackError {
return StackError{req, has}
}

func (self StackError) Error() string {
return fmt.Sprintf("stack error! require %v, have %v", self.req, self.has)
}

func IsStackErr(err error) bool {
_, ok := err.(StackError)
return ok
}
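The restored StackError type lets the VM surface stack underflows as a typed error. A hedged usage sketch; stack.len() stands in for whatever operand count the caller has at hand:

// Sketch: construct and detect a stack error with the restored helpers.
err := StackErr(2, stack.len()) // need 2 operands, have fewer
if IsStackErr(err) {
	// abort execution of the current contract frame
	return nil, err
}
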
@ -33,12 +33,12 @@ import (
"encoding/json"
"errors"

"code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/ripemd160"
)

@ -23,8 +23,8 @@ import (
"encoding/json"
"io"

"code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/pborman/uuid"
)

const (

@ -36,9 +36,9 @@ import (
"io"
"reflect"

"code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/randentropy"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)
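All three hunks swap github.com/pborman/uuid back to code.google.com/p/go-uuid/uuid. For what the keystore needs the two packages expose the same surface, so a sketch of the typical call is unchanged either way (the Key literal assumes this package's Key type carries an Id field of type uuid.UUID):

// Sketch: key IDs are random (version 4) UUIDs in either package.
id := uuid.NewRandom()                // uuid.UUID, a []byte alias
key := &Key{Id: id, PrivateKey: priv} // field names assumed from key.go
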
188
eth/backend.go
@ -18,7 +18,6 @@
package eth

import (
"bytes"
"crypto/ecdsa"
"encoding/json"
"fmt"
@ -74,8 +73,6 @@ var (
)

type Config struct {
DevMode bool

Name string
NetworkId int
GenesisNonce int
@ -128,7 +125,7 @@ type Config struct {

// NewDB is used to create databases.
// If nil, the default is to create leveldb databases on disk.
NewDB func(path string) (ethdb.Database, error)
NewDB func(path string) (common.Database, error)
}

func (cfg *Config) parseBootNodes() []*discover.Node {
@ -210,8 +207,11 @@ type Ethereum struct {
shutdownChan chan bool

// DB interfaces
chainDb ethdb.Database // Block chain database
dappDb ethdb.Database // Dapp database
chainDb common.Database // Block chain databe
dappDb common.Database // Dapp database

// Closed when databases are flushed and closed
databasesClosed chan bool

//*** SERVICES ***
// State manager for processing new blocks and managing the over all states
@ -264,10 +264,14 @@ func New(config *Config) (*Ethereum, error) {

newdb := config.NewDB
if newdb == nil {
newdb = func(path string) (ethdb.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) }
newdb = func(path string) (common.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) }
}

// attempt to merge database together, upgrading from an old version
if err := mergeDatabases(config.DataDir, newdb); err != nil {
return nil, err
}

// Open the chain database and perform any upgrades needed
chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata"))
if err != nil {
return nil, fmt.Errorf("blockchain db err: %v", err)
@ -275,10 +279,6 @@ func New(config *Config) (*Ethereum, error) {
if db, ok := chainDb.(*ethdb.LDBDatabase); ok {
db.Meter("eth/db/chaindata/")
}
if err := upgradeChainDatabase(chainDb); err != nil {
return nil, err
}

dappDb, err := newdb(filepath.Join(config.DataDir, "dapp"))
if err != nil {
return nil, fmt.Errorf("dapp db err: %v", err)
@ -303,23 +303,18 @@ func New(config *Config) (*Ethereum, error) {
glog.V(logger.Info).Infof("Successfully wrote genesis block. New genesis hash = %x\n", block.Hash())
}

// different modes
switch {
case config.Olympic:
glog.V(logger.Error).Infoln("Starting Olympic network")
fallthrough
case config.DevMode:
if config.Olympic {
_, err := core.WriteTestNetGenesisBlock(chainDb, 42)
if err != nil {
return nil, err
}
glog.V(logger.Error).Infoln("Starting Olympic network")
}

// This is for testing only.
if config.GenesisBlock != nil {
core.WriteTd(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.Difficulty())
core.WriteBlock(chainDb, config.GenesisBlock)
core.WriteCanonicalHash(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.NumberU64())
core.WriteHeadBlockHash(chainDb, config.GenesisBlock.Hash())
core.WriteHead(chainDb, config.GenesisBlock)
}

if !config.SkipBcVersionCheck {
@ -334,6 +329,7 @@ func New(config *Config) (*Ethereum, error) {

eth := &Ethereum{
shutdownChan: make(chan bool),
databasesClosed: make(chan bool),
chainDb: chainDb,
dappDb: dappDb,
eventMux: &event.TypeMux{},
@ -377,7 +373,7 @@ func New(config *Config) (*Ethereum, error) {

eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor)
eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager, chainDb)
eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager)

eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
eth.miner.SetGasPrice(config.GasPrice)
@ -523,8 +519,8 @@ func (s *Ethereum) BlockProcessor() *core.BlockProcessor { return s.blockProcess
func (s *Ethereum) TxPool() *core.TxPool { return s.txPool }
func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper }
func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) DappDb() ethdb.Database { return s.dappDb }
func (s *Ethereum) ChainDb() common.Database { return s.chainDb }
func (s *Ethereum) DappDb() common.Database { return s.dappDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) PeerCount() int { return s.net.PeerCount() }
func (s *Ethereum) Peers() []*p2p.Peer { return s.net.Peers() }
@ -545,6 +541,8 @@ func (s *Ethereum) Start() error {
if err != nil {
return err
}
// periodically flush databases
go s.syncDatabases()

if s.AutoDAG {
s.StartAutoDAG()
@ -560,6 +558,32 @@ func (s *Ethereum) Start() error {
return nil
}

// sync databases every minute. If flushing fails we exit immediatly. The system
// may not continue under any circumstances.
func (s *Ethereum) syncDatabases() {
ticker := time.NewTicker(1 * time.Minute)
done:
for {
select {
case <-ticker.C:
// don't change the order of database flushes
if err := s.dappDb.Flush(); err != nil {
glog.Fatalf("fatal error: flush dappDb: %v (Restart your node. We are aware of this issue)\n", err)
}
if err := s.chainDb.Flush(); err != nil {
glog.Fatalf("fatal error: flush chainDb: %v (Restart your node. We are aware of this issue)\n", err)
}
case <-s.shutdownChan:
break done
}
}

s.chainDb.Close()
s.dappDb.Close()

close(s.databasesClosed)
}
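syncDatabases above relies on the restored common.Database interface exposing Flush and Close on top of plain key-value access. The interface itself isn't shown in this diff; a hedged reconstruction of the shape this file's calls require (the real common.Database may declare more or differ in detail):

// Hedged reconstruction: only the methods this file actually calls.
type Database interface {
	Put(key []byte, value []byte)
	Get(key []byte) ([]byte, error)
	Delete(key []byte) error
	Flush() error // periodic flush, driven by syncDatabases above
	Close()
}
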
func (s *Ethereum) StartForTest() {
jsonlogger.LogJson(&logger.LogStarting{
ClientString: s.net.Name,
@ -590,13 +614,12 @@ func (s *Ethereum) Stop() {
}
s.StopAutoDAG()

s.chainDb.Close()
s.dappDb.Close()
close(s.shutdownChan)
}

// This function will wait for a shutdown and resumes main thread execution
func (s *Ethereum) WaitForShutdown() {
<-s.databasesClosed
<-s.shutdownChan
}

@ -686,7 +709,7 @@ func dagFiles(epoch uint64) (string, string) {
return dag, "full-R" + dag
}

func saveBlockchainVersion(db ethdb.Database, bcVersion int) {
func saveBlockchainVersion(db common.Database, bcVersion int) {
d, _ := db.Get([]byte("BlockchainVersion"))
blockchainVersion := common.NewValue(d).Uint()

@ -695,61 +718,74 @@ func saveBlockchainVersion(db ethdb.Database, bcVersion int) {
}
}

// upgradeChainDatabase ensures that the chain database stores block split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
// Short circuit if the head block is stored already as separate header and body
data, err := db.Get([]byte("LastBlock"))
// mergeDatabases when required merge old database layout to one single database
func mergeDatabases(datadir string, newdb func(path string) (common.Database, error)) error {
// Check if already upgraded
data := filepath.Join(datadir, "chaindata")
if _, err := os.Stat(data); !os.IsNotExist(err) {
return nil
}
// make sure it's not just a clean path
chainPath := filepath.Join(datadir, "blockchain")
if _, err := os.Stat(chainPath); os.IsNotExist(err) {
return nil
}
glog.Infoln("Database upgrade required. Upgrading...")

database, err := newdb(data)
if err != nil {
return nil
return fmt.Errorf("creating data db err: %v", err)
}
head := common.BytesToHash(data)
defer database.Close()

if block := core.GetBlockByHashOld(db, head); block == nil {
return nil
// Migrate blocks
chainDb, err := newdb(chainPath)
if err != nil {
return fmt.Errorf("state db err: %v", err)
}
// At least some of the database is still the old format, upgrade (skip the head block!)
glog.V(logger.Info).Info("Old database detected, upgrading...")
defer chainDb.Close()

if db, ok := db.(*ethdb.LDBDatabase); ok {
blockPrefix := []byte("block-hash-")
for it := db.NewIterator(); it.Next(); {
// Skip anything other than a combined block
if !bytes.HasPrefix(it.Key(), blockPrefix) {
continue
}
// Skip the head block (merge last to signal upgrade completion)
if bytes.HasSuffix(it.Key(), head.Bytes()) {
continue
}
// Load the block, split and serialize (order!)
block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))

if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
return err
}
if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
return err
}
if err := core.WriteHeader(db, block.Header()); err != nil {
return err
}
if err := db.Delete(it.Key()); err != nil {
return err
}
}
// Lastly, upgrade the head block, disabling the upgrade mechanism
current := core.GetBlockByHashOld(db, head)

if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
return err
}
if err := core.WriteBody(db, current.Hash(), &types.Body{current.Transactions(), current.Uncles()}); err != nil {
return err
}
if err := core.WriteHeader(db, current.Header()); err != nil {
return err
if chain, ok := chainDb.(*ethdb.LDBDatabase); ok {
glog.Infoln("Merging blockchain database...")
it := chain.NewIterator()
for it.Next() {
database.Put(it.Key(), it.Value())
}
it.Release()
}

// Migrate state
stateDb, err := newdb(filepath.Join(datadir, "state"))
if err != nil {
return fmt.Errorf("state db err: %v", err)
}
defer stateDb.Close()

if state, ok := stateDb.(*ethdb.LDBDatabase); ok {
glog.Infoln("Merging state database...")
it := state.NewIterator()
for it.Next() {
database.Put(it.Key(), it.Value())
}
it.Release()
}

// Migrate transaction / receipts
extraDb, err := newdb(filepath.Join(datadir, "extra"))
if err != nil {
return fmt.Errorf("state db err: %v", err)
}
defer extraDb.Close()

if extra, ok := extraDb.(*ethdb.LDBDatabase); ok {
glog.Infoln("Merging transaction database...")

it := extra.NewIterator()
for it.Next() {
database.Put(it.Key(), it.Value())
}
it.Release()
}

return nil
}
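mergeDatabases is wired into New (see the hunk at @ -264,10 +264,14 above) before any database is opened, so the first start after this upgrade copies the old blockchain, state and extra stores into the single chaindata directory; the os.Stat check on chaindata makes the merge a one-shot operation. A sketch of that call order, with names taken from the hunks above:

// Sketch of the upgrade path as wired into New.
newdb := func(path string) (common.Database, error) {
	return ethdb.NewLDBDatabase(path, config.DatabaseCache)
}
if err := mergeDatabases(config.DataDir, newdb); err != nil {
	return nil, err // old layout exists but could not be merged
}
chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata"))
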
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,45 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the metrics collected by the downloader.

package downloader

import (
"github.com/ethereum/go-ethereum/metrics"
)

var (
hashInMeter = metrics.NewMeter("eth/downloader/hashes/in")
hashReqTimer = metrics.NewTimer("eth/downloader/hashes/req")
hashDropMeter = metrics.NewMeter("eth/downloader/hashes/drop")
hashTimeoutMeter = metrics.NewMeter("eth/downloader/hashes/timeout")

blockInMeter = metrics.NewMeter("eth/downloader/blocks/in")
blockReqTimer = metrics.NewTimer("eth/downloader/blocks/req")
blockDropMeter = metrics.NewMeter("eth/downloader/blocks/drop")
blockTimeoutMeter = metrics.NewMeter("eth/downloader/blocks/timeout")

headerInMeter = metrics.NewMeter("eth/downloader/headers/in")
headerReqTimer = metrics.NewTimer("eth/downloader/headers/req")
headerDropMeter = metrics.NewMeter("eth/downloader/headers/drop")
headerTimeoutMeter = metrics.NewMeter("eth/downloader/headers/timeout")

bodyInMeter = metrics.NewMeter("eth/downloader/bodies/in")
bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req")
bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop")
bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
)
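The deleted metrics.go registered one meter/timer quartet per downloader subsystem. For reference, the two call patterns the rest of the package drives them with, both visible in the queue.go hunks further down (values illustrative):

// Sketch: how the deleted meters and timers were used elsewhere.
blockTimeoutMeter.Mark(1)               // count a timed-out request
blockReqTimer.UpdateSince(request.Time) // measure request round-trip
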
@ -31,16 +31,10 @@ import (
"gopkg.in/fatih/set.v0"
)

// Hash and block fetchers belonging to eth/61 and below
type relativeHashFetcherFn func(common.Hash) error
type absoluteHashFetcherFn func(uint64, int) error
type blockFetcherFn func([]common.Hash) error

// Block header and body fethers belonging to eth/62 and above
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
type blockBodyFetcherFn func([]common.Hash) error

var (
errAlreadyFetching = errors.New("already fetching blocks from peer")
errAlreadyRegistered = errors.New("peer is already registered")
@ -60,37 +54,25 @@ type peer struct {

ignored *set.Set // Set of hashes not to request (didn't have previously)

getRelHashes relativeHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an origin hash
getAbsHashes absoluteHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an absolute position
getBlocks blockFetcherFn // [eth/61] Method to retrieve a batch of blocks

getRelHeaders relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies
getRelHashes relativeHashFetcherFn // Method to retrieve a batch of hashes from an origin hash
getAbsHashes absoluteHashFetcherFn // Method to retrieve a batch of hashes from an absolute position
getBlocks blockFetcherFn // Method to retrieve a batch of blocks

version int // Eth protocol version number to switch strategies
}

// newPeer create a new downloader peer, with specific hash and block retrieval
// mechanisms.
func newPeer(id string, version int, head common.Hash,
getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn) *peer {
func newPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn) *peer {
return &peer{
id: id,
head: head,
capacity: 1,
ignored: set.New(),

id: id,
head: head,
capacity: 1,
getRelHashes: getRelHashes,
getAbsHashes: getAbsHashes,
getBlocks: getBlocks,

getRelHeaders: getRelHeaders,
getAbsHeaders: getAbsHeaders,
getBlockBodies: getBlockBodies,

version: version,
ignored: set.New(),
version: version,
}
}
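With the eth/62 callbacks dropped, newPeer is back to the three eth/61 fetchers. A hedged construction sketch from the registration side (all callback values are placeholders for the protocol handler's methods):

// Sketch: building a downloader peer with the reverted constructor.
p := newPeer(id, version, currentHead,
	getRelHashes, // relativeHashFetcherFn
	getAbsHashes, // absoluteHashFetcherFn
	getBlocks)    // blockFetcherFn
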
@ -101,8 +83,8 @@ func (p *peer) Reset() {
p.ignored.Clear()
}

// Fetch61 sends a block retrieval request to the remote peer.
func (p *peer) Fetch61(request *fetchRequest) error {
// Fetch sends a block retrieval request to the remote peer.
func (p *peer) Fetch(request *fetchRequest) error {
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
return errAlreadyFetching
@ -119,28 +101,10 @@ func (p *peer) Fetch61(request *fetchRequest) error {
return nil
}

// Fetch sends a block body retrieval request to the remote peer.
func (p *peer) Fetch(request *fetchRequest) error {
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
return errAlreadyFetching
}
p.started = time.Now()

// Convert the header set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Headers))
for _, header := range request.Headers {
hashes = append(hashes, header.Hash())
}
go p.getBlockBodies(hashes)

return nil
}

// SetIdle61 sets the peer to idle, allowing it to execute new retrieval requests.
// SetIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its block retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time or not.
func (p *peer) SetIdle61() {
func (p *peer) SetIdle() {
// Update the peer's download allowance based on previous performance
scale := 2.0
if time.Since(p.started) > blockSoftTTL {
@ -167,36 +131,6 @@ func (p *peer) SetIdle61() {
atomic.StoreInt32(&p.idle, 0)
}

// SetIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its block body retrieval allowance will also be updated either up- or downwards,
// depending on whether the previous fetch completed in time or not.
func (p *peer) SetIdle() {
// Update the peer's download allowance based on previous performance
scale := 2.0
if time.Since(p.started) > bodySoftTTL {
scale = 0.5
if time.Since(p.started) > bodyHardTTL {
scale = 1 / float64(MaxBodyFetch) // reduces capacity to 1
}
}
for {
// Calculate the new download bandwidth allowance
prev := atomic.LoadInt32(&p.capacity)
next := int32(math.Max(1, math.Min(float64(MaxBodyFetch), float64(prev)*scale)))

// Try to update the old value
if atomic.CompareAndSwapInt32(&p.capacity, prev, next) {
// If we're having problems at 1 capacity, try to find better peers
if next == 1 {
p.Demote()
}
break
}
}
// Set the peer to idle to allow further block requests
atomic.StoreInt32(&p.idle, 0)
}

// Capacity retrieves the peers block download allowance based on its previously
// discovered bandwidth capacity.
func (p *peer) Capacity() int {
@ -312,16 +246,14 @@ func (ps *peerSet) AllPeers() []*peer {

// IdlePeers retrieves a flat list of all the currently idle peers within the
// active peer set, ordered by their reputation.
func (ps *peerSet) IdlePeers(version int) []*peer {
func (ps *peerSet) IdlePeers() []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()

list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
if (version == eth61 && p.version == eth61) || (version >= eth62 && p.version >= eth62) {
if atomic.LoadInt32(&p.idle) == 0 {
list = append(list, p)
}
if atomic.LoadInt32(&p.idle) == 0 {
list = append(list, p)
}
}
for i := 0; i < len(list); i++ {
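SetIdle's allowance update is a multiplicative-increase/multiplicative-decrease scheme clamped to [1, MaxBlockFetch]: a timely fetch doubles the allowance, a slow one halves it. A standalone sketch of just the arithmetic (the constant's value is illustrative):

package main

import (
	"fmt"
	"math"
)

const maxFetch = 128 // illustrative stand-in for MaxBlockFetch

// nextCapacity mirrors the allowance update in SetIdle above.
func nextCapacity(prev int32, tookTooLong bool) int32 {
	scale := 2.0 // timely fetch: double the allowance
	if tookTooLong {
		scale = 0.5 // slow fetch: halve it
	}
	return int32(math.Max(1, math.Min(maxFetch, float64(prev)*scale)))
}

func main() {
	fmt.Println(nextCapacity(8, false)) // 16
	fmt.Println(nextCapacity(8, true))  // 4
	fmt.Println(nextCapacity(1, true))  // clamped at 1
}
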
@ -43,21 +43,16 @@ var (

// fetchRequest is a currently running block retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
Time time.Time // Time when the request was made
Peer *peer // Peer to which the request was sent
Hashes map[common.Hash]int // Requested hashes with their insertion index (priority)
Time time.Time // Time when the request was made
}

// queue represents hashes that are either need fetching or are being fetched
type queue struct {
hashPool map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority)
hashQueue *prque.Prque // [eth/61] Priority queue of the block hashes to fetch
hashCounter int // [eth/61] Counter indexing the added hashes to ensure retrieval order

headerPool map[common.Hash]*types.Header // [eth/62] Pending headers, mapping from their hashes
headerQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the bodies for
headerHead common.Hash // [eth/62] Hash of the last queued header to verify order
hashPool map[common.Hash]int // Pending hashes, mapping to their insertion index (priority)
hashQueue *prque.Prque // Priority queue of the block hashes to fetch
hashCounter int // Counter indexing the added hashes to ensure retrieval order

pendPool map[string]*fetchRequest // Currently pending block retrieval operations

@ -71,13 +66,11 @@ type queue struct {
// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
return &queue{
hashPool: make(map[common.Hash]int),
hashQueue: prque.New(),
headerPool: make(map[common.Hash]*types.Header),
headerQueue: prque.New(),
pendPool: make(map[string]*fetchRequest),
blockPool: make(map[common.Hash]uint64),
blockCache: make([]*Block, blockCacheLimit),
hashPool: make(map[common.Hash]int),
hashQueue: prque.New(),
pendPool: make(map[string]*fetchRequest),
blockPool: make(map[common.Hash]uint64),
blockCache: make([]*Block, blockCacheLimit),
}
}

@ -90,10 +83,6 @@ func (q *queue) Reset() {
q.hashQueue.Reset()
q.hashCounter = 0

q.headerPool = make(map[common.Hash]*types.Header)
q.headerQueue.Reset()
q.headerHead = common.Hash{}

q.pendPool = make(map[string]*fetchRequest)

q.blockPool = make(map[common.Hash]uint64)
@ -101,21 +90,21 @@ func (q *queue) Reset() {
q.blockCache = make([]*Block, blockCacheLimit)
}

// Size retrieves the number of blocks in the queue, returning separately for
// Size retrieves the number of hashes in the queue, returning separately for
// pending and already downloaded.
func (q *queue) Size() (int, int) {
q.lock.RLock()
defer q.lock.RUnlock()

return len(q.hashPool) + len(q.headerPool), len(q.blockPool)
return len(q.hashPool), len(q.blockPool)
}

// Pending retrieves the number of blocks pending for retrieval.
// Pending retrieves the number of hashes pending for retrieval.
func (q *queue) Pending() int {
q.lock.RLock()
defer q.lock.RUnlock()

return q.hashQueue.Size() + q.headerQueue.Size()
return q.hashQueue.Size()
}

// InFlight retrieves the number of fetch requests currently in flight.
@ -135,7 +124,7 @@ func (q *queue) Throttle() bool {
// Calculate the currently in-flight block requests
pending := 0
for _, request := range q.pendPool {
pending += len(request.Hashes) + len(request.Headers)
pending += len(request.Hashes)
}
// Throttle if more blocks are in-flight than free space in the cache
return pending >= len(q.blockCache)-len(q.blockPool)
@ -149,18 +138,15 @@ func (q *queue) Has(hash common.Hash) bool {
if _, ok := q.hashPool[hash]; ok {
return true
}
if _, ok := q.headerPool[hash]; ok {
return true
}
if _, ok := q.blockPool[hash]; ok {
return true
}
return false
}

// Insert61 adds a set of hashes for the download queue for scheduling, returning
// Insert adds a set of hashes for the download queue for scheduling, returning
// the new hashes encountered.
func (q *queue) Insert61(hashes []common.Hash, fifo bool) []common.Hash {
func (q *queue) Insert(hashes []common.Hash, fifo bool) []common.Hash {
q.lock.Lock()
defer q.lock.Unlock()

@ -186,40 +172,6 @@ func (q *queue) Insert61(hashes []common.Hash, fifo bool) []common.Hash {
return inserts
}

// Insert adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Insert(headers []*types.Header, from uint64) []*types.Header {
q.lock.Lock()
defer q.lock.Unlock()

// Insert all the headers prioritized by the contained block number
inserts := make([]*types.Header, 0, len(headers))
for _, header := range headers {
// Make sure no duplicate requests are executed
hash := header.Hash()
if _, ok := q.headerPool[hash]; ok {
glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled", header.Number.Uint64(), hash[:4])
continue
}
// Make sure chain order is honored and preserved throughout
if header.Number == nil || header.Number.Uint64() != from {
glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ordering, expected %d", header.Number, hash[:4], from)
break
}
if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ancestry", header.Number, hash[:4])
break
}
// Queue the header for body retrieval
inserts = append(inserts, header)
q.headerPool[hash] = header
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
q.headerHead = hash
from++
}
return inserts
}
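The removed eth/62 Insert enforced three invariants per header batch: no duplicates (the headerPool lookup), contiguous numbering (Number == from), and parent linkage (ParentHash == previous hash). A compact sketch of just those ordering checks, using the same types:

// Sketch: the chain-ordering validation from the removed Insert above.
func validHeaderChain(headers []*types.Header, from uint64, prev common.Hash) bool {
	for _, h := range headers {
		if h.Number == nil || h.Number.Uint64() != from {
			return false // broke chain ordering
		}
		if prev != (common.Hash{}) && h.ParentHash != prev {
			return false // broke chain ancestry
		}
		prev, from = h.Hash(), from+1
	}
	return true
}
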
// GetHeadBlock retrieves the first block from the cache, or nil if it hasn't
// been downloaded yet (or simply non existent).
func (q *queue) GetHeadBlock() *Block {
@ -275,9 +227,9 @@ func (q *queue) TakeBlocks() []*Block {
return blocks
}

// Reserve61 reserves a set of hashes for the given peer, skipping any previously
// Reserve reserves a set of hashes for the given peer, skipping any previously
// failed download.
func (q *queue) Reserve61(p *peer, count int) *fetchRequest {
func (q *queue) Reserve(p *peer, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()

@ -324,68 +276,6 @@ func (q *queue) Reserve61(p *peer, count int) *fetchRequest {
return request
}

// Reserve reserves a set of headers for the given peer, skipping any previously
// failed download. Beside the next batch of needed fetches, it also returns a
// flag whether empty blocks were queued requiring processing.
func (q *queue) Reserve(p *peer, count int) (*fetchRequest, bool, error) {
q.lock.Lock()
defer q.lock.Unlock()

// Short circuit if the pool has been depleted, or if the peer's already
// downloading something (sanity check not to corrupt state)
if q.headerQueue.Empty() {
return nil, false, nil
}
if _, ok := q.pendPool[p.id]; ok {
return nil, false, nil
}
// Calculate an upper limit on the bodies we might fetch (i.e. throttling)
space := len(q.blockCache) - len(q.blockPool)
for _, request := range q.pendPool {
space -= len(request.Headers)
}
// Retrieve a batch of headers, skipping previously failed ones
send := make([]*types.Header, 0, count)
skip := make([]*types.Header, 0)

process := false
for proc := 0; proc < space && len(send) < count && !q.headerQueue.Empty(); proc++ {
header := q.headerQueue.PopItem().(*types.Header)

// If the header defines an empty block, deliver straight
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
if err := q.enqueue("", types.NewBlockWithHeader(header)); err != nil {
return nil, false, errInvalidChain
}
delete(q.headerPool, header.Hash())
process, space, proc = true, space-1, proc-1
continue
}
// If it's a content block, add to the body fetch request
if p.ignored.Has(header.Hash()) {
skip = append(skip, header)
} else {
send = append(send, header)
}
}
// Merge all the skipped headers back
for _, header := range skip {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
// Assemble and return the block download request
if len(send) == 0 {
return nil, process, nil
}
request := &fetchRequest{
Peer: p,
Headers: send,
Time: time.Now(),
}
q.pendPool[p.id] = request

return request, process, nil
}

// Cancel aborts a fetch request, returning all pending hashes to the queue.
func (q *queue) Cancel(request *fetchRequest) {
q.lock.Lock()
@ -394,9 +284,6 @@ func (q *queue) Cancel(request *fetchRequest) {
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
for _, header := range request.Headers {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
delete(q.pendPool, request.Peer.id)
}

@ -410,19 +297,9 @@ func (q *queue) Expire(timeout time.Duration) []string {
peers := []string{}
for id, request := range q.pendPool {
if time.Since(request.Time) > timeout {
// Update the metrics with the timeout
if len(request.Hashes) > 0 {
blockTimeoutMeter.Mark(1)
} else {
bodyTimeoutMeter.Mark(1)
}
// Return any non satisfied requests to the pool
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
for _, header := range request.Headers {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
peers = append(peers, id)
}
}
@ -433,8 +310,8 @@ func (q *queue) Expire(timeout time.Duration) []string {
return peers
}

// Deliver61 injects a block retrieval response into the download queue.
func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) {
// Deliver injects a block retrieval response into the download queue.
func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
q.lock.Lock()
defer q.lock.Unlock()

@ -443,7 +320,6 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) {
if request == nil {
return errNoFetchesPending
}
blockReqTimer.UpdateSince(request.Time)
delete(q.pendPool, id)

// If no blocks were retrieved, mark them as unavailable for the origin peer
@ -461,12 +337,19 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) {
errs = append(errs, fmt.Errorf("non-requested block %x", hash))
continue
}
// Queue the block up for processing
if err := q.enqueue(id, block); err != nil {
return err
// If a requested block falls out of the range, the hash chain is invalid
index := int(int64(block.NumberU64()) - int64(q.blockOffset))
if index >= len(q.blockCache) || index < 0 {
return errInvalidChain
}
// Otherwise merge the block and mark the hash block
q.blockCache[index] = &Block{
RawBlock: block,
OriginPeer: id,
}
delete(request.Hashes, hash)
delete(q.hashPool, hash)
q.blockPool[hash] = block.NumberU64()
}
// Return all failed or missing fetches to the queue
for hash, index := range request.Hashes {
@ -482,89 +365,6 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) {
return nil
}

// Deliver injects a block body retrieval response into the download queue.
func (q *queue) Deliver(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error {
q.lock.Lock()
defer q.lock.Unlock()

// Short circuit if the block bodies were never requested
request := q.pendPool[id]
if request == nil {
return errNoFetchesPending
}
bodyReqTimer.UpdateSince(request.Time)
delete(q.pendPool, id)

// If no block bodies were retrieved, mark them as unavailable for the origin peer
if len(txLists) == 0 || len(uncleLists) == 0 {
for hash, _ := range request.Headers {
request.Peer.ignored.Add(hash)
}
}
// Assemble each of the block bodies with their headers and queue for processing
errs := make([]error, 0)
for i, header := range request.Headers {
// Short circuit block assembly if no more bodies are found
if i >= len(txLists) || i >= len(uncleLists) {
break
}
// Reconstruct the next block if contents match up
if types.DeriveSha(types.Transactions(txLists[i])) != header.TxHash || types.CalcUncleHash(uncleLists[i]) != header.UncleHash {
errs = []error{errInvalidBody}
break
}
block := types.NewBlockWithHeader(header).WithBody(txLists[i], uncleLists[i])

// Queue the block up for processing
if err := q.enqueue(id, block); err != nil {
errs = []error{err}
break
}
request.Headers[i] = nil
delete(q.headerPool, header.Hash())
}
// Return all failed or missing fetches to the queue
for _, header := range request.Headers {
if header != nil {
q.headerQueue.Push(header, -float32(header.Number.Uint64()))
}
}
// If none of the blocks were good, it's a stale delivery
switch {
case len(errs) == 0:
return nil

case len(errs) == 1 && errs[0] == errInvalidBody:
return errInvalidBody

case len(errs) == 1 && errs[0] == errInvalidChain:
return errInvalidChain

case len(errs) == len(request.Headers):
return errStaleDelivery

default:
return fmt.Errorf("multiple failures: %v", errs)
}
}

// enqueue inserts a new block into the final delivery queue, waiting for pickup
// by the processor.
func (q *queue) enqueue(origin string, block *types.Block) error {
// If a requested block falls out of the range, the hash chain is invalid
index := int(int64(block.NumberU64()) - int64(q.blockOffset))
if index >= len(q.blockCache) || index < 0 {
return errInvalidChain
}
// Otherwise merge the block and mark the hash done
q.blockCache[index] = &Block{
RawBlock: block,
OriginPeer: origin,
}
q.blockPool[block.Header().Hash()] = block.NumberU64()
return nil
}

// Prepare configures the block cache offset to allow accepting inbound blocks.
func (q *queue) Prepare(offset uint64) {
q.lock.Lock()
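The cache index used in Deliver (and in the removed enqueue) is simply block number minus the offset fixed by Prepare; anything outside [0, len(blockCache)) means the peer delivered a block off the requested chain segment. A worked sketch with illustrative numbers:

// Sketch: block 105 with blockOffset 100 lands in slot 5 of the cache.
index := int(int64(block.NumberU64()) - int64(q.blockOffset)) // 105 - 100 = 5
if index < 0 || index >= len(q.blockCache) {
	return errInvalidChain // outside the window prepared for this fetch
}
q.blockCache[index] = &Block{RawBlock: block, OriginPeer: id}
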
@ -51,12 +51,6 @@ type blockRetrievalFn func(common.Hash) *types.Block
// blockRequesterFn is a callback type for sending a block retrieval request.
type blockRequesterFn func([]common.Hash) error

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// blockValidatorFn is a callback type to verify a block's header for fast propagation.
type blockValidatorFn func(block *types.Block, parent *types.Block) error

@ -75,30 +69,11 @@ type peerDropFn func(id string)
// announce is the hash notification of the availability of a new block in the
// network.
type announce struct {
hash common.Hash // Hash of the block being announced
number uint64 // Number of the block being announced (0 = unknown | old protocol)
header *types.Header // Header of the block partially reassembled (new protocol)
time time.Time // Timestamp of the announcement
hash common.Hash // Hash of the block being announced
time time.Time // Timestamp of the announcement

origin string // Identifier of the peer originating the notification

fetch61 blockRequesterFn // [eth/61] Fetcher function to retrieve an announced block
fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block
fetchBodies bodyRequesterFn // [eth/62] Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
headers []*types.Header // Collection of headers to filter
time time.Time // Arrival time of the headers
}

// headerFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
transactions [][]*types.Transaction // Collection of transactions per block bodies
uncles [][]*types.Header // Collection of uncles per block bodies
time time.Time // Arrival time of the blocks' contents
origin string // Identifier of the peer originating the notification
fetch blockRequesterFn // Fetcher function to retrieve
}

// inject represents a schedules import operation.
@ -113,20 +88,14 @@ type Fetcher struct {
// Various event channels
notify chan *announce
inject chan *inject

blockFilter chan chan []*types.Block
headerFilter chan chan *headerFilterTask
bodyFilter chan chan *bodyFilterTask

done chan common.Hash
quit chan struct{}
filter chan chan []*types.Block
done chan common.Hash
quit chan struct{}

// Announce states
announces map[string]int // Per peer announce counts to prevent memory exhaustion
announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching
fetching map[common.Hash]*announce // Announced blocks, currently fetching
fetched map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
completing map[common.Hash]*announce // Blocks with headers, currently body-completing
announces map[string]int // Per peer announce counts to prevent memory exhaustion
announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching
fetching map[common.Hash]*announce // Announced blocks, currently fetching

// Block cache
queue *prque.Prque // Queue containing the import operations (block number sorted)
@ -142,9 +111,8 @@ type Fetcher struct {
dropPeer peerDropFn // Drops a peer for misbehaving

// Testing hooks
fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62)
fetchingHook func([]common.Hash) // Method to call upon starting a block fetch
importedHook func(*types.Block) // Method to call upon successful block import
}

// New creates a block fetcher to retrieve blocks based on hash announcements.
@ -152,16 +120,12 @@ func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlo
return &Fetcher{
notify: make(chan *announce),
inject: make(chan *inject),
blockFilter: make(chan chan []*types.Block),
headerFilter: make(chan chan *headerFilterTask),
bodyFilter: make(chan chan *bodyFilterTask),
filter: make(chan chan []*types.Block),
done: make(chan common.Hash),
quit: make(chan struct{}),
announces: make(map[string]int),
announced: make(map[common.Hash][]*announce),
fetching: make(map[common.Hash]*announce),
fetched: make(map[common.Hash][]*announce),
completing: make(map[common.Hash]*announce),
queue: prque.New(),
queues: make(map[string]int),
queued: make(map[common.Hash]*inject),
@ -188,17 +152,12 @@ func (f *Fetcher) Stop() {

// Notify announces the fetcher of the potential availability of a new block in
// the network.
func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
blockFetcher blockRequesterFn, // eth/61 specific whole block fetcher
headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
func (f *Fetcher) Notify(peer string, hash common.Hash, time time.Time, fetcher blockRequesterFn) error {
block := &announce{
hash: hash,
number: number,
time: time,
origin: peer,
fetch61: blockFetcher,
fetchHeader: headerFetcher,
fetchBodies: bodyFetcher,
hash: hash,
time: time,
origin: peer,
fetch: fetcher,
}
select {
case f.notify <- block:
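After the revert, Notify takes a single whole-block fetcher again. A hedged call sketch from the protocol handler's side (the peer object and its RequestBlocks method are placeholders; the callback only needs to satisfy blockRequesterFn):

// Sketch: announcing a newly heard block hash to the fetcher.
err := fetcher.Notify(p.id, hash, time.Now(), p.RequestBlocks)
// p.RequestBlocks: func([]common.Hash) error
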
@ -222,16 +181,14 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
}
}

// FilterBlocks extracts all the blocks that were explicitly requested by the fetcher,
// Filter extracts all the blocks that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterBlocks(blocks types.Blocks) types.Blocks {
glog.V(logger.Detail).Infof("[eth/61] filtering %d blocks", len(blocks))

func (f *Fetcher) Filter(blocks types.Blocks) types.Blocks {
// Send the filter channel to the fetcher
filter := make(chan []*types.Block)

select {
case f.blockFilter <- filter:
case f.filter <- filter:
case <-f.quit:
return nil
}
@ -250,69 +207,11 @@ func (f *Fetcher) FilterBlocks(blocks types.Blocks) types.Blocks {
}
}

// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))

// Send the filter channel to the fetcher
filter := make(chan *headerFilterTask)

select {
case f.headerFilter <- filter:
case <-f.quit:
return nil
}
// Request the filtering of the header list
select {
case filter <- &headerFilterTask{headers: headers, time: time}:
case <-f.quit:
return nil
}
// Retrieve the headers remaining after filtering
select {
case task := <-filter:
return task.headers
case <-f.quit:
return nil
}
}

// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))

// Send the filter channel to the fetcher
filter := make(chan *bodyFilterTask)

select {
case f.bodyFilter <- filter:
case <-f.quit:
return nil, nil
}
// Request the filtering of the body list
select {
case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}:
case <-f.quit:
return nil, nil
}
// Retrieve the bodies remaining after filtering
select {
case task := <-filter:
return task.transactions, task.uncles
case <-f.quit:
return nil, nil
}
}
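Filter relies on a channel-of-channels handshake so the fetcher goroutine keeps sole ownership of its state: the caller first hands over a private reply channel, then the task, then reads the filtered remainder back on the same channel. The pattern in isolation, assuming f and blocks from the surrounding code:

// Sketch of the three-step handshake behind Filter above.
reply := make(chan []*types.Block)
select {
case f.filter <- reply: // 1: register the private channel
case <-f.quit:
	return nil
}
select {
case reply <- blocks: // 2: submit the task to the fetcher loop
case <-f.quit:
	return nil
}
select {
case rest := <-reply: // 3: collect what the fetcher didn't claim
	return rest
case <-f.quit:
	return nil
}
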
// Loop is the main fetcher loop, checking and processing various notification
|
||||
// events.
|
||||
func (f *Fetcher) loop() {
|
||||
// Iterate the block fetching until a quit is requested
|
||||
fetchTimer := time.NewTimer(0)
|
||||
completeTimer := time.NewTimer(0)
|
||||
|
||||
fetch := time.NewTimer(0)
|
||||
for {
|
||||
// Clean up any expired block fetches
|
||||
for hash, announce := range f.fetching {
|
||||
@ -347,38 +246,26 @@ func (f *Fetcher) loop() {
|
||||
|
||||
case notification := <-f.notify:
|
||||
// A block was announced, make sure the peer isn't DOSing us
|
||||
propAnnounceInMeter.Mark(1)
|
||||
announceMeter.Mark(1)
|
||||
|
||||
count := f.announces[notification.origin] + 1
|
||||
if count > hashLimit {
|
||||
glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
|
||||
propAnnounceDOSMeter.Mark(1)
|
||||
break
|
||||
}
|
||||
// If we have a valid block number, check that it's potentially useful
|
||||
if notification.number > 0 {
|
||||
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
||||
glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
|
||||
propAnnounceDropMeter.Mark(1)
|
||||
break
|
||||
}
|
||||
}
|
||||
// All is well, schedule the announce if block's not yet downloading
|
||||
if _, ok := f.fetching[notification.hash]; ok {
|
||||
break
|
||||
}
|
||||
if _, ok := f.completing[notification.hash]; ok {
|
||||
break
|
||||
}
|
||||
f.announces[notification.origin] = count
|
||||
f.announced[notification.hash] = append(f.announced[notification.hash], notification)
|
||||
if len(f.announced) == 1 {
|
||||
f.rescheduleFetch(fetchTimer)
|
||||
f.reschedule(fetch)
|
||||
}
|
||||
|
||||
case op := <-f.inject:
|
||||
// A direct block insertion was requested, try and fill any pending gaps
|
||||
propBroadcastInMeter.Mark(1)
|
||||
broadcastMeter.Mark(1)
|
||||
f.enqueue(op.origin, op.block)
|
||||
|
||||
case hash := <-f.done:
|
||||
@ -386,7 +273,7 @@ func (f *Fetcher) loop() {
|
||||
f.forgetHash(hash)
|
||||
f.forgetBlock(hash)
|
||||
|
||||
case <-fetchTimer.C:
|
||||
case <-fetch.C:
|
||||
// At least one block's timer ran out, check for needing retrieval
|
||||
request := make(map[string][]common.Hash)
|
||||
|
||||
@ -403,80 +290,30 @@ func (f *Fetcher) loop() {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Send out all block (eth/61) or header (eth/62) requests
|
||||
// Send out all block requests
|
||||
for peer, hashes := range request {
|
||||
if glog.V(logger.Detail) && len(hashes) > 0 {
|
||||
list := "["
|
||||
for _, hash := range hashes {
|
||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||
list += fmt.Sprintf("%x, ", hash[:4])
|
||||
}
|
||||
list = list[:len(list)-2] + "]"
|
||||
|
||||
if f.fetching[hashes[0]].fetch61 != nil {
|
||||
glog.V(logger.Detail).Infof("[eth/61] Peer %s: fetching blocks %s", peer, list)
|
||||
} else {
|
||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
|
||||
}
|
||||
glog.V(logger.Detail).Infof("Peer %s: fetching %s", peer, list)
|
||||
}
|
||||
// Create a closure of the fetch and schedule it on a new thread
fetchBlocks, fetchHeader, hashes := f.fetching[hashes[0]].fetch61, f.fetching[hashes[0]].fetchHeader, hashes
fetcher, hashes := f.fetching[hashes[0]].fetch, hashes
go func() {
if f.fetchingHook != nil {
f.fetchingHook(hashes)
}
if fetchBlocks != nil {
// Use old eth/61 protocol to retrieve whole blocks
blockFetchMeter.Mark(int64(len(hashes)))
fetchBlocks(hashes)
} else {
// Use new eth/62 protocol to retrieve headers first
for _, hash := range hashes {
headerFetchMeter.Mark(1)
fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
}
}
fetcher(hashes)
}()
}
// Schedule the next fetch if blocks are still pending
f.rescheduleFetch(fetchTimer)
f.reschedule(fetch)

case <-completeTimer.C:
// At least one header's timer ran out, retrieve everything
request := make(map[string][]common.Hash)

for hash, announces := range f.fetched {
// Pick a random peer to retrieve from, reset all others
announce := announces[rand.Intn(len(announces))]
f.forgetHash(hash)

// If the block still didn't arrive, queue for completion
if f.getBlock(hash) == nil {
request[announce.origin] = append(request[announce.origin], hash)
f.completing[hash] = announce
}
}
// Send out all block body requests
for peer, hashes := range request {
if glog.V(logger.Detail) && len(hashes) > 0 {
list := "["
for _, hash := range hashes {
list += fmt.Sprintf("%x…, ", hash[:4])
}
list = list[:len(list)-2] + "]"

glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
}
// Create a closure of the fetch and schedule it on a new thread
if f.completingHook != nil {
f.completingHook(hashes)
}
bodyFetchMeter.Mark(int64(len(hashes)))
go f.completing[hashes[0]].fetchBodies(hashes)
}
// Schedule the next fetch if blocks are still pending
f.rescheduleComplete(completeTimer)

case filter := <-f.blockFilter:
case filter := <-f.filter:
// Blocks arrived, extract any explicit fetches, return all else
var blocks types.Blocks
select {
@ -484,7 +321,6 @@ func (f *Fetcher) loop() {
case <-f.quit:
return
}
blockFilterInMeter.Mark(int64(len(blocks)))

explicit, download := []*types.Block{}, []*types.Block{}
for _, block := range blocks {
@ -503,7 +339,6 @@ func (f *Fetcher) loop() {
}
}

blockFilterOutMeter.Mark(int64(len(download)))
select {
case filter <- download:
case <-f.quit:
@ -515,146 +350,12 @@ func (f *Fetcher) loop() {
f.enqueue(announce.origin, block)
}
}

case filter := <-f.headerFilter:
// Headers arrived from a remote peer. Extract those that were explicitly
// requested by the fetcher, and return everything else so it's delivered
// to other parts of the system.
var task *headerFilterTask
select {
case task = <-filter:
case <-f.quit:
return
}
headerFilterInMeter.Mark(int64(len(task.headers)))

// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
for _, header := range task.headers {
hash := header.Hash()

// Filter fetcher-requested headers from other synchronisation algorithms
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
// If the delivered header does not match the promised number, drop the announcer
if header.Number.Uint64() != announce.number {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
f.dropPeer(announce.origin)
f.forgetHash(hash)
continue
}
// Only keep if not imported by other means
if f.getBlock(hash) == nil {
announce.header = header
announce.time = task.time

// If the block is empty (header only), short circuit into the final import queue
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])

block := types.NewBlockWithHeader(header)
block.ReceivedAt = task.time

complete = append(complete, block)
f.completing[hash] = announce
continue
}
// Otherwise add to the list of blocks needing completion
incomplete = append(incomplete, announce)
} else {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
f.forgetHash(hash)
}
} else {
// Fetcher doesn't know about it, add to the return list
unknown = append(unknown, header)
}
}
headerFilterOutMeter.Mark(int64(len(unknown)))
select {
case filter <- &headerFilterTask{headers: unknown, time: task.time}:
case <-f.quit:
return
}
// Schedule the retrieved headers for body completion
for _, announce := range incomplete {
hash := announce.header.Hash()
if _, ok := f.completing[hash]; ok {
continue
}
f.fetched[hash] = append(f.fetched[hash], announce)
if len(f.fetched) == 1 {
f.rescheduleComplete(completeTimer)
}
}
// Schedule the header-only blocks for import
for _, block := range complete {
if announce := f.completing[block.Hash()]; announce != nil {
f.enqueue(announce.origin, block)
}
}

case filter := <-f.bodyFilter:
// Block bodies arrived, extract any explicitly requested blocks, return the rest
var task *bodyFilterTask
select {
case task = <-filter:
case <-f.quit:
return
}
bodyFilterInMeter.Mark(int64(len(task.transactions)))

blocks := []*types.Block{}
for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
// Match up a body to any possible completion request
matched := false

for hash, announce := range f.completing {
if f.queued[hash] == nil {
txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
uncleHash := types.CalcUncleHash(task.uncles[i])

if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash {
// Mark the body matched, reassemble if still unknown
matched = true

if f.getBlock(hash) == nil {
block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
block.ReceivedAt = task.time

blocks = append(blocks, block)
} else {
f.forgetHash(hash)
}
}
}
}
if matched {
task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
i--
continue
}
}

bodyFilterOutMeter.Mark(int64(len(task.transactions)))
select {
case filter <- task:
case <-f.quit:
return
}
// Schedule the retrieved blocks for ordered import
for _, block := range blocks {
if announce := f.completing[block.Hash()]; announce != nil {
f.enqueue(announce.origin, block)
}
}
}
}
}

// rescheduleFetch resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
// reschedule resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) reschedule(fetch *time.Timer) {
// Short circuit if no blocks are announced
if len(f.announced) == 0 {
return
@ -669,22 +370,6 @@ func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
fetch.Reset(arriveTimeout - time.Since(earliest))
}

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
// Short circuit if no headers are fetched
if len(f.fetched) == 0 {
return
}
// Otherwise find the earliest expiring announcement
earliest := time.Now()
for _, announces := range f.fetched {
if earliest.After(announces[0].time) {
earliest = announces[0].time
}
}
complete.Reset(gatherSlack - time.Since(earliest))
}
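
reschedule and rescheduleComplete above share the earliest-deadline idiom: scan the pending set for the oldest entry, then arm one shared timer for the moment its grace period (arriveTimeout or gatherSlack respectively) expires. A hedged standalone sketch; the pending map, grace argument and main are invented for illustration:

package main

import (
	"fmt"
	"time"
)

// reschedule arms the shared timer for the oldest pending deadline.
func reschedule(timer *time.Timer, pending map[string]time.Time, grace time.Duration) {
	if len(pending) == 0 {
		return // nothing pending, leave the timer disarmed
	}
	earliest := time.Now()
	for _, at := range pending {
		if earliest.After(at) {
			earliest = at
		}
	}
	// May be negative if the grace period already ran out, in which
	// case Reset fires the timer immediately.
	timer.Reset(grace - time.Since(earliest))
}

func main() {
	timer := time.NewTimer(0)
	<-timer.C // drain the channel so Reset is safe
	pending := map[string]time.Time{"a": time.Now().Add(-20 * time.Millisecond)}
	reschedule(timer, pending, 50*time.Millisecond)
	fmt.Println("fired at:", <-timer.C)
}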

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
@ -693,16 +378,13 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
propBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
propBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
discardMeter.Mark(1)
return
}
// Schedule the block for future importing
@ -716,7 +398,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
f.queue.Push(op, -float32(block.NumberU64()))

if glog.V(logger.Debug) {
glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
}
}
}
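
The enqueue hunk above performs two admission checks before queueing a propagated block: a per-peer allowance capped at blockLimit, and a distance window of (-maxUncleDist, +maxQueueDist) around the local chain head. A standalone sketch of just that gatekeeping; the admit helper and its example inputs are invented, while the constant values mirror the limits declared in fetcher.go at this revision:

package main

import "fmt"

const (
	blockLimit   = 64 // max blocks a single peer may have queued at once
	maxUncleDist = 7  // how far below the chain head a block may still be useful
	maxQueueDist = 32 // how far above the chain head a block may be queued
)

// admit reports whether one more block from a peer should be accepted,
// given how many it already has queued and the block/head numbers.
func admit(queued int, number, head uint64) error {
	if queued+1 > blockLimit {
		return fmt.Errorf("exceeded allowance (%d)", blockLimit)
	}
	if dist := int64(number) - int64(head); dist < -maxUncleDist || dist > maxQueueDist {
		return fmt.Errorf("distance %d outside [-%d, %d]", dist, maxUncleDist, maxQueueDist)
	}
	return nil
}

func main() {
	fmt.Println(admit(3, 105, 100)) // <nil>: within both limits
	fmt.Println(admit(3, 90, 100))  // too far below the head, discarded
}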
@ -728,39 +410,39 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
hash := block.Hash()

// Run the import on a new thread
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4])
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
go func() {
defer func() { f.done <- hash }()

// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
if parent == nil {
glog.V(logger.Debug).Infof("Peer %s: parent []%x] of block #%d [%x…] unknown", block.ParentHash().Bytes()[:4], peer, block.NumberU64(), hash[:4])
|
||||
return
}
// Quickly validate the header and propagate the block if it passes
switch err := f.validateBlock(block, parent); err {
case nil:
// All ok, quickly propagate to our peers
propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
broadcastTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, true)

case core.BlockFutureErr:
futureMeter.Mark(1)
// Weird future block, don't fail, but neither propagate

default:
// Something went very wrong, drop the peer
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
f.dropPeer(peer)
return
}
// Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil {
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err)
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
return
}
// If import succeeded, broadcast the block
propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
announceTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, false)

// Invoke the testing hook if needed
@ -790,27 +472,9 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
}
delete(f.fetching, hash)
}

// Remove any pending completion requests and decrement the DOS counters
for _, announce := range f.fetched[hash] {
f.announces[announce.origin]--
if f.announces[announce.origin] == 0 {
delete(f.announces, announce.origin)
}
}
delete(f.fetched, hash)

// Remove any pending completions and decrement the DOS counters
if announce := f.completing[hash]; announce != nil {
f.announces[announce.origin]--
if f.announces[announce.origin] == 0 {
delete(f.announces, announce.origin)
}
delete(f.completing, hash)
}
}
// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *Fetcher) forgetBlock(hash common.Hash) {
if insert := f.queued[hash]; insert != nil {

eth/fetcher/fetcher_test.go
@ -27,39 +27,21 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)

var (
testdb, _ = ethdb.NewMemDatabase()
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
genesis = core.GenesisBlockForTesting(testdb, common.Address{}, big.NewInt(0))
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil)
)

// makeChain creates a chain of n blocks starting at and including parent.
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
// the returned hash chain is ordered head->parent.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// If the block number is a multiple of 3, send a bonus transaction to the miner
if parent == genesis && i%3 == 0 {
|
||||
tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
block.AddTx(tx)
|
||||
}
|
||||
// If the block number is a multiple of 5, add a bonus uncle to the block
|
||||
if i%5 == 0 {
|
||||
block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
|
||||
}
|
||||
blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
|
||||
gen.SetCoinbase(common.Address{seed})
|
||||
})
|
||||
hashes := make([]common.Hash, n+1)
|
||||
hashes[len(hashes)-1] = parent.Hash()
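
For orientation, a typical invocation from the tests below looks like the following fragment (not a runnable program on its own; the chain length is arbitrary):

// 16 blocks on top of the shared test genesis, coinbase seeded with 0:
hashes, blocks := makeChain(16, 0, genesis)
// hashes[0] is the head, hashes[len(hashes)-1] == genesis.Hash(),
// and blocks maps every hash back to its full block.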
@ -78,7 +60,6 @@ type fetcherTester struct {

hashes []common.Hash // Hash chain belonging to the tester
blocks map[common.Hash]*types.Block // Blocks belonging to the tester
drops map[string]bool // Map of peers dropped by the fetcher

lock sync.RWMutex
}
@ -88,7 +69,6 @@ func newTester() *fetcherTester {
tester := &fetcherTester{
hashes: []common.Hash{genesis.Hash()},
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
@ -142,14 +122,12 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
return 0, nil
}

// dropPeer is an emulator for the peer removal, simply accumulating the various
// peers dropped by the fetcher.
// dropPeer is a nop placeholder for the peer removal.
func (f *fetcherTester) dropPeer(peer string) {
f.drops[peer] = true
}

// makeBlockFetcher retrieves a block fetcher associated with a simulated peer.
func (f *fetcherTester) makeBlockFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
// peerFetcher retrieves a fetcher associated with a simulated peer.
func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
@ -164,105 +142,18 @@ func (f *fetcherTester) makeBlockFetcher(blocks map[common.Hash]*types.Block) bl
}
}
// Return on a new thread
go f.fetcher.FilterBlocks(blocks)
go f.fetcher.Filter(blocks)

return nil
}
}

// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
}
// Create a function that returns a header from the closure
return func(hash common.Hash) error {
// Gather the blocks to return
headers := make([]*types.Header, 0, 1)
if block, ok := closure[hash]; ok {
headers = append(headers, block.Header())
}
// Return on a new thread
go f.fetcher.FilterHeaders(headers, time.Now().Add(drift))

return nil
}
}

// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
func (f *fetcherTester) makeBodyFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
}
// Create a function that returns blocks from the closure
return func(hashes []common.Hash) error {
// Gather the block bodies to return
transactions := make([][]*types.Transaction, 0, len(hashes))
uncles := make([][]*types.Header, 0, len(hashes))

for _, hash := range hashes {
if block, ok := closure[hash]; ok {
transactions = append(transactions, block.Transactions())
uncles = append(uncles, block.Uncles())
}
}
// Return on a new thread
go f.fetcher.FilterBodies(transactions, uncles, time.Now().Add(drift))

return nil
}
}
// verifyFetchingEvent verifies that a single event arrives on a fetching channel.
func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {
if arrive {
select {
case <-fetching:
case <-time.After(time.Second):
t.Fatalf("fetching timeout")
}
} else {
select {
case <-fetching:
t.Fatalf("fetching invoked")
case <-time.After(10 * time.Millisecond):
}
}
}
// verifyCompletingEvent verifies that a single event arrives on a completing channel.
func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {
if arrive {
select {
case <-completing:
case <-time.After(time.Second):
t.Fatalf("completing timeout")
}
} else {
select {
case <-completing:
t.Fatalf("completing invoked")
case <-time.After(10 * time.Millisecond):
}
}
}
// verifyImportEvent verifies that a single event arrives on an import channel.
func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {
if arrive {
select {
case <-imported:
case <-time.After(time.Second):
t.Fatalf("import timeout")
}
} else {
select {
case <-imported:
t.Fatalf("import invoked")
case <-time.After(10 * time.Millisecond):
}
func verifyImportEvent(t *testing.T, imported chan *types.Block) {
select {
case <-imported:
case <-time.After(time.Second):
t.Fatalf("import timeout")
}
}

@ -273,7 +164,7 @@ func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
select {
case <-imported:
case <-time.After(time.Second):
t.Fatalf("block %d: import timeout", i+1)
t.Fatalf("block %d: import timeout", i)
}
}
verifyImportDone(t, imported)
@ -290,78 +181,51 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {

// Tests that a fetcher accepts block announcements and initiates retrievals for
// them, successfully importing into the local chain.
func TestSequentialAnnouncements61(t *testing.T) { testSequentialAnnouncements(t, 61) }
func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }

func testSequentialAnnouncements(t *testing.T, protocol int) {
func TestSequentialAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)

tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

// Iteratively announce blocks until all are imported
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

for i := len(hashes) - 2; i >= 0; i-- {
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
verifyImportEvent(t, imported, true)
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
verifyImportEvent(t, imported)
}
verifyImportDone(t, imported)
}

// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
func TestConcurrentAnnouncements61(t *testing.T) { testConcurrentAnnouncements(t, 61) }
func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }

func testConcurrentAnnouncements(t *testing.T, protocol int) {
func TestConcurrentAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)

// Assemble a tester with a built in counter for the requests
tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

counter := uint32(0)
blockWrapper := func(hashes []common.Hash) error {
wrapper := func(hashes []common.Hash) error {
atomic.AddUint32(&counter, uint32(len(hashes)))
return blockFetcher(hashes)
}
headerWrapper := func(hash common.Hash) error {
atomic.AddUint32(&counter, 1)
return headerFetcher(hash)
return fetcher(hashes)
}
// Iteratively announce blocks until all are imported
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

for i := len(hashes) - 2; i >= 0; i-- {
if protocol < 62 {
tester.fetcher.Notify("first", hashes[i], 0, time.Now().Add(-arriveTimeout), blockWrapper, nil, nil)
tester.fetcher.Notify("second", hashes[i], 0, time.Now().Add(-arriveTimeout+time.Millisecond), blockWrapper, nil, nil)
tester.fetcher.Notify("second", hashes[i], 0, time.Now().Add(-arriveTimeout-time.Millisecond), blockWrapper, nil, nil)
} else {
tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerWrapper, bodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), nil, headerWrapper, bodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), nil, headerWrapper, bodyFetcher)
}
verifyImportEvent(t, imported, true)
tester.fetcher.Notify("first", hashes[i], time.Now().Add(-arriveTimeout), wrapper)
tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout+time.Millisecond), wrapper)
tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout-time.Millisecond), wrapper)

verifyImportEvent(t, imported)
}
verifyImportDone(t, imported)

@ -373,90 +237,56 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
// Tests that announcements arriving while a previous one is being fetched still
// result in a valid import.
func TestOverlappingAnnouncements61(t *testing.T) { testOverlappingAnnouncements(t, 61) }
func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }

func testOverlappingAnnouncements(t *testing.T, protocol int) {
func TestOverlappingAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)

tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

// Iteratively announce blocks, but overlap them continuously
overlap := 16
fetching := make(chan []common.Hash)
imported := make(chan *types.Block, len(hashes)-1)
for i := 0; i < overlap; i++ {
imported <- nil
}
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

for i := len(hashes) - 2; i >= 0; i-- {
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
select {
case <-imported:
case <-fetching:
case <-time.After(time.Second):
t.Fatalf("block %d: import timeout", len(hashes)-i)
t.Fatalf("hash %d: announce timeout", len(hashes)-i)
}
}
// Wait for all the imports to complete and check count
verifyImportCount(t, imported, overlap)
verifyImportCount(t, imported, len(hashes)-1)
}

// Tests that announces already being retrieved will not be duplicated.
func TestPendingDeduplication61(t *testing.T) { testPendingDeduplication(t, 61) }
func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }

func testPendingDeduplication(t *testing.T, protocol int) {
func TestPendingDeduplication(t *testing.T) {
// Create a hash and corresponding block
hashes, blocks := makeChain(1, 0, genesis)

// Assemble a tester with a built in counter and delayed fetcher
tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

delay := 50 * time.Millisecond
counter := uint32(0)
blockWrapper := func(hashes []common.Hash) error {
wrapper := func(hashes []common.Hash) error {
atomic.AddUint32(&counter, uint32(len(hashes)))

// Simulate a long running fetch
go func() {
time.Sleep(delay)
blockFetcher(hashes)
}()
return nil
}
headerWrapper := func(hash common.Hash) error {
atomic.AddUint32(&counter, 1)

// Simulate a long running fetch
go func() {
time.Sleep(delay)
headerFetcher(hash)
fetcher(hashes)
}()
return nil
}
// Announce the same block many times until it's fetched (wait for any pending ops)
for tester.getBlock(hashes[0]) == nil {
if protocol < 62 {
tester.fetcher.Notify("repeater", hashes[0], 0, time.Now().Add(-arriveTimeout), blockWrapper, nil, nil)
} else {
tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerWrapper, bodyFetcher)
}
tester.fetcher.Notify("repeater", hashes[0], time.Now().Add(-arriveTimeout), wrapper)
time.Sleep(time.Millisecond)
}
time.Sleep(delay)
@ -472,21 +302,14 @@ func testPendingDeduplication(t *testing.T, protocol int) {

// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
func TestRandomArrivalImport61(t *testing.T) { testRandomArrivalImport(t, 61) }
func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }

func testRandomArrivalImport(t *testing.T, protocol int) {
func TestRandomArrivalImport(t *testing.T) {
// Create a chain of blocks to import, and choose one to delay
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2

tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

// Iteratively announce blocks, skipping one entry
imported := make(chan *types.Block, len(hashes)-1)
@ -494,40 +317,25 @@ func testRandomArrivalImport(t *testing.T, protocol int) {

for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
time.Sleep(time.Millisecond)
}
}
// Finally announce the skipped entry and check full import
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[skip], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
tester.fetcher.Notify("valid", hashes[skip], time.Now().Add(-arriveTimeout), fetcher)
verifyImportCount(t, imported, len(hashes)-1)
}

// Tests that direct block enqueues (due to block propagation vs. hash announce)
// are correctly scheduled, filling any import queue gaps.
func TestQueueGapFill61(t *testing.T) { testQueueGapFill(t, 61) }
func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }

func testQueueGapFill(t *testing.T, protocol int) {
func TestQueueGapFill(t *testing.T) {
// Create a chain of blocks to import, and choose one to not announce at all
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2

tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

// Iteratively announce blocks, skipping one entry
imported := make(chan *types.Block, len(hashes)-1)
@ -535,11 +343,7 @@ func testQueueGapFill(t *testing.T, protocol int) {

for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
time.Sleep(time.Millisecond)
}
}
@ -550,20 +354,13 @@ func testQueueGapFill(t *testing.T, protocol int) {

// Tests that blocks arriving from various sources (multiple propagations, hash
// announces, etc) do not get scheduled for import multiple times.
func TestImportDeduplication61(t *testing.T) { testImportDeduplication(t, 61) }
func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }

func testImportDeduplication(t *testing.T, protocol int) {
func TestImportDeduplication(t *testing.T) {
// Create two blocks to import (one for duplication, the other for stalling)
hashes, blocks := makeChain(2, 0, genesis)

// Create the tester and wrap the importer with a counter
tester := newTester()
blockFetcher := tester.makeBlockFetcher(blocks)
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
fetcher := tester.makeFetcher(blocks)

counter := uint32(0)
tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
@ -577,11 +374,7 @@ func testImportDeduplication(t *testing.T, protocol int) {
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

// Announce the duplicating block, wait for retrieval, and also propagate directly
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[0], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
}
tester.fetcher.Notify("valid", hashes[0], time.Now().Add(-arriveTimeout), fetcher)
<-fetching

tester.fetcher.Enqueue("valid", blocks[hashes[0]])
@ -598,157 +391,35 @@ func testImportDeduplication(t *testing.T, protocol int) {
}

// Tests that blocks with numbers much lower or higher than our current head get
// discarded to prevent wasting resources on useless blocks from faulty peers.
func TestDistantPropagationDiscarding(t *testing.T) {
// Create a long chain to import and define the discard boundaries
// discarded to prevent wasting resources on useless blocks from faulty peers.
func TestDistantDiscarding(t *testing.T) {
// Create a long chain to import
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]

low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1

// Create a tester and simulate a head block being the middle of the above chain
tester := newTester()
tester.hashes = []common.Hash{head}
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}

// Ensure that a block with a lower number than the threshold is discarded
tester.fetcher.Enqueue("lower", blocks[hashes[low]])
tester.fetcher.Enqueue("lower", blocks[hashes[0]])
time.Sleep(10 * time.Millisecond)
if !tester.fetcher.queue.Empty() {
t.Fatalf("fetcher queued stale block")
}
// Ensure that a block with a higher number than the threshold is discarded
tester.fetcher.Enqueue("higher", blocks[hashes[high]])
tester.fetcher.Enqueue("higher", blocks[hashes[len(hashes)-1]])
time.Sleep(10 * time.Millisecond)
if !tester.fetcher.queue.Empty() {
t.Fatalf("fetcher queued future block")
}
}

// Tests that announcements with numbers much lower or higher than our current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }

func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
// Create a long chain to import and define the discard boundaries
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]

low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1

// Create a tester and simulate a head block being the middle of the above chain
tester := newTester()
tester.hashes = []common.Hash{head}
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}

headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)

fetching := make(chan struct{}, 2)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }

// Ensure that a block with a lower number than the threshold is discarded
tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
select {
case <-time.After(50 * time.Millisecond):
case <-fetching:
t.Fatalf("fetcher requested stale header")
}
// Ensure that a block with a higher number than the threshold is discarded
tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
select {
case <-time.After(50 * time.Millisecond):
case <-fetching:
t.Fatalf("fetcher requested future header")
}
}

// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }

func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
// Create a single block to import and check numbers against
hashes, blocks := makeChain(1, 0, genesis)

tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)

imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

// Announce a block with a bad number, check for immediate drop
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
verifyImportEvent(t, imported, false)

if !tester.drops["bad"] {
t.Fatalf("peer with invalid numbered announcement not dropped")
}
// Make sure a good announcement passes without a drop
tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
verifyImportEvent(t, imported, true)

if tester.drops["good"] {
t.Fatalf("peer with valid numbered announcement dropped")
}
verifyImportDone(t, imported)
}

// Tests that if a block is empty (i.e. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }

func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
// Create a chain of blocks to import
hashes, blocks := makeChain(32, 0, genesis)

tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)

// Add a monitoring hook for all internal events
fetching := make(chan []common.Hash)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }

completing := make(chan []common.Hash)
tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }

imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

// Iteratively announce blocks until all are imported
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)

// All announces should fetch the header
verifyFetchingEvent(t, fetching, true)

// Only blocks with data contents should request bodies
verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0)

// Irrelevant of the construct, import should succeed
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
}

// Tests that a peer is unable to use unbounded memory by sending infinite
// block announcements to a node, but that even in the face of such an attack,
// the fetcher remains operational.
func TestHashMemoryExhaustionAttack61(t *testing.T) { testHashMemoryExhaustionAttack(t, 61) }
func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }

func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
func TestHashMemoryExhaustionAttack(t *testing.T) {
// Create a tester with instrumented import hooks
tester := newTester()

@ -758,29 +429,17 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
// Create a valid chain and an infinite junk chain
targetBlocks := hashLimit + 2*maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
validBlockFetcher := tester.makeBlockFetcher(blocks)
validHeaderFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
validBodyFetcher := tester.makeBodyFetcher(blocks, 0)
valid := tester.makeFetcher(blocks)

attack, _ := makeChain(targetBlocks, 0, unknownBlock)
attackerBlockFetcher := tester.makeBlockFetcher(nil)
attackerHeaderFetcher := tester.makeHeaderFetcher(nil, -gatherSlack)
attackerBodyFetcher := tester.makeBodyFetcher(nil, 0)
attacker := tester.makeFetcher(nil)

// Feed the tester a huge hashset from the attacker, and a limited from the valid peer
for i := 0; i < len(attack); i++ {
if i < maxQueueDist {
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], 0, time.Now(), validBlockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), nil, validHeaderFetcher, validBodyFetcher)
}
}
if protocol < 62 {
tester.fetcher.Notify("attacker", attack[i], 0, time.Now(), attackerBlockFetcher, nil, nil)
} else {
tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher)
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], time.Now(), valid)
}
tester.fetcher.Notify("attacker", attack[i], time.Now(), attacker)
}
if len(tester.fetcher.announced) != hashLimit+maxQueueDist {
t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist)
@ -790,12 +449,8 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {

// Feed the remaining valid hashes to ensure DOS protection state remains clean
for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
if protocol < 62 {
tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), validBlockFetcher, nil, nil)
} else {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, validHeaderFetcher, validBodyFetcher)
}
verifyImportEvent(t, imported, true)
tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), valid)
verifyImportEvent(t, imported)
}
verifyImportDone(t, imported)
}
@ -843,7 +498,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
// Insert the remaining blocks in chunks to ensure clean DOS protection
for i := maxQueueDist; i < len(hashes)-1; i++ {
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]])
verifyImportEvent(t, imported, true)
verifyImportEvent(t, imported)
}
verifyImportDone(t, imported)
}

eth/fetcher/metrics.go

@ -23,24 +23,10 @@ import (
)

var (
propAnnounceInMeter = metrics.NewMeter("eth/fetcher/prop/announces/in")
propAnnounceOutTimer = metrics.NewTimer("eth/fetcher/prop/announces/out")
propAnnounceDropMeter = metrics.NewMeter("eth/fetcher/prop/announces/drop")
propAnnounceDOSMeter = metrics.NewMeter("eth/fetcher/prop/announces/dos")

propBroadcastInMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/in")
propBroadcastOutTimer = metrics.NewTimer("eth/fetcher/prop/broadcasts/out")
propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop")
propBroadcastDOSMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos")

blockFetchMeter = metrics.NewMeter("eth/fetcher/fetch/blocks")
headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers")
bodyFetchMeter = metrics.NewMeter("eth/fetcher/fetch/bodies")

blockFilterInMeter = metrics.NewMeter("eth/fetcher/filter/blocks/in")
blockFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/blocks/out")
headerFilterInMeter = metrics.NewMeter("eth/fetcher/filter/headers/in")
headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out")
bodyFilterInMeter = metrics.NewMeter("eth/fetcher/filter/bodies/in")
bodyFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/bodies/out")
announceMeter = metrics.NewMeter("eth/sync/RemoteAnnounces")
announceTimer = metrics.NewTimer("eth/sync/LocalAnnounces")
broadcastMeter = metrics.NewMeter("eth/sync/RemoteBroadcasts")
broadcastTimer = metrics.NewTimer("eth/sync/LocalBroadcasts")
discardMeter = metrics.NewMeter("eth/sync/DiscardedBlocks")
futureMeter = metrics.NewMeter("eth/sync/FutureBlocks")
)
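
For context on how these instruments are driven: the fetcher hunks above call Mark(n) on meters once per event batch and UpdateSince(start) on timers once per completed round trip. A hedged sketch; the variable names and handleBatch are invented, only the metrics calls mirror the code above:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

var (
	exampleFetchMeter = metrics.NewMeter("eth/example/fetch") // event rate, e.g. hashes requested
	exampleRoundTimer = metrics.NewTimer("eth/example/rtt")   // latency distribution, e.g. announce-to-import
)

// handleBatch records one batch of n items whose processing began at start.
func handleBatch(n int, start time.Time) {
	exampleFetchMeter.Mark(int64(n))  // how many items arrived
	exampleRoundTimer.UpdateSince(start) // how long the round trip took
}

func main() {
	handleBatch(3, time.Now().Add(-50*time.Millisecond))
}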
347
eth/handler.go
@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@ -37,10 +36,10 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)

const (
softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
)
// This is the target maximum size of returned blocks for the
// getBlocks message. The reply message may exceed it
// if a single block is larger than the limit.
const maxBlockRespSize = 2 * 1024 * 1024

func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
@ -60,13 +59,12 @@ func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(has
func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) }

type ProtocolManager struct {
txpool txPool
chainman *core.ChainManager
chaindb ethdb.Database

downloader *downloader.Downloader
fetcher *fetcher.Fetcher
peers *peerSet
protVer, netId int
txpool txPool
chainman *core.ChainManager
downloader *downloader.Downloader
fetcher *fetcher.Fetcher
peers *peerSet

SubProtocols []p2p.Protocol

@ -87,17 +85,17 @@ type ProtocolManager struct {

// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// of interacting with the ethereum network.
func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager, chaindb ethdb.Database) *ProtocolManager {
|
||||
func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager) *ProtocolManager {
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
eventMux: mux,
|
||||
txpool: txpool,
|
||||
chainman: chainman,
|
||||
chaindb: chaindb,
|
||||
peers: newPeerSet(),
|
||||
newPeerCh: make(chan *peer, 1),
|
||||
txsyncCh: make(chan *txsync),
|
||||
quitSync: make(chan struct{}),
|
||||
netId: networkId,
|
||||
}
|
||||
// Initiate a sub-protocol for every implemented version we can handle
|
||||
manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions))
|
||||
@ -116,10 +114,10 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po
|
||||
}
|
||||
}
|
||||
// Construct the different synchronisation mechanisms
|
||||
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.GetTd, manager.chainman.InsertChain, manager.removePeer)
|
||||
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.InsertChain, manager.removePeer)
|
||||
|
||||
validator := func(block *types.Block, parent *types.Block) error {
|
||||
return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
|
||||
return core.ValidateHeader(pow, block.Header(), parent, true, false)
|
||||
}
|
||||
heighter := func() uint64 {
|
||||
return manager.chainman.CurrentBlock().NumberU64()
|
||||
@ -178,7 +176,7 @@ func (pm *ProtocolManager) Stop() {
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
||||
return newPeer(pv, nv, p, newMeteredMsgWriter(rw))
|
||||
return newPeer(pv, nv, p, rw)
|
||||
}

// handle is the callback invoked to manage the life cycle of an eth peer. When
@ -192,9 +190,6 @@ func (pm *ProtocolManager) handle(p *peer) error {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
return err
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
// Register the peer locally
glog.V(logger.Detail).Infof("%v: adding peer", p)
if err := pm.peers.Register(p); err != nil {
@ -204,9 +199,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
defer pm.removePeer(p.id)

// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks,
p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies); err != nil {
if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil {
return err
}
// Propagate existing transactions. New transactions appearing
@ -237,12 +230,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
defer msg.Discard()

// Handle the message depending on its contents
switch {
case msg.Code == StatusMsg:
switch msg.Code {
case StatusMsg:
// Status messages should never arrive after the handshake
return errResp(ErrExtraStatusMsg, "uncontrolled status message")

case p.version < eth62 && msg.Code == GetBlockHashesMsg:
case GetBlockHashesMsg:
// Retrieve the number of hashes to return and from which origin hash
var request getBlockHashesData
if err := msg.Decode(&request); err != nil {
@ -258,7 +251,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
return p.SendBlockHashes(hashes)

case p.version < eth62 && msg.Code == GetBlockHashesFromNumberMsg:
case GetBlockHashesFromNumberMsg:
// Retrieve and decode the number of hashes to return and from which origin number
var request getBlockHashesFromNumberData
if err := msg.Decode(&request); err != nil {
@ -285,19 +278,24 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
return p.SendBlockHashes(hashes)

case p.version < eth62 && msg.Code == BlockHashesMsg:
case BlockHashesMsg:
// A batch of hashes arrived to one of our previous requests
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
reqHashInPacketsMeter.Mark(1)

var hashes []common.Hash
if err := msg.Decode(&hashes); err != nil {
if err := msgStream.Decode(&hashes); err != nil {
break
}
reqHashInTrafficMeter.Mark(int64(32 * len(hashes)))

// Deliver them all to the downloader for queuing
err := pm.downloader.DeliverHashes61(p.id, hashes)
err := pm.downloader.DeliverHashes(p.id, hashes)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}

case p.version < eth62 && msg.Code == GetBlocksMsg:
case GetBlocksMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
@ -307,279 +305,94 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
var (
hash common.Hash
bytes common.StorageSize
hashes []common.Hash
blocks []*types.Block
)
for len(blocks) < downloader.MaxBlockFetch && bytes < softResponseLimit {
// Retrieve the hash of the next block
for {
err := msgStream.Decode(&hash)
if err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
hashes = append(hashes, hash)

// Retrieve the requested block, stopping if enough was found
if block := pm.chainman.GetBlock(hash); block != nil {
blocks = append(blocks, block)
bytes += block.Size()
if len(blocks) >= downloader.MaxBlockFetch || bytes > maxBlockRespSize {
break
}
}
}
if glog.V(logger.Detail) && len(blocks) == 0 && len(hashes) > 0 {
list := "["
for _, hash := range hashes {
list += fmt.Sprintf("%x, ", hash[:4])
}
list = list[:len(list)-2] + "]"

glog.Infof("%v: no blocks found for requested hashes %s", p, list)
}
return p.SendBlocks(blocks)

case p.version < eth62 && msg.Code == BlocksMsg:
case BlocksMsg:
// Decode the arrived block message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
reqBlockInPacketsMeter.Mark(1)

var blocks []*types.Block
if err := msg.Decode(&blocks); err != nil {
if err := msgStream.Decode(&blocks); err != nil {
glog.V(logger.Detail).Infoln("Decode error", err)
blocks = nil
}
// Update the receive timestamp of each block
for _, block := range blocks {
reqBlockInTrafficMeter.Mark(block.Size().Int64())
block.ReceivedAt = msg.ReceivedAt
}
// Filter out any explicitly requested blocks, deliver the rest to the downloader
if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
pm.downloader.DeliverBlocks61(p.id, blocks)
if blocks := pm.fetcher.Filter(blocks); len(blocks) > 0 {
pm.downloader.DeliverBlocks(p.id, blocks)
}

// Block header query, collect the requested headers and reply
case p.version >= eth62 && msg.Code == GetBlockHeadersMsg:
// Decode the complex header query
var query getBlockHeadersData
if err := msg.Decode(&query); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
// Gather headers until the fetch or network limits are reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
// Retrieve the next header satisfying the query
var origin *types.Header
if query.Origin.Hash != (common.Hash{}) {
origin = pm.chainman.GetHeader(query.Origin.Hash)
} else {
origin = pm.chainman.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
headers = append(headers, origin)
bytes += estHeaderRlpSize

// Advance to the next header of the query
switch {
case query.Origin.Hash != (common.Hash{}) && query.Reverse:
// Hash based traversal towards the genesis block
for i := 0; i < int(query.Skip)+1; i++ {
if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil {
query.Origin.Hash = header.ParentHash
} else {
unknown = true
break
}
}
case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
// Hash based traversal towards the leaf block
if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
query.Origin.Hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
case query.Reverse:
// Number based traversal towards the genesis block
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= (query.Skip + 1)
} else {
unknown = true
}

case !query.Reverse:
// Number based traversal towards the leaf block
query.Origin.Number += (query.Skip + 1)
}
}
return p.SendBlockHeaders(headers)
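To make the skip/reverse semantics above concrete: each step of the number-based traversal advances Skip+1 blocks from the origin. The following is a minimal sketch, not part of the diff; the helper name and example values are hypothetical.

// headerNumbers is a hypothetical helper illustrating the number-based
// traversal above: every step moves Skip+1 blocks in the query direction.
func headerNumbers(origin, amount, skip uint64, reverse bool) []uint64 {
	numbers := make([]uint64, 0, amount)
	for i := uint64(0); i < amount; i++ {
		numbers = append(numbers, origin)
		step := skip + 1
		if reverse {
			if origin < step {
				break // would run past the genesis block
			}
			origin -= step
		} else {
			origin += step
		}
	}
	return numbers
}

// headerNumbers(10, 3, 3, false) -> [10 14 18]
// headerNumbers(10, 3, 3, true)  -> [10 6 2]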

case p.version >= eth62 && msg.Code == BlockHeadersMsg:
// A batch of headers arrived to one of our previous requests
var headers []*types.Header
if err := msg.Decode(&headers); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Filter out any explicitly requested headers, deliver the rest to the downloader
filter := len(headers) == 1
if filter {
headers = pm.fetcher.FilterHeaders(headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}

case p.version >= eth62 && msg.Code == BlockBodiesMsg:
// A batch of block bodies arrived to one of our previous requests
var request blockBodiesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
transactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))

for i, body := range request {
transactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
if transactions, uncles := pm.fetcher.FilterBodies(transactions, uncles, time.Now()); len(transactions) > 0 || len(uncles) > 0 {
err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}

case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather blocks until the fetch or network limits are reached
var (
hash common.Hash
bytes int
bodies []rlp.RawValue
)
for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
// Retrieve the hash of the next block
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested block body, stopping if enough was found
if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 {
bodies = append(bodies, data)
bytes += len(data)
}
}
return p.SendBlockBodiesRLP(bodies)

case p.version >= eth63 && msg.Code == GetNodeDataMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather state data until the fetch or network limits are reached
var (
hash common.Hash
bytes int
data [][]byte
)
for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
// Retrieve the hash of the next state entry
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested state entry, stopping if enough was found
if entry, err := pm.chaindb.Get(hash.Bytes()); err == nil {
data = append(data, entry)
bytes += len(entry)
}
}
return p.SendNodeData(data)

case p.version >= eth63 && msg.Code == GetReceiptsMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather receipts until the fetch or network limits are reached
var (
hash common.Hash
bytes int
receipts []*types.Receipt
)
for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptsFetch {
// Retrieve the hash of the next transaction receipt
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested receipt, stopping if enough was found
if receipt := core.GetReceipt(pm.chaindb, hash); receipt != nil {
receipts = append(receipts, receipt)
bytes += len(receipt.RlpEncode())
}
}
return p.SendReceipts(receipts)

case msg.Code == NewBlockHashesMsg:
case NewBlockHashesMsg:
// Retrieve and deserialize the remote new block hashes notification
type announce struct {
Hash common.Hash
Number uint64
}
var announces = []announce{}
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))

if p.version < eth62 {
// We're running the old protocol, make block number unknown (0)
var hashes []common.Hash
if err := msg.Decode(&hashes); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
for _, hash := range hashes {
announces = append(announces, announce{hash, 0})
}
} else {
// Otherwise extract both block hash and number
var request newBlockHashesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
for _, block := range request {
announces = append(announces, announce{block.Hash, block.Number})
}
var hashes []common.Hash
if err := msgStream.Decode(&hashes); err != nil {
break
}
propHashInPacketsMeter.Mark(1)
propHashInTrafficMeter.Mark(int64(32 * len(hashes)))

// Mark the hashes as present at the remote node
for _, block := range announces {
p.MarkBlock(block.Hash)
p.SetHead(block.Hash)
for _, hash := range hashes {
p.MarkBlock(hash)
p.SetHead(hash)
}
// Schedule all the unknown hashes for retrieval
unknown := make([]announce, 0, len(announces))
for _, block := range announces {
if !pm.chainman.HasBlock(block.Hash) {
unknown = append(unknown, block)
unknown := make([]common.Hash, 0, len(hashes))
for _, hash := range hashes {
if !pm.chainman.HasBlock(hash) {
unknown = append(unknown, hash)
}
}
for _, block := range unknown {
if p.version < eth62 {
pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestBlocks, nil, nil)
} else {
pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), nil, p.RequestOneHeader, p.RequestBodies)
}
for _, hash := range unknown {
pm.fetcher.Notify(p.id, hash, time.Now(), p.RequestBlocks)
}
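For reference, a minimal sketch of the two announcement encodings this case has to cope with, assuming (as the eth/peer.go hunk below suggests) that newBlockHashesData is a slice of hash/number pairs; the hash and number values here are hypothetical. eth/61 peers send a bare hash list, while eth/62 peers send (hash, number) pairs, which is why the old-protocol path filled the number in as zero.

// Hypothetical values illustrating the wire formats decoded above.
var blockHash common.Hash // placeholder hash of the announced block
announces61 := []common.Hash{blockHash}                       // eth/61: bare hash list
announces62 := newBlockHashesData{{Hash: blockHash, Number: 42}} // eth/62: (hash, number) pairs
_, _ = announces61, announces62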

case msg.Code == NewBlockMsg:
case NewBlockMsg:
// Retrieve and decode the propagated block
var request newBlockData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
propBlockInPacketsMeter.Mark(1)
propBlockInTrafficMeter.Mark(request.Block.Size().Int64())

if err := request.Block.ValidateFields(); err != nil {
return errResp(ErrDecode, "block validation %v: %v", msg, err)
}
@ -608,12 +421,13 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}

case msg.Code == TxMsg:
case TxMsg:
// Transactions arrived, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
propTxnInPacketsMeter.Mark(1)
for i, tx := range txs {
// Validate and mark the remote transaction
if tx == nil {
@ -622,6 +436,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.MarkTransaction(tx.Hash())

// Log its arrival for later analysis
propTxnInTrafficMeter.Mark(tx.Size().Int64())
jsonlogger.LogJson(&logger.EthTxReceived{
TxHash: tx.Hash().Hex(),
RemoteId: p.ID().String(),
@ -646,7 +461,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
var td *big.Int
if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil {
td = new(big.Int).Add(block.Difficulty(), pm.chainman.GetTd(block.ParentHash()))
td = new(big.Int).Add(parent.Td, block.Difficulty())
} else {
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
return
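In other words, total difficulty follows the recurrence TD(block) = TD(parent) + difficulty(block); the two lines above are just the old and new ways of obtaining the parent's TD. A minimal sketch with hypothetical numbers:

parentTd := big.NewInt(1000000) // hypothetical TD of the parent block
blockDiff := big.NewInt(131072) // hypothetical difficulty of the new block
td := new(big.Int).Add(parentTd, blockDiff) // TD = parent TD + block difficulty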
@ -661,11 +476,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
// Otherwise if the block is indeed in our own chain, announce it
if pm.chainman.HasBlock(hash) {
for _, peer := range peers {
if peer.version < eth62 {
peer.SendNewBlockHashes61([]common.Hash{hash})
} else {
peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
}
peer.SendNewBlockHashes([]common.Hash{hash})
}
glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
}

522
eth/handler_test.go
@ -1,522 +0,0 @@
package eth

import (
"fmt"
"math/big"
"math/rand"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
)

// Tests that hashes can be retrieved from a remote chain by hashes in reverse
// order.
func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) }

func testGetBlockHashes(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Create a batch of tests for various scenarios
limit := downloader.MaxHashFetch
tests := []struct {
origin common.Hash
number int
result int
}{
{common.Hash{}, 1, 0}, // Make sure non existent hashes don't return results
{pm.chainman.Genesis().Hash(), 1, 0}, // There are no hashes to retrieve up from the genesis
{pm.chainman.GetBlockByNumber(5).Hash(), 5, 5}, // All the hashes including the genesis requested
{pm.chainman.GetBlockByNumber(5).Hash(), 10, 5}, // More hashes than available till the genesis requested
{pm.chainman.GetBlockByNumber(100).Hash(), 10, 10}, // All hashes available from the middle of the chain
{pm.chainman.CurrentBlock().Hash(), 10, 10}, // All hashes available from the head of the chain
{pm.chainman.CurrentBlock().Hash(), limit, limit}, // Request the maximum allowed hash count
{pm.chainman.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Assemble the hash response we would like to receive
resp := make([]common.Hash, tt.result)
if len(resp) > 0 {
from := pm.chainman.GetBlock(tt.origin).NumberU64() - 1
for j := 0; j < len(resp); j++ {
resp[j] = pm.chainman.GetBlockByNumber(uint64(int(from) - j)).Hash()
}
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x03, getBlockHashesData{tt.origin, uint64(tt.number)})
if err := p2p.ExpectMsg(peer.app, 0x04, resp); err != nil {
t.Errorf("test %d: block hashes mismatch: %v", i, err)
}
}
}

// Tests that hashes can be retrieved from a remote chain by numbers in forward
// order.
func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) }

func testGetBlockHashesFromNumber(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Create a batch of tests for various scenarios
limit := downloader.MaxHashFetch
tests := []struct {
origin uint64
number int
result int
}{
{pm.chainman.CurrentBlock().NumberU64() + 1, 1, 0}, // Out of bounds requests should return empty
{pm.chainman.CurrentBlock().NumberU64(), 1, 1}, // Make sure the head hash can be retrieved
{pm.chainman.CurrentBlock().NumberU64() - 4, 5, 5}, // All hashes, including the head hash requested
{pm.chainman.CurrentBlock().NumberU64() - 4, 10, 5}, // More hashes requested than available till the head
{pm.chainman.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain
{0, 10, 10}, // All hashes available from the root of the chain
{0, limit, limit}, // Request the maximum allowed hash count
{0, limit + 1, limit}, // Request more than the maximum allowed hash count
{0, 1, 1}, // Make sure the genesis hash can be retrieved
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Assemble the hash response we would like to receive
resp := make([]common.Hash, tt.result)
for j := 0; j < len(resp); j++ {
resp[j] = pm.chainman.GetBlockByNumber(tt.origin + uint64(j)).Hash()
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x08, getBlockHashesFromNumberData{tt.origin, uint64(tt.number)})
if err := p2p.ExpectMsg(peer.app, 0x04, resp); err != nil {
t.Errorf("test %d: block hashes mismatch: %v", i, err)
}
}
}

// Tests that blocks can be retrieved from a remote chain based on their hashes.
func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) }

func testGetBlocks(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Create a batch of tests for various scenarios
limit := downloader.MaxBlockFetch
tests := []struct {
random int // Number of blocks to fetch randomly from the chain
explicit []common.Hash // Explicitly requested blocks
available []bool // Availability of explicitly requested blocks
expected int // Total number of existing blocks to expect
}{
{1, nil, nil, 1}, // A single random block should be retrievable
{10, nil, nil, 10}, // Multiple random blocks should be retrievable
{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
{0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned

// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
common.Hash{},
pm.chainman.GetBlockByNumber(1).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(10).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(100).Hash(),
common.Hash{},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Collect the hashes to request, and the response to expect
hashes, seen := []common.Hash{}, make(map[int64]bool)
blocks := []*types.Block{}

for j := 0; j < tt.random; j++ {
for {
num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64()))
if !seen[num] {
seen[num] = true

block := pm.chainman.GetBlockByNumber(uint64(num))
hashes = append(hashes, block.Hash())
if len(blocks) < tt.expected {
blocks = append(blocks, block)
}
break
}
}
}
for j, hash := range tt.explicit {
hashes = append(hashes, hash)
if tt.available[j] && len(blocks) < tt.expected {
blocks = append(blocks, pm.chainman.GetBlock(hash))
}
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x05, hashes)
if err := p2p.ExpectMsg(peer.app, 0x06, blocks); err != nil {
t.Errorf("test %d: blocks mismatch: %v", i, err)
}
}
}

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }

func testGetBlockHeaders(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Create a "random" unknown hash for testing
var unknown common.Hash
for i := range unknown {
unknown[i] = byte(i)
}
// Create a batch of tests for various scenarios
limit := uint64(downloader.MaxHeaderFetch)
tests := []struct {
query *getBlockHeadersData // The query to execute for header retrieval
expect []common.Hash // The hashes of the block whose headers are expected
}{
// A single random block should be retrievable by hash and number too
{
&getBlockHeadersData{Origin: hashOrNumber{Hash: pm.chainman.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()},
},
// Multiple headers should be retrievable in both directions
{
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 1).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 2).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 1).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 2).Hash(),
},
},
// Multiple headers with skip lists should be retrievable
{
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 4).Hash(),
pm.chainman.GetBlockByNumber(limit/2 + 8).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(limit / 2).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 4).Hash(),
pm.chainman.GetBlockByNumber(limit/2 - 8).Hash(),
},
},
// The chain endpoints should be retrievable
{
&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{pm.chainman.GetBlockByNumber(0).Hash()},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64()}, Amount: 1},
[]common.Hash{pm.chainman.CurrentBlock().Hash()},
},
// Ensure protocol limits are honored
{
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
pm.chainman.GetBlockHashesFromHash(pm.chainman.CurrentBlock().Hash(), limit),
},
// Check that requesting more than available is handled gracefully
{
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(),
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64()).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(4).Hash(),
pm.chainman.GetBlockByNumber(0).Hash(),
},
},
// Check that requesting more than available is handled gracefully, even if mid skip
{
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(),
pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 1).Hash(),
},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{
pm.chainman.GetBlockByNumber(4).Hash(),
pm.chainman.GetBlockByNumber(1).Hash(),
},
},
// Check that non existing headers aren't returned
{
&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{},
}, {
&getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() + 1}, Amount: 1},
[]common.Hash{},
},
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Collect the headers to expect in the response
headers := []*types.Header{}
for _, hash := range tt.expect {
headers = append(headers, pm.chainman.GetBlock(hash).Header())
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x03, tt.query)
if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) }
func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }

func testGetBlockBodies(t *testing.T, protocol int) {
pm := newTestProtocolManager(downloader.MaxBlockFetch+15, nil, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Create a batch of tests for various scenarios
limit := downloader.MaxBlockFetch
tests := []struct {
random int // Number of blocks to fetch randomly from the chain
explicit []common.Hash // Explicitly requested blocks
available []bool // Availability of explicitly requested blocks
expected int // Total number of existing blocks to expect
}{
{1, nil, nil, 1}, // A single random block should be retrievable
{10, nil, nil, 10}, // Multiple random blocks should be retrievable
{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
{0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned

// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
common.Hash{},
pm.chainman.GetBlockByNumber(1).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(10).Hash(),
common.Hash{},
pm.chainman.GetBlockByNumber(100).Hash(),
common.Hash{},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
// Collect the hashes to request, and the response to expect
hashes, seen := []common.Hash{}, make(map[int64]bool)
bodies := []*blockBody{}

for j := 0; j < tt.random; j++ {
for {
num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64()))
if !seen[num] {
seen[num] = true

block := pm.chainman.GetBlockByNumber(uint64(num))
hashes = append(hashes, block.Hash())
if len(bodies) < tt.expected {
bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
}
break
}
}
}
for j, hash := range tt.explicit {
hashes = append(hashes, hash)
if tt.available[j] && len(bodies) < tt.expected {
block := pm.chainman.GetBlock(hash)
bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
}
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x05, hashes)
if err := p2p.ExpectMsg(peer.app, 0x06, bodies); err != nil {
t.Errorf("test %d: bodies mismatch: %v", i, err)
}
}
}

// Tests that the node state database can be retrieved based on hashes.
func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }

func testGetNodeData(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)

// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey)
tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := block.PrevBlock(1).Header()
b2.Extra = []byte("foo")
block.AddUncle(b2)
b3 := block.PrevBlock(2).Header()
b3.Extra = []byte("foo")
block.AddUncle(b3)
}
}
// Assemble the test environment
pm := newTestProtocolManager(4, generator, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Fetch for now the entire chain db
hashes := []common.Hash{}
for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() {
hashes = append(hashes, common.BytesToHash(key))
}
p2p.Send(peer.app, 0x0d, hashes)
msg, err := peer.app.ReadMsg()
if err != nil {
t.Fatalf("failed to read node data response: %v", err)
}
if msg.Code != 0x0e {
t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, 0x0e)
}
var data [][]byte
if err := msg.Decode(&data); err != nil {
t.Fatalf("failed to decode response node data: %v", err)
}
// Verify that all hashes correspond to the requested data, and reconstruct a state tree
for i, want := range hashes {
if hash := crypto.Sha3Hash(data[i]); hash != want {
t.Errorf("data hash mismatch: have %x, want %x", hash, want)
}
}
statedb, _ := ethdb.NewMemDatabase()
for i := 0; i < len(data); i++ {
statedb.Put(hashes[i].Bytes(), data[i])
}
accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr}
for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ {
trie := state.New(pm.chainman.GetBlockByNumber(i).Root(), statedb)

for j, acc := range accounts {
bw := pm.chainman.State().GetBalance(acc)
bh := trie.GetBalance(acc)

if (bw != nil && bh == nil) || (bw == nil && bh != nil) {
t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
}
if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
}
}
}
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) }

func testGetReceipt(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)

// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey)
tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := block.PrevBlock(1).Header()
b2.Extra = []byte("foo")
block.AddUncle(b2)
b3 := block.PrevBlock(2).Header()
b3.Extra = []byte("foo")
block.AddUncle(b3)
}
}
// Assemble the test environment
pm := newTestProtocolManager(4, generator, nil)
peer, _ := newTestPeer("peer", protocol, pm, true)
defer peer.close()

// Collect the hashes to request, and the response to expect
hashes := []common.Hash{}
for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ {
for _, tx := range pm.chainman.GetBlockByNumber(i).Transactions() {
hashes = append(hashes, tx.Hash())
}
}
receipts := make([]*types.Receipt, len(hashes))
for i, hash := range hashes {
receipts[i] = core.GetReceipt(pm.chaindb, hash)
}
// Send the hash request and verify the response
p2p.Send(peer.app, 0x0f, hashes)
if err := p2p.ExpectMsg(peer.app, 0x10, receipts); err != nil {
t.Errorf("receipts mismatch: %v", err)
}
}

147
eth/helper_test.go
@ -1,147 +0,0 @@
// This file contains some shared testing functionality, common to multiple
// different files and modules being tested.

package eth

import (
"crypto/rand"
"math/big"
"sync"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
)

var (
testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
testBankFunds = big.NewInt(1000000)
)

// newTestProtocolManager creates a new protocol manager for testing purposes,
// with the given number of blocks already known, and potential notification
// channels for different events.
func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager {
var (
evmux = new(event.TypeMux)
pow = new(core.FakePow)
db, _ = ethdb.NewMemDatabase()
genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds})
chainman, _ = core.NewChainManager(db, pow, evmux)
blockproc = core.NewBlockProcessor(db, pow, chainman, evmux)
)
chainman.SetProcessor(blockproc)
if _, err := chainman.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil {
panic(err)
}
pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, chainman, db)
pm.Start()
return pm
}

// testTxPool is a fake, helper transaction pool for testing purposes
type testTxPool struct {
pool []*types.Transaction // Collection of all transactions
added chan<- []*types.Transaction // Notification channel for new transactions

lock sync.RWMutex // Protects the transaction pool
}

// AddTransactions appends a batch of transactions to the pool, and notifies any
// listeners if the addition channel is non-nil
func (p *testTxPool) AddTransactions(txs []*types.Transaction) {
p.lock.Lock()
defer p.lock.Unlock()

p.pool = append(p.pool, txs...)
if p.added != nil {
p.added <- txs
}
}

// GetTransactions returns all the transactions known to the pool
func (p *testTxPool) GetTransactions() types.Transactions {
p.lock.RLock()
defer p.lock.RUnlock()

txs := make([]*types.Transaction, len(p.pool))
copy(txs, p.pool)

return txs
}

// newTestTransaction creates a new dummy transaction.
func newTestTransaction(from *crypto.Key, nonce uint64, datasize int) *types.Transaction {
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize))
tx, _ = tx.SignECDSA(from.PrivateKey)

return tx
}

// testPeer is a simulated peer to allow testing direct network calls.
type testPeer struct {
net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging
app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side
*peer
}

// newTestPeer creates a new peer registered at the given protocol manager.
func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*testPeer, <-chan error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()

// Generate a random id and create the peer
var id discover.NodeID
rand.Read(id[:])

peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)

// Start the peer on a new thread
errc := make(chan error, 1)
go func() {
pm.newPeerCh <- peer
errc <- pm.handle(peer)
}()
tp := &testPeer{
app: app,
net: net,
peer: peer,
}
// Execute any implicitly requested handshakes and return
if shake {
td, head, genesis := pm.chainman.Status()
tp.handshake(nil, td, head, genesis)
}
return tp, errc
}
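Taken together, the helpers above are wired up roughly like this minimal sketch (block count and protocol version are hypothetical):

pm := newTestProtocolManager(32, nil, nil) // chain of 32 blocks, no custom generator, no tx feed
defer pm.Stop()
peer, _ := newTestPeer("peer", 61, pm, true) // handshaked eth/61 test peer
defer peer.close()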

// handshake simulates a trivial handshake that expects the same state from the
// remote side as we are simulating locally.
func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) {
msg := &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: uint32(NetworkId),
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
}
if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil {
t.Fatalf("status recv: %v", err)
}
if err := p2p.Send(p.app, StatusMsg, msg); err != nil {
t.Fatalf("status send: %v", err)
}
}

// close terminates the local side of the peer, notifying the remote protocol
// manager of termination.
func (p *testPeer) close() {
p.app.Close()
}

153
eth/metrics.go
@ -18,140 +18,27 @@ package eth
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
var (
|
||||
propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets")
|
||||
propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic")
|
||||
propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets")
|
||||
propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic")
|
||||
propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets")
|
||||
propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic")
|
||||
propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets")
|
||||
propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic")
|
||||
propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets")
|
||||
propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic")
|
||||
propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets")
|
||||
propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic")
|
||||
reqHashInPacketsMeter = metrics.NewMeter("eth/req/hashes/in/packets")
|
||||
reqHashInTrafficMeter = metrics.NewMeter("eth/req/hashes/in/traffic")
|
||||
reqHashOutPacketsMeter = metrics.NewMeter("eth/req/hashes/out/packets")
|
||||
reqHashOutTrafficMeter = metrics.NewMeter("eth/req/hashes/out/traffic")
|
||||
reqBlockInPacketsMeter = metrics.NewMeter("eth/req/blocks/in/packets")
|
||||
reqBlockInTrafficMeter = metrics.NewMeter("eth/req/blocks/in/traffic")
|
||||
reqBlockOutPacketsMeter = metrics.NewMeter("eth/req/blocks/out/packets")
|
||||
reqBlockOutTrafficMeter = metrics.NewMeter("eth/req/blocks/out/traffic")
|
||||
reqHeaderInPacketsMeter = metrics.NewMeter("eth/req/headers/in/packets")
|
||||
reqHeaderInTrafficMeter = metrics.NewMeter("eth/req/headers/in/traffic")
|
||||
reqHeaderOutPacketsMeter = metrics.NewMeter("eth/req/headers/out/packets")
|
||||
reqHeaderOutTrafficMeter = metrics.NewMeter("eth/req/headers/out/traffic")
|
||||
reqBodyInPacketsMeter = metrics.NewMeter("eth/req/bodies/in/packets")
|
||||
reqBodyInTrafficMeter = metrics.NewMeter("eth/req/bodies/in/traffic")
|
||||
reqBodyOutPacketsMeter = metrics.NewMeter("eth/req/bodies/out/packets")
|
||||
reqBodyOutTrafficMeter = metrics.NewMeter("eth/req/bodies/out/traffic")
|
||||
reqStateInPacketsMeter = metrics.NewMeter("eth/req/states/in/packets")
|
||||
reqStateInTrafficMeter = metrics.NewMeter("eth/req/states/in/traffic")
|
||||
reqStateOutPacketsMeter = metrics.NewMeter("eth/req/states/out/packets")
|
||||
reqStateOutTrafficMeter = metrics.NewMeter("eth/req/states/out/traffic")
|
||||
reqReceiptInPacketsMeter = metrics.NewMeter("eth/req/receipts/in/packets")
|
||||
reqReceiptInTrafficMeter = metrics.NewMeter("eth/req/receipts/in/traffic")
|
||||
reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
|
||||
reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")
|
||||
miscInPacketsMeter = metrics.NewMeter("eth/misc/in/packets")
|
||||
miscInTrafficMeter = metrics.NewMeter("eth/misc/in/traffic")
|
||||
miscOutPacketsMeter = metrics.NewMeter("eth/misc/out/packets")
|
||||
miscOutTrafficMeter = metrics.NewMeter("eth/misc/out/traffic")
|
||||
propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets")
|
||||
propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic")
|
||||
propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets")
|
||||
propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic")
|
||||
propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets")
|
||||
propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic")
|
||||
propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets")
|
||||
propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic")
|
||||
propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets")
|
||||
propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic")
|
||||
propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets")
|
||||
propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic")
|
||||
reqHashInPacketsMeter = metrics.NewMeter("eth/req/hashes/in/packets")
|
||||
reqHashInTrafficMeter = metrics.NewMeter("eth/req/hashes/in/traffic")
|
||||
reqHashOutPacketsMeter = metrics.NewMeter("eth/req/hashes/out/packets")
|
||||
reqHashOutTrafficMeter = metrics.NewMeter("eth/req/hashes/out/traffic")
|
||||
reqBlockInPacketsMeter = metrics.NewMeter("eth/req/blocks/in/packets")
|
||||
reqBlockInTrafficMeter = metrics.NewMeter("eth/req/blocks/in/traffic")
|
||||
reqBlockOutPacketsMeter = metrics.NewMeter("eth/req/blocks/out/packets")
|
||||
reqBlockOutTrafficMeter = metrics.NewMeter("eth/req/blocks/out/traffic")
|
||||
)
|
||||
|
||||
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
|
||||
// accumulating the above defined metrics based on the data stream contents.
|
||||
type meteredMsgReadWriter struct {
    p2p.MsgReadWriter     // Wrapped message stream to meter
    version int           // Protocol version to select correct meters
}

// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
// metrics system is disabled, this function returns the original object.
func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
    if !metrics.Enabled {
        return rw
    }
    return &meteredMsgReadWriter{MsgReadWriter: rw}
}

// Init sets the protocol version used by the stream to know which meters to
// increment in case of overlapping message ids between protocol versions.
func (rw *meteredMsgReadWriter) Init(version int) {
    rw.version = version
}

func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
    // Read the message and short circuit in case of an error
    msg, err := rw.MsgReadWriter.ReadMsg()
    if err != nil {
        return msg, err
    }
    // Account for the data traffic
    packets, traffic := miscInPacketsMeter, miscInTrafficMeter
    switch {
    case rw.version < eth62 && msg.Code == BlockHashesMsg:
        packets, traffic = reqHashInPacketsMeter, reqHashInTrafficMeter
    case rw.version < eth62 && msg.Code == BlocksMsg:
        packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter

    case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
        packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
    case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
        packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter

    case rw.version >= eth63 && msg.Code == NodeDataMsg:
        packets, traffic = reqStateInPacketsMeter, reqStateInTrafficMeter
    case rw.version >= eth63 && msg.Code == ReceiptsMsg:
        packets, traffic = reqReceiptInPacketsMeter, reqReceiptInTrafficMeter

    case msg.Code == NewBlockHashesMsg:
        packets, traffic = propHashInPacketsMeter, propHashInTrafficMeter
    case msg.Code == NewBlockMsg:
        packets, traffic = propBlockInPacketsMeter, propBlockInTrafficMeter
    case msg.Code == TxMsg:
        packets, traffic = propTxnInPacketsMeter, propTxnInTrafficMeter
    }
    packets.Mark(1)
    traffic.Mark(int64(msg.Size))

    return msg, err
}

func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
    // Account for the data traffic
    packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
    switch {
    case rw.version < eth62 && msg.Code == BlockHashesMsg:
        packets, traffic = reqHashOutPacketsMeter, reqHashOutTrafficMeter
    case rw.version < eth62 && msg.Code == BlocksMsg:
        packets, traffic = reqBlockOutPacketsMeter, reqBlockOutTrafficMeter

    case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
        packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter
    case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
        packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter

    case rw.version >= eth63 && msg.Code == NodeDataMsg:
        packets, traffic = reqStateOutPacketsMeter, reqStateOutTrafficMeter
    case rw.version >= eth63 && msg.Code == ReceiptsMsg:
        packets, traffic = reqReceiptOutPacketsMeter, reqReceiptOutTrafficMeter

    case msg.Code == NewBlockHashesMsg:
        packets, traffic = propHashOutPacketsMeter, propHashOutTrafficMeter
    case msg.Code == NewBlockMsg:
        packets, traffic = propBlockOutPacketsMeter, propBlockOutTrafficMeter
    case msg.Code == TxMsg:
        packets, traffic = propTxnOutPacketsMeter, propTxnOutTrafficMeter
    }
    packets.Mark(1)
    traffic.Mark(int64(msg.Size))

    // Send the packet to the p2p layer
    return rw.MsgReadWriter.WriteMsg(msg)
}
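As a side note, a minimal sketch of how such a wrapper is typically hooked in when a protocol session starts (hypothetical wiring, not part of this diff; it only uses names defined above):

// setupStream is a hypothetical hook-in point, assuming the handler receives
// a raw p2p.MsgReadWriter once a protocol session has been negotiated.
func setupStream(version int, raw p2p.MsgReadWriter) p2p.MsgReadWriter {
    rw := newMeteredMsgWriter(raw) // returns raw unchanged when metrics are off
    if metered, ok := rw.(*meteredMsgReadWriter); ok {
        metered.Init(version) // select the meter set for this protocol version
    }
    return rw
}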
113
eth/peer.go
@@ -28,7 +28,6 @@ import (
    "github.com/ethereum/go-ethereum/logger"
    "github.com/ethereum/go-ethereum/logger/glog"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/rlp"
    "gopkg.in/fatih/set.v0"
)

@@ -130,7 +129,9 @@ func (p *peer) MarkTransaction(hash common.Hash) {
// SendTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
func (p *peer) SendTransactions(txs types.Transactions) error {
    propTxnOutPacketsMeter.Mark(1)
    for _, tx := range txs {
        propTxnOutTrafficMeter.Mark(tx.Size().Int64())
        p.knownTxs.Add(tx.Hash())
    }
    return p2p.Send(p.rw, TxMsg, txs)
@@ -138,132 +139,62 @@ func (p *peer) SendTransactions(txs types.Transactions) error {

// SendBlockHashes sends a batch of known hashes to the remote peer.
func (p *peer) SendBlockHashes(hashes []common.Hash) error {
    reqHashOutPacketsMeter.Mark(1)
    reqHashOutTrafficMeter.Mark(int64(32 * len(hashes)))

    return p2p.Send(p.rw, BlockHashesMsg, hashes)
}

// SendBlocks sends a batch of blocks to the remote peer.
func (p *peer) SendBlocks(blocks []*types.Block) error {
    reqBlockOutPacketsMeter.Mark(1)
    for _, block := range blocks {
        reqBlockOutTrafficMeter.Mark(block.Size().Int64())
    }
    return p2p.Send(p.rw, BlocksMsg, blocks)
}

// SendNewBlockHashes61 announces the availability of a number of blocks through
// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes61(hashes []common.Hash) error {
func (p *peer) SendNewBlockHashes(hashes []common.Hash) error {
    propHashOutPacketsMeter.Mark(1)
    propHashOutTrafficMeter.Mark(int64(32 * len(hashes)))

    for _, hash := range hashes {
        p.knownBlocks.Add(hash)
    }
    return p2p.Send(p.rw, NewBlockHashesMsg, hashes)
}

// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
    for _, hash := range hashes {
        p.knownBlocks.Add(hash)
    }
    request := make(newBlockHashesData, len(hashes))
    for i := 0; i < len(hashes); i++ {
        request[i].Hash = hashes[i]
        request[i].Number = numbers[i]
    }
    return p2p.Send(p.rw, NewBlockHashesMsg, request)
}

// SendNewBlock propagates an entire block to a remote peer.
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
    propBlockOutPacketsMeter.Mark(1)
    propBlockOutTrafficMeter.Mark(block.Size().Int64())

    p.knownBlocks.Add(block.Hash())
    return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
}

// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(headers []*types.Header) error {
    return p2p.Send(p.rw, BlockHeadersMsg, headers)
}

// SendBlockBodies sends a batch of block contents to the remote peer.
func (p *peer) SendBlockBodies(bodies []*blockBody) error {
    return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
}

// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
// an already RLP encoded format.
func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
    return p2p.Send(p.rw, BlockBodiesMsg, bodies)
}

// SendNodeData sends a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) SendNodeData(data [][]byte) error {
    return p2p.Send(p.rw, NodeDataMsg, data)
}

// SendReceipts sends a batch of transaction receipts, corresponding to the ones
// requested.
func (p *peer) SendReceipts(receipts []*types.Receipt) error {
    return p2p.Send(p.rw, ReceiptsMsg, receipts)
}

// RequestHashes fetches a batch of hashes from a peer, starting at from, going
// towards the genesis block.
func (p *peer) RequestHashes(from common.Hash) error {
    glog.V(logger.Debug).Infof("%v fetching hashes (%d) from %x...", p, downloader.MaxHashFetch, from[:4])
    glog.V(logger.Debug).Infof("Peer [%s] fetching hashes (%d) from %x...\n", p.id, downloader.MaxHashFetch, from[:4])
    return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesData{from, uint64(downloader.MaxHashFetch)})
}

// RequestHashesFromNumber fetches a batch of hashes from a peer, starting at
// the requested block number, going upwards towards the genesis block.
// RequestHashesFromNumber fetches a batch of hashes from a peer, starting at the
// requested block number, going upwards towards the genesis block.
func (p *peer) RequestHashesFromNumber(from uint64, count int) error {
    glog.V(logger.Debug).Infof("%v fetching hashes (%d) from #%d...", p, count, from)
    glog.V(logger.Debug).Infof("Peer [%s] fetching hashes (%d) from #%d...\n", p.id, count, from)
    return p2p.Send(p.rw, GetBlockHashesFromNumberMsg, getBlockHashesFromNumberData{from, uint64(count)})
}

// RequestBlocks fetches a batch of blocks corresponding to the specified hashes.
func (p *peer) RequestBlocks(hashes []common.Hash) error {
    glog.V(logger.Debug).Infof("%v fetching %v blocks", p, len(hashes))
    glog.V(logger.Debug).Infof("[%s] fetching %v blocks\n", p.id, len(hashes))
    return p2p.Send(p.rw, GetBlocksMsg, hashes)
}

// RequestOneHeader is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher.
func (p *peer) RequestOneHeader(hash common.Hash) error {
    glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash)
    return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
}

// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
    glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
    return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
    glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
    return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(hashes []common.Hash) error {
    glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
    return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
}

// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestNodeData(hashes []common.Hash) error {
    glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes))
    return p2p.Send(p.rw, GetNodeDataMsg, hashes)
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(hashes []common.Hash) error {
    glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
    return p2p.Send(p.rw, GetReceiptsMsg, hashes)
}

// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) error {
126
eth/protocol.go
@@ -17,28 +17,17 @@
package eth

import (
    "fmt"
    "io"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
)

// Constants to match up protocol versions and messages
const (
    eth61 = 61
    eth62 = 62
    eth63 = 63
    eth64 = 64
)

// Supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{eth64, eth63, eth62, eth61}
var ProtocolVersions = []uint{61, 60}

// Number of implemented messages corresponding to the different protocol versions.
var ProtocolLengths = []uint64{15, 12, 8, 9}
var ProtocolLengths = []uint64{9, 8}

const (
    NetworkId = 1
@@ -47,37 +36,15 @@ const (

// eth protocol message codes
const (
    // Protocol messages belonging to eth/61
    StatusMsg = 0x00
    NewBlockHashesMsg = 0x01
    TxMsg = 0x02
    GetBlockHashesMsg = 0x03
    BlockHashesMsg = 0x04
    GetBlocksMsg = 0x05
    BlocksMsg = 0x06
    NewBlockMsg = 0x07
    GetBlockHashesFromNumberMsg = 0x08

    // Protocol messages belonging to eth/62 (new protocol from scratch)
    // StatusMsg = 0x00 (uncomment after eth/61 deprecation)
    // NewBlockHashesMsg = 0x01 (uncomment after eth/61 deprecation)
    // TxMsg = 0x02 (uncomment after eth/61 deprecation)
    GetBlockHeadersMsg = 0x03
    BlockHeadersMsg = 0x04
    GetBlockBodiesMsg = 0x05
    BlockBodiesMsg = 0x06
    // NewBlockMsg = 0x07 (uncomment after eth/61 deprecation)

    // Protocol messages belonging to eth/63
    GetNodeDataMsg = 0x0d
    NodeDataMsg = 0x0e
    GetReceiptsMsg = 0x0f
    ReceiptsMsg = 0x10

    // Protocol messages belonging to eth/64
    GetAcctProofMsg = 0x11
    GetStorageDataProof = 0x12
    Proof = 0x13
    StatusMsg = iota
    NewBlockHashesMsg
    TxMsg
    GetBlockHashesMsg
    BlockHashesMsg
    GetBlocksMsg
    BlocksMsg
    NewBlockMsg
    GetBlockHashesFromNumberMsg
)

type errCode int
@@ -135,85 +102,22 @@ type statusData struct {
    GenesisBlock common.Hash
}

// newBlockHashesData is the network packet for the block announcements.
type newBlockHashesData []struct {
    Hash common.Hash // Hash of one particular block being announced
    Number uint64 // Number of one particular block being announced
}

// getBlockHashesData is the network packet for the hash based hash retrieval.
// getBlockHashesData is the network packet for the hash based block retrieval
// message.
type getBlockHashesData struct {
    Hash common.Hash
    Amount uint64
}

// getBlockHashesFromNumberData is the network packet for the number based hash
// retrieval.
// getBlockHashesFromNumberData is the network packet for the number based block
// retrieval message.
type getBlockHashesFromNumberData struct {
    Number uint64
    Amount uint64
}

// getBlockHeadersData represents a block header query.
type getBlockHeadersData struct {
    Origin hashOrNumber // Block from which to retrieve headers
    Amount uint64 // Maximum number of headers to retrieve
    Skip uint64 // Blocks to skip between consecutive headers
    Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
}

// hashOrNumber is a combined field for specifying an origin block.
type hashOrNumber struct {
    Hash common.Hash // Block hash from which to retrieve headers (excludes Number)
    Number uint64 // Block number from which to retrieve headers (excludes Hash)
}

// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the
// two contained union fields.
func (hn *hashOrNumber) EncodeRLP(w io.Writer) error {
    if hn.Hash == (common.Hash{}) {
        return rlp.Encode(w, hn.Number)
    }
    if hn.Number != 0 {
        return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
    }
    return rlp.Encode(w, hn.Hash)
}

// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents
// into either a block hash or a block number.
func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {
    _, size, _ := s.Kind()
    origin, err := s.Raw()
    if err == nil {
        switch {
        case size == 32:
            err = rlp.DecodeBytes(origin, &hn.Hash)
        case size <= 8:
            err = rlp.DecodeBytes(origin, &hn.Number)
        default:
            err = fmt.Errorf("invalid input size %d for origin", size)
        }
    }
    return err
}

// newBlockData is the network packet for the block propagation message.
type newBlockData struct {
    Block *types.Block
    TD *big.Int
}

// blockBody represents the data content of a single block.
type blockBody struct {
    Transactions []*types.Transaction // Transactions contained within a block
    Uncles []*types.Header // Uncles contained within a block
}

// blockBodiesData is the network packet for block content distribution.
type blockBodiesData []*blockBody

// nodeDataData is the network response packet for a node data retrieval.
type nodeDataData []struct {
    Value []byte
}
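To make the union encoding above concrete, here is a short sketch (assumed to live in package eth next to these types, with fmt and rlp imported as in this file): a number-only origin round-trips through the size-based decoder.

// exampleHashOrNumber is a hypothetical helper illustrating the union field:
// encoding a number-origin emits just the uint64; the decoder recognises it
// again because the RLP payload is at most 8 bytes.
func exampleHashOrNumber() {
    byNumber := hashOrNumber{Number: 314}
    blob, _ := rlp.EncodeToBytes(&byNumber) // encodes only the uint64 field

    var decoded hashOrNumber
    rlp.DecodeBytes(blob, &decoded) // payload size <= 8, so Number is restored
    fmt.Println(decoded.Number)    // prints 314
}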
@@ -18,16 +18,19 @@ package eth

import (
    "crypto/rand"
    "fmt"
    "math/big"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/p2p/discover"
)

func init() {
@@ -37,14 +40,8 @@ func init() {

var testAccount = crypto.NewKey(rand.Reader)

// Tests that handshake failures are detected and reported correctly.
func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) }
func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }
func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) }

func testStatusMsgErrors(t *testing.T, protocol int) {
    pm := newTestProtocolManager(0, nil, nil)
func TestStatusMsgErrors(t *testing.T) {
    pm := newProtocolManagerForTesting(nil)
    td, currentBlock, genesis := pm.chainman.Status()
    defer pm.Stop()

@@ -59,23 +56,23 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
    },
    {
        code: StatusMsg, data: statusData{10, NetworkId, td, currentBlock, genesis},
        wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol),
        wantError: errResp(ErrProtocolVersionMismatch, "10 (!= 0)"),
    },
    {
        code: StatusMsg, data: statusData{uint32(protocol), 999, td, currentBlock, genesis},
        code: StatusMsg, data: statusData{uint32(ProtocolVersions[0]), 999, td, currentBlock, genesis},
        wantError: errResp(ErrNetworkIdMismatch, "999 (!= 1)"),
    },
    {
        code: StatusMsg, data: statusData{uint32(protocol), NetworkId, td, currentBlock, common.Hash{3}},
        code: StatusMsg, data: statusData{uint32(ProtocolVersions[0]), NetworkId, td, currentBlock, common.Hash{3}},
        wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis),
    },
    }

    for i, test := range tests {
        p, errc := newTestPeer("peer", protocol, pm, false)
        p, errc := newTestPeer(pm)
        // The send call might hang until reset because
        // the protocol might not read the payload.
        go p2p.Send(p.app, test.code, test.data)
        go p2p.Send(p, test.code, test.data)

        select {
        case err := <-errc:
@@ -92,20 +89,16 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
}

// This test checks that received transactions are added to the local pool.
func TestRecvTransactions61(t *testing.T) { testRecvTransactions(t, 61) }
func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }

func testRecvTransactions(t *testing.T, protocol int) {
func TestRecvTransactions(t *testing.T) {
    txAdded := make(chan []*types.Transaction)
    pm := newTestProtocolManager(0, nil, txAdded)
    p, _ := newTestPeer("peer", protocol, pm, true)
    pm := newProtocolManagerForTesting(txAdded)
    p, _ := newTestPeer(pm)
    defer pm.Stop()
    defer p.close()
    p.handshake(t)

    tx := newTestTransaction(testAccount, 0, 0)
    if err := p2p.Send(p.app, TxMsg, []interface{}{tx}); err != nil {
    tx := newtx(testAccount, 0, 0)
    if err := p2p.Send(p, TxMsg, []interface{}{tx}); err != nil {
        t.Fatalf("send error: %v", err)
    }
    select {
@@ -121,20 +114,15 @@ func testRecvTransactions(t *testing.T, protocol int) {
}

// This test checks that pending transactions are sent.
func TestSendTransactions61(t *testing.T) { testSendTransactions(t, 61) }
func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }

func testSendTransactions(t *testing.T, protocol int) {
    pm := newTestProtocolManager(0, nil, nil)
func TestSendTransactions(t *testing.T) {
    pm := newProtocolManagerForTesting(nil)
    defer pm.Stop()

    // Fill the pool with big transactions.
    const txsize = txsyncPackSize / 10
    alltxs := make([]*types.Transaction, 100)
    for nonce := range alltxs {
        alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
        alltxs[nonce] = newtx(testAccount, uint64(nonce), txsize)
    }
    pm.txpool.AddTransactions(alltxs)

@@ -149,7 +137,7 @@ func testSendTransactions(t *testing.T, protocol int) {
    }
    for n := 0; n < len(alltxs) && !t.Failed(); {
        var txs []*types.Transaction
        msg, err := p.app.ReadMsg()
        msg, err := p.ReadMsg()
        if err != nil {
            t.Errorf("%v: read error: %v", p.Peer, err)
        } else if msg.Code != TxMsg {
@@ -173,53 +161,97 @@ func testSendTransactions(t *testing.T, protocol int) {
        }
    }
    for i := 0; i < 3; i++ {
        p, _ := newTestPeer(fmt.Sprintf("peer #%d", i), protocol, pm, true)
        p, _ := newTestPeer(pm)
        p.handshake(t)
        wg.Add(1)
        go checktxs(p)
    }
    wg.Wait()
}

// Tests that the custom union field encoder and decoder works correctly.
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
    // Create a "random" hash for testing
    var hash common.Hash
    for i, _ := range hash {
        hash[i] = byte(i)
    }
    // Assemble some table driven tests
    tests := []struct {
        packet *getBlockHeadersData
        fail bool
    }{
        // Providing the origin as either a hash or a number should both work
        {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}}},
        {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}}},
// testPeer wraps all peer-related data for tests.
type testPeer struct {
    p2p.MsgReadWriter // writing to the test peer feeds the protocol
    pipe *p2p.MsgPipeRW // the protocol read/writes on this end
    pm *ProtocolManager
    *peer
}

        // Providing arbitrary query field should also work
        {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
        {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
func newProtocolManagerForTesting(txAdded chan<- []*types.Transaction) *ProtocolManager {
    db, _ := ethdb.NewMemDatabase()
    core.WriteTestNetGenesisBlock(db, 0)
    var (
        em = new(event.TypeMux)
        chain, _ = core.NewChainManager(db, core.FakePow{}, em)
        txpool = &fakeTxPool{added: txAdded}
        pm = NewProtocolManager(NetworkId, em, txpool, core.FakePow{}, chain)
    )
    pm.Start()
    return pm
}

        // Providing both the origin hash and origin number must fail
        {fail: true, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash, Number: 314}}},
func newTestPeer(pm *ProtocolManager) (*testPeer, <-chan error) {
    var id discover.NodeID
    rand.Read(id[:])
    rw1, rw2 := p2p.MsgPipe()
    peer := pm.newPeer(pm.protVer, pm.netId, p2p.NewPeer(id, "test peer", nil), rw2)
    errc := make(chan error, 1)
    go func() {
        pm.newPeerCh <- peer
        errc <- pm.handle(peer)
    }()
    return &testPeer{rw1, rw2, pm, peer}, errc
}

func (p *testPeer) handshake(t *testing.T) {
    td, currentBlock, genesis := p.pm.chainman.Status()
    msg := &statusData{
        ProtocolVersion: uint32(p.pm.protVer),
        NetworkId: uint32(p.pm.netId),
        TD: td,
        CurrentBlock: currentBlock,
        GenesisBlock: genesis,
    }
    // Iterate over each of the tests and try to encode and then decode
    for i, tt := range tests {
        bytes, err := rlp.EncodeToBytes(tt.packet)
        if err != nil && !tt.fail {
            t.Fatalf("test %d: failed to encode packet: %v", i, err)
        } else if err == nil && tt.fail {
            t.Fatalf("test %d: encode should have failed", i)
        }
        if !tt.fail {
            packet := new(getBlockHeadersData)
            if err := rlp.DecodeBytes(bytes, packet); err != nil {
                t.Fatalf("test %d: failed to decode packet: %v", i, err)
            }
            if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount ||
                packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse {
                t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet)
            }
        }
    if err := p2p.ExpectMsg(p, StatusMsg, msg); err != nil {
        t.Fatalf("status recv: %v", err)
    }
    if err := p2p.Send(p, StatusMsg, msg); err != nil {
        t.Fatalf("status send: %v", err)
    }
}

func (p *testPeer) close() {
    p.pipe.Close()
}

type fakeTxPool struct {
    // all transactions are collected.
    mu sync.Mutex
    all []*types.Transaction
    // if added is non-nil, it receives added transactions.
    added chan<- []*types.Transaction
}

func (pool *fakeTxPool) AddTransactions(txs []*types.Transaction) {
    pool.mu.Lock()
    defer pool.mu.Unlock()
    pool.all = append(pool.all, txs...)
    if pool.added != nil {
        pool.added <- txs
    }
}

func (pool *fakeTxPool) GetTransactions() types.Transactions {
    pool.mu.Lock()
    defer pool.mu.Unlock()
    txs := make([]*types.Transaction, len(pool.all))
    copy(txs, pool.all)
    return types.Transactions(txs)
}

func newtx(from *crypto.Key, nonce uint64, datasize int) *types.Transaction {
    data := make([]byte, datasize)
    tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), data)
    tx, _ = tx.SignECDSA(from.PrivateKey)
    return tx
}
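The testPeer plumbing above hinges on p2p.MsgPipe, which hands back two ends of an in-memory message stream: whatever one end writes, the other reads. A minimal illustrative sketch of that mechanic using the same p2p package (not part of this diff):

rw1, rw2 := p2p.MsgPipe()
go func() {
    // Pipe writes block until the opposite end reads, hence the goroutine.
    p2p.Send(rw1, 0x00, []string{"hello"})
}()
if msg, err := rw2.ReadMsg(); err == nil {
    fmt.Println("received message code", msg.Code)
}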
@@ -61,7 +61,9 @@ type LDBDatabase struct {
    quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
}

// NewLDBDatabase returns a LevelDB wrapped object.
// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
// itself but requires a background poller which syncs every X. `Flush` should be called
// when data needs to be stored and written to disk.
func NewLDBDatabase(file string, cache int) (*LDBDatabase, error) {
    // Calculate the cache allowance for this particular database
    cache = int(float64(cache) * cacheRatio[filepath.Base(file)])
@@ -140,6 +142,11 @@ func (self *LDBDatabase) NewIterator() iterator.Iterator {
    return self.db.NewIterator(nil, nil)
}

// Flush flushes out the queue to leveldb
func (self *LDBDatabase) Flush() error {
    return nil
}

func (self *LDBDatabase) Close() {
    // Stop the metrics collection to avoid internal database races
    self.quitLock.Lock()
@@ -152,14 +159,12 @@ func (self *LDBDatabase) Close() {
        glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err)
    }
    }
    err := self.db.Close()
    if glog.V(logger.Error) {
        if err == nil {
            glog.Infoln("closed db:", self.fn)
        } else {
            glog.Errorf("error closing db %s: %v", self.fn, err)
        }
    // Flush and close the database
    if err := self.Flush(); err != nil {
        glog.V(logger.Error).Infof("flushing '%s' failed: %v\n", self.fn, err)
    }
    self.db.Close()
    glog.V(logger.Error).Infoln("flushed and closed db:", self.fn)
}

func (self *LDBDatabase) LDB() *leveldb.DB {
@@ -263,23 +268,3 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
        }
    }
}

// TODO: remove this stuff and expose leveldb directly

func (db *LDBDatabase) NewBatch() Batch {
    return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}

type ldbBatch struct {
    db *leveldb.DB
    b *leveldb.Batch
}

func (b *ldbBatch) Put(key, value []byte) error {
    b.b.Put(key, value)
    return nil
}

func (b *ldbBatch) Write() error {
    return b.db.Write(b.b, nil)
}
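The batch plumbing above queues writes in memory and applies them in a single leveldb write, which leveldb guarantees to be atomic. A minimal usage sketch, assuming db is an opened *LDBDatabase as constructed in this file:

batch := db.NewBatch()
batch.Put([]byte("a"), []byte{1})
batch.Put([]byte("b"), []byte{2})
if err := batch.Write(); err != nil {
    // the batch failed as a whole; nothing was half-applied
}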
@@ -36,7 +36,8 @@ func NewMemDatabase() (*MemDatabase, error) {
}

func (db *MemDatabase) Put(key []byte, value []byte) error {
    db.db[string(key)] = common.CopyBytes(value)
    db.db[string(key)] = value

    return nil
}

@@ -48,14 +49,6 @@ func (db *MemDatabase) Get(key []byte) ([]byte, error) {
    return db.db[string(key)], nil
}

func (db *MemDatabase) Keys() [][]byte {
    keys := [][]byte{}
    for key, _ := range db.db {
        keys = append(keys, []byte(key))
    }
    return keys
}

/*
func (db *MemDatabase) GetKeys() []*common.Key {
    data, _ := db.Get([]byte("KeyRing"))
@@ -91,25 +84,6 @@ func (db *MemDatabase) LastKnownTD() []byte {
    return data
}

func (db *MemDatabase) NewBatch() Batch {
    return &memBatch{db: db}
}

type kv struct{ k, v []byte }

type memBatch struct {
    db *MemDatabase
    writes []kv
}

func (w *memBatch) Put(key, value []byte) error {
    w.writes = append(w.writes, kv{key, common.CopyBytes(value)})
    return nil
}

func (w *memBatch) Write() error {
    for _, kv := range w.writes {
        w.db.db[string(kv.k)] = kv.v
    }
func (db *MemDatabase) Flush() error {
    return nil
}
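The Put change above (dropping common.CopyBytes) is easy to miss but behavioural: without the copy, the map aliases the caller's slice, so mutating that slice afterwards silently rewrites the stored value. A short sketch of the hazard (illustrative only, using the ethdb package shown here):

db, _ := ethdb.NewMemDatabase()
buf := []byte{1, 2, 3}
db.Put([]byte("key"), buf)

buf[0] = 9 // with an aliasing Put, the stored value now begins with 9
got, _ := db.Get([]byte("key"))
fmt.Println(got[0]) // 1 if Put copied the value, 9 if it aliased buf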
File diff suppressed because it is too large
@@ -154,9 +154,7 @@ loop:
    if err != nil {
        fmt.Println("js error:", err, arguments)
    }

    _, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it
    if timer.interval && inreg {
    if timer.interval {
        timer.timer.Reset(timer.duration)
    } else {
        delete(registry, timer)
@@ -31,8 +31,8 @@ import (
// MetricsEnabledFlag is the CLI flag name to use to enable metrics collections.
var MetricsEnabledFlag = "metrics"

// Enabled is the flag specifying if metrics are enabled or not.
var Enabled = false
// enabled is the flag specifying if metrics are enabled or not.
var enabled = false

// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
@@ -41,7 +41,7 @@ func init() {
    for _, arg := range os.Args {
        if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
            glog.V(logger.Info).Infof("Enabling metrics collection")
            Enabled = true
            enabled = true
        }
    }
}
@@ -49,7 +49,7 @@ func init() {
// NewMeter creates a new metrics Meter, either a real one or a NOP stub depending
// on the metrics flag.
func NewMeter(name string) metrics.Meter {
    if !Enabled {
    if !enabled {
        return new(metrics.NilMeter)
    }
    return metrics.GetOrRegisterMeter(name, metrics.DefaultRegistry)
@@ -58,7 +58,7 @@ func NewMeter(name string) metrics.Meter {
// NewTimer creates a new metrics Timer, either a real one or a NOP stub depending
// on the metrics flag.
func NewTimer(name string) metrics.Timer {
    if !Enabled {
    if !enabled {
        return new(metrics.NilTimer)
    }
    return metrics.GetOrRegisterTimer(name, metrics.DefaultRegistry)
@@ -68,7 +68,7 @@ func NewTimer(name string) metrics.Timer {
// process.
func CollectProcessMetrics(refresh time.Duration) {
    // Short circuit if the metrics system is disabled
    if !Enabled {
    if !enabled {
        return
    }
    // Create the various data collectors
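The point of the NilMeter/NilTimer stubs above is that call sites stay unconditional. A sketch of a typical call site under that assumption (the meter name is made up for illustration; import path as in this repository):

var exampleMeter = metrics.NewMeter("eth/example/ingress") // NOP stub when metrics are off

func recordSample(size int) {
    exampleMeter.Mark(int64(size)) // safe either way; the stub just discards samples
}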
@@ -19,8 +19,6 @@ package miner
import (
    "sync"

    "sync/atomic"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/logger"
    "github.com/ethereum/go-ethereum/logger/glog"
@@ -37,8 +35,6 @@ type CpuAgent struct {

    index int
    pow pow.PoW

    isMining int32 // isMining indicates whether the agent is currently mining
}

func NewCpuAgent(index int, pow pow.PoW) *CpuAgent {
@@ -65,10 +61,6 @@ func (self *CpuAgent) Start() {
    self.mu.Lock()
    defer self.mu.Unlock()

    if !atomic.CompareAndSwapInt32(&self.isMining, 0, 1) {
        return // agent already started
    }

    self.quit = make(chan struct{})
    // creating current op ch makes sure we're not closing a nil ch
    // later on
@@ -107,11 +99,10 @@ done:
        case <-self.workCh:
        default:
            close(self.workCh)

            break done
        }
    }

    atomic.StoreInt32(&self.isMining, 0)
}

func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
@@ -17,7 +17,6 @@
package miner

import (
    "errors"
    "math/big"
    "sync"
    "time"
@@ -91,7 +90,7 @@ func (a *RemoteAgent) GetHashRate() (tot int64) {
    return
}

func (a *RemoteAgent) GetWork() ([3]string, error) {
func (a *RemoteAgent) GetWork() [3]string {
    a.mu.Lock()
    defer a.mu.Unlock()

@@ -111,9 +110,9 @@ func (a *RemoteAgent) GetWork() ([3]string, error) {
        res[2] = common.BytesToHash(n.Bytes()).Hex()

        a.work[block.HashNoNonce()] = a.currentWork
        return res, nil
    }
    return res, errors.New("No work available yet, don't panic.")

    return res
}

// Returns true or false, but does not indicate if the PoW was correct
@@ -29,7 +29,6 @@ import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/logger"
    "github.com/ethereum/go-ethereum/logger/glog"
@@ -101,7 +100,7 @@ type worker struct {
    eth core.Backend
    chain *core.ChainManager
    proc *core.BlockProcessor
    chainDb ethdb.Database
    chainDb common.Database

    coinbase common.Address
    gasPrice *big.Int
@@ -279,12 +278,12 @@ func (self *worker) wait() {
    glog.V(logger.Error).Infoln("Invalid block found during mining")
    continue
    }
    if err := core.ValidateHeader(self.eth.BlockProcessor().Pow, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
    if err := core.ValidateHeader(self.eth.BlockProcessor().Pow, block.Header(), parent, true, false); err != nil && err != core.BlockFutureErr {
        glog.V(logger.Error).Infoln("Invalid header on mined block:", err)
        continue
    }

    stat, err := self.chain.WriteBlock(block)
    stat, err := self.chain.WriteBlock(block, false)
    if err != nil {
        glog.V(logger.Error).Infoln("error writing block to chain", err)
        continue
@@ -435,7 +434,7 @@ func (self *worker) commitNewWork() {
    tstart := time.Now()
    parent := self.chain.CurrentBlock()
    tstamp := tstart.Unix()
    if parent.Time().Cmp(new(big.Int).SetInt64(tstamp)) >= 0 {
    if parent.Time().Cmp(new(big.Int).SetInt64(tstamp)) != 1 {
        tstamp = parent.Time().Int64() + 1
    }
    // this will ensure we're not going off too far in the future
@@ -534,12 +533,14 @@ func (self *worker) commitNewWork() {

    // create the new block whose nonce will be mined.
    work.Block = types.NewBlock(header, work.txs, uncles, work.receipts)
    work.Block.Td = new(big.Int).Set(core.CalcTD(work.Block, self.chain.GetBlock(work.Block.ParentHash())))

    // We only care about logging if we're actually mining.
    if atomic.LoadInt32(&self.mining) == 1 {
        glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart))
        self.logLocalMinedBlocks(work, previous)
    }

    self.push(work)
}

@@ -21,7 +21,6 @@ package discover

import (
    "bytes"
    "crypto/rand"
    "encoding/binary"
    "os"
    "sync"
@@ -47,8 +46,11 @@ var (

// nodeDB stores all nodes we know about.
type nodeDB struct {
    lvl *leveldb.DB // Interface to the database itself
    self NodeID // Own node id to prevent adding it into the database
    lvl *leveldb.DB // Interface to the database itself
    seeder iterator.Iterator // Iterator for fetching possible seed nodes

    self NodeID // Own node id to prevent adding it into the database

    runner sync.Once // Ensures we can start at most one expirer
    quit chan struct{} // Channel to signal the expiring thread to stop
}
@@ -300,70 +302,52 @@ func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
    return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}

// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
    var (
        now = time.Now()
        nodes = make([]*Node, 0, n)
        it = db.lvl.NewIterator(nil, nil)
        id NodeID
    )
    defer it.Release()

seek:
    for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
        // Seek to a random entry. The first byte is incremented by a
        // random amount each time in order to increase the likelihood
        // of hitting all existing nodes in very small databases.
        ctr := id[0]
        rand.Read(id[:])
        id[0] = ctr + id[0]%16
        it.Seek(makeKey(id, nodeDBDiscoverRoot))

        n := nextNode(it)
        if n == nil {
            id[0] = 0
            continue seek // iterator exhausted
// querySeeds retrieves a batch of nodes to be used as potential seed servers
// during bootstrapping the node into the network.
//
// Ideal seeds are the most recently seen nodes (highest probability to be still
// alive), but yet untried. However, since leveldb only supports dumb iteration
// we will instead start pulling in potential seeds that haven't yet been pinged
// since the start of the boot procedure.
//
// If the database runs out of potential seeds, we restart the startup counter
// and start iterating over the peers again.
func (db *nodeDB) querySeeds(n int) []*Node {
    // Create a new seed iterator if none exists
    if db.seeder == nil {
        db.seeder = db.lvl.NewIterator(nil, nil)
    }
    // Iterate over the nodes and find suitable seeds
    nodes := make([]*Node, 0, n)
    for len(nodes) < n && db.seeder.Next() {
        // Iterate until a discovery node is found
        id, field := splitKey(db.seeder.Key())
        if field != nodeDBDiscoverRoot {
            continue
        }
        if n.ID == db.self {
            continue seek
        // Dump it if it's a self reference
        if bytes.Compare(id[:], db.self[:]) == 0 {
            db.deleteNode(id)
            continue
        }
        if now.Sub(db.lastPong(n.ID)) > maxAge {
            continue seek
        // Load it as a potential seed
        if node := db.node(id); node != nil {
            nodes = append(nodes, node)
        }
        for i := range nodes {
            if nodes[i].ID == n.ID {
                continue seek // duplicate
            }
        }
        nodes = append(nodes, n)
    }
    // Release the iterator if we reached the end
    if len(nodes) == 0 {
        db.seeder.Release()
        db.seeder = nil
    }
    return nodes
}

// reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
    for end := false; !end; end = !it.Next() {
        id, field := splitKey(it.Key())
        if field != nodeDBDiscoverRoot {
            continue
        }
        var n Node
        if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
            if glog.V(logger.Warn) {
                glog.Errorf("invalid node %x: %v", id, err)
            }
            continue
        }
        return &n
    }
    return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
    if db.seeder != nil {
        db.seeder.Release()
    }
    close(db.quit)
    db.lvl.Close()
}
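The removed querySeeds variant above probes the keyspace by seeking to pseudo-random keys, nudging the first key byte between probes so that even tiny databases get covered. A stdlib-only sketch of that probing idea over a sorted key slice (randomProbe is a hypothetical stand-in for iterator.Seek, not code from this repository):

// randomProbe picks a random one-byte prefix and returns the first stored
// key at or after it, mirroring how Seek lands on the next existing entry.
func randomProbe(sorted []string, rnd *rand.Rand) (string, bool) {
    probe := string([]byte{byte(rnd.Intn(256))})
    i := sort.SearchStrings(sorted, probe)
    if i == len(sorted) {
        return "", false // ran off the end; the caller would wrap around
    }
    return sorted[i], true
}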
@@ -162,33 +162,9 @@ var nodeDBSeedQueryNodes = []struct {
    node *Node
    pong time.Time
}{
    // This one should not be in the result set because its last
    // pong time is too far in the past.
    {
        node: newNode(
            MustHexID("0x84d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            net.IP{127, 0, 0, 3},
            30303,
            30303,
        ),
        pong: time.Now().Add(-3 * time.Hour),
    },
    // This one shouldn't be in the result set because its
    // nodeID is the local node's ID.
    {
        node: newNode(
            MustHexID("0x57d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            net.IP{127, 0, 0, 3},
            30303,
            30303,
        ),
        pong: time.Now().Add(-4 * time.Second),
    },

    // These should be in the result set.
    {
        node: newNode(
            MustHexID("0x22d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            MustHexID("0x01d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            net.IP{127, 0, 0, 1},
            30303,
            30303,
@@ -197,7 +173,7 @@ var nodeDBSeedQueryNodes = []struct {
    },
    {
        node: newNode(
            MustHexID("0x44d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            MustHexID("0x02d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            net.IP{127, 0, 0, 2},
            30303,
            30303,
@@ -206,7 +182,7 @@ var nodeDBSeedQueryNodes = []struct {
    },
    {
        node: newNode(
            MustHexID("0xe2d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            MustHexID("0x03d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
            net.IP{127, 0, 0, 3},
            30303,
            30303,
@@ -216,7 +192,7 @@ var nodeDBSeedQueryNodes = []struct {
}

func TestNodeDBSeedQuery(t *testing.T) {
    db, _ := newNodeDB("", Version, nodeDBSeedQueryNodes[1].node.ID)
    db, _ := newNodeDB("", Version, NodeID{})
    defer db.close()

    // Insert a batch of nodes for querying
@@ -224,24 +200,20 @@ func TestNodeDBSeedQuery(t *testing.T) {
        if err := db.updateNode(seed.node); err != nil {
            t.Fatalf("node %d: failed to insert: %v", i, err)
        }
        if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
            t.Fatalf("node %d: failed to insert lastPong: %v", i, err)
        }
    }

    // Retrieve the entire batch and check for duplicates
    seeds := db.querySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
    seeds := db.querySeeds(2 * len(nodeDBSeedQueryNodes))
    if len(seeds) != len(nodeDBSeedQueryNodes) {
        t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(nodeDBSeedQueryNodes))
    }
    have := make(map[NodeID]struct{})
    for _, seed := range seeds {
        have[seed.ID] = struct{}{}
    }
    want := make(map[NodeID]struct{})
    for _, seed := range nodeDBSeedQueryNodes[2:] {
    for _, seed := range nodeDBSeedQueryNodes {
        want[seed.node.ID] = struct{}{}
    }
    if len(seeds) != len(want) {
        t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
    }
    for id, _ := range have {
        if _, ok := want[id]; !ok {
            t.Errorf("extra seed: %v", id)
@@ -252,6 +224,63 @@ func TestNodeDBSeedQuery(t *testing.T) {
            t.Errorf("missing seed: %v", id)
        }
    }
    // Make sure the next batch is empty (seed EOF)
    seeds = db.querySeeds(2 * len(nodeDBSeedQueryNodes))
    if len(seeds) != 0 {
        t.Errorf("seed count mismatch: have %v, want %v", len(seeds), 0)
    }
}

func TestNodeDBSeedQueryContinuation(t *testing.T) {
    db, _ := newNodeDB("", Version, NodeID{})
    defer db.close()

    // Insert a batch of nodes for querying
    for i, seed := range nodeDBSeedQueryNodes {
        if err := db.updateNode(seed.node); err != nil {
            t.Fatalf("node %d: failed to insert: %v", i, err)
        }
    }
    // Iteratively retrieve the batch, checking for an empty batch on reset
    for i := 0; i < len(nodeDBSeedQueryNodes); i++ {
        if seeds := db.querySeeds(1); len(seeds) != 1 {
            t.Errorf("1st iteration %d: seed count mismatch: have %v, want %v", i, len(seeds), 1)
        }
    }
    if seeds := db.querySeeds(1); len(seeds) != 0 {
        t.Errorf("reset: seed count mismatch: have %v, want %v", len(seeds), 0)
    }
    for i := 0; i < len(nodeDBSeedQueryNodes); i++ {
        if seeds := db.querySeeds(1); len(seeds) != 1 {
            t.Errorf("2nd iteration %d: seed count mismatch: have %v, want %v", i, len(seeds), 1)
        }
    }
}

func TestNodeDBSelfSeedQuery(t *testing.T) {
    // Assign a node as self to verify evacuation
    self := nodeDBSeedQueryNodes[0].node.ID
    db, _ := newNodeDB("", Version, self)
    defer db.close()

    // Insert a batch of nodes for querying
    for i, seed := range nodeDBSeedQueryNodes {
        if err := db.updateNode(seed.node); err != nil {
            t.Fatalf("node %d: failed to insert: %v", i, err)
        }
    }
    // Retrieve the entire batch and check that self was evacuated
    seeds := db.querySeeds(2 * len(nodeDBSeedQueryNodes))
    if len(seeds) != len(nodeDBSeedQueryNodes)-1 {
        t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(nodeDBSeedQueryNodes)-1)
    }
    have := make(map[NodeID]struct{})
    for _, seed := range seeds {
        have[seed.ID] = struct{}{}
    }
    if _, ok := have[self]; ok {
        t.Errorf("self not evacuated")
    }
}

func TestNodeDBPersistency(t *testing.T) {
@@ -44,10 +44,6 @@ const (

    maxBondingPingPongs = 16
    maxFindnodeFailures = 5

    autoRefreshInterval = 1 * time.Hour
    seedCount = 30
    seedMaxAge = 5 * 24 * time.Hour
)

type Table struct {
@@ -56,10 +52,6 @@ type Table struct {
    nursery []*Node // bootstrap nodes
    db *nodeDB // database of known nodes

    refreshReq chan struct{}
    closeReq chan struct{}
    closed chan struct{}

    bondmu sync.Mutex
    bonding map[NodeID]*bondproc
    bondslots chan struct{} // limits total number of active bonding processes
@@ -88,7 +80,10 @@ type transport interface {

// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct{ entries []*Node }
type bucket struct {
    lastLookup time.Time
    entries []*Node
}

func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string) *Table {
    // If no node database was given, use an in-memory one
@@ -98,14 +93,11 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
        db, _ = newNodeDB("", Version, ourID)
    }
    tab := &Table{
        net: t,
        db: db,
        self: newNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
        bonding: make(map[NodeID]*bondproc),
        bondslots: make(chan struct{}, maxBondingPingPongs),
        refreshReq: make(chan struct{}),
        closeReq: make(chan struct{}),
        closed: make(chan struct{}),
        net: t,
        db: db,
        self: newNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
        bonding: make(map[NodeID]*bondproc),
        bondslots: make(chan struct{}, maxBondingPingPongs),
    }
    for i := 0; i < cap(tab.bondslots); i++ {
        tab.bondslots <- struct{}{}
@@ -113,7 +105,6 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
    for i := range tab.buckets {
        tab.buckets[i] = new(bucket)
    }
    go tab.refreshLoop()
    return tab
}

@@ -172,12 +163,10 @@ func randUint(max uint32) uint32 {

// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
    select {
    case <-tab.closed:
        // already closed.
    case tab.closeReq <- struct{}{}:
        <-tab.closed // wait for refreshLoop to end.
    if tab.net != nil {
        tab.net.close()
    }
    tab.db.close()
}

// Bootstrap sets the bootstrap nodes. These nodes are used to connect
@@ -194,7 +183,7 @@ func (tab *Table) Bootstrap(nodes []*Node) {
        tab.nursery = append(tab.nursery, &cpy)
    }
    tab.mutex.Unlock()
    tab.requestRefresh()
    tab.refresh()
}

// Lookup performs a network search for nodes close
@@ -215,13 +204,15 @@ func (tab *Table) Lookup(targetID NodeID) []*Node {
    asked[tab.self.ID] = true

    tab.mutex.Lock()
    // update last lookup stamp (for refresh logic)
    tab.buckets[logdist(tab.self.sha, target)].lastLookup = time.Now()
    // generate initial result set
    result := tab.closest(target, bucketSize)
    tab.mutex.Unlock()

    // If the result set is empty, all nodes were dropped, refresh.
    // If the result set is empty, all nodes were dropped, refresh
    if len(result.entries) == 0 {
        tab.requestRefresh()
        tab.refresh()
        return nil
    }

@@ -266,86 +257,56 @@ func (tab *Table) Lookup(targetID NodeID) []*Node {
    return result.entries
}

func (tab *Table) requestRefresh() {
    select {
    case tab.refreshReq <- struct{}{}:
    case <-tab.closed:
    }
}
// refresh performs a lookup for a random target to keep buckets full, or seeds
// the table if it is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) refresh() {
    seed := true

func (tab *Table) refreshLoop() {
    defer func() {
        tab.db.close()
        if tab.net != nil {
            tab.net.close()
        }
        close(tab.closed)
    }()

    timer := time.NewTicker(autoRefreshInterval)
    var done chan struct{}
    for {
        select {
        case <-timer.C:
            if done == nil {
                done = make(chan struct{})
                go tab.doRefresh(done)
            }
        case <-tab.refreshReq:
            if done == nil {
                done = make(chan struct{})
                go tab.doRefresh(done)
            }
        case <-done:
            done = nil
        case <-tab.closeReq:
            if done != nil {
                <-done
            }
            return
        }
    }
}

// doRefresh performs a lookup for a random target to keep buckets
// full. seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
    defer close(done)

    // The Kademlia paper specifies that the bucket refresh should
    // perform a lookup in the least recently used bucket. We cannot
    // adhere to this because the findnode target is a 512bit value
    // (not hash-sized) and it is not easily possible to generate a
    // sha3 preimage that falls into a chosen bucket.
    // We perform a lookup with a random target instead.
    var target NodeID
    rand.Read(target[:])
    result := tab.Lookup(target)
    if len(result) > 0 {
        return
    }

    // The table is empty. Load nodes from the database and insert
    // them. This should yield a few previously seen nodes that are
    // (hopefully) still alive.
    seeds := tab.db.querySeeds(seedCount, seedMaxAge)
    seeds = tab.bondall(append(seeds, tab.nursery...))
    if glog.V(logger.Debug) {
        if len(seeds) == 0 {
            glog.Infof("no seed nodes found")
        }
        for _, n := range seeds {
            age := time.Since(tab.db.lastPong(n.ID))
            glog.Infof("seed node (age %v): %v", age, n)
        }
    }
    // If the discovery table is empty, seed with previously known nodes
    tab.mutex.Lock()
    tab.stuff(seeds)
    for _, bucket := range tab.buckets {
        if len(bucket.entries) > 0 {
            seed = false
            break
        }
    }
    tab.mutex.Unlock()

    // Finally, do a self lookup to fill up the buckets.
    tab.Lookup(tab.self.ID)
    // If the table is not empty, try to refresh using the live entries
    if !seed {
        // The Kademlia paper specifies that the bucket refresh should
        // perform a refresh in the least recently used bucket. We cannot
        // adhere to this because the findnode target is a 512bit value
        // (not hash-sized) and it is not easily possible to generate a
        // sha3 preimage that falls into a chosen bucket.
        //
        // We perform a lookup with a random target instead.
        var target NodeID
        rand.Read(target[:])

        result := tab.Lookup(target)
        if len(result) == 0 {
            // Lookup failed, seed after all
            seed = true
        }
    }

    if seed {
        // Pick a batch of previously known seeds to lookup with
        seeds := tab.db.querySeeds(10)
        for _, seed := range seeds {
            glog.V(logger.Debug).Infoln("Seeding network with", seed)
        }
        nodes := append(tab.nursery, seeds...)

        // Bond with all the seed nodes (will pingpong only if failed recently)
        bonded := tab.bondall(nodes)
        if len(bonded) > 0 {
            tab.Lookup(tab.self.ID)
        }
        // TODO: the Kademlia paper says that we're supposed to perform
        // random lookups in all buckets further away than our closest neighbor.
    }
}

// closest returns the n nodes in the table that are closest to the
@@ -412,9 +373,8 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
    }
    // If the node is unknown (non-bonded) or failed (remotely unknown), bond from scratch
    var result error
    age := time.Since(tab.db.lastPong(id))
    if node == nil || fails > 0 || age > nodeDBNodeExpiration {
        glog.V(logger.Detail).Infof("Bonding %x: known=%t, fails=%d age=%v", id[:8], node != nil, fails, age)
    if node == nil || fails > 0 {
        glog.V(logger.Detail).Infof("Bonding %x: known=%v, fails=%v", id[:8], node != nil, fails)

        tab.bondmu.Lock()
        w := tab.bonding[id]
@@ -475,17 +435,13 @@ func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAdd
// ping a remote endpoint and wait for a reply, also updating the node
// database accordingly.
func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
    // Update the last ping and send the message
    tab.db.updateLastPing(id, time.Now())
    if err := tab.net.ping(id, addr); err != nil {
        return err
    }
    // Pong received, update the database and return
    tab.db.updateLastPong(id, time.Now())

    // Start the background expiration goroutine after the first
    // successful communication. Subsequent calls have no effect if it
    // is already running. We do this here instead of somewhere else
    // so that the search for seed nodes also considers older nodes
    // that would otherwise be removed by the expiration.
    tab.db.ensureExpirer()
    return nil
}
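refreshLoop above is a single-flight pattern: ticker ticks and explicit requests both start doRefresh, but only when no run is already in flight, tracked by the done channel (receiving from a nil channel blocks forever, so that case simply never fires while idle). A condensed stdlib-only sketch of the same pattern (coalesce and its parameters are hypothetical names, not code from this repository):

func coalesce(trigger, quit <-chan struct{}, work func(done chan struct{})) {
    var done chan struct{} // non-nil while a run is in flight
    for {
        select {
        case <-trigger:
            if done == nil { // drop triggers arriving while busy
                done = make(chan struct{})
                go work(done) // work must close(done) when it finishes
            }
        case <-done: // nil while idle, so this case never fires then
            done = nil
        case <-quit:
            return
        }
    }
}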
@@ -514,6 +514,9 @@ func (tn *preminedTestnet) findnode(toid NodeID, toaddr *net.UDPAddr, target Nod
    if toaddr.Port == 0 {
        panic("query to node at distance 0")
    }
    if target != tn.target {
        panic("findnode with wrong target")
    }
    next := uint16(toaddr.Port) - 1
    var result []*Node
    for i, id := range tn.dists[toaddr.Port] {
@@ -39,6 +39,7 @@ var (
    errPacketTooSmall = errors.New("too small")
    errBadHash = errors.New("bad hash")
    errExpired = errors.New("expired")
    errBadVersion = errors.New("version mismatch")
    errUnsolicitedReply = errors.New("unsolicited reply")
    errUnknownNode = errors.New("unknown node")
    errTimeout = errors.New("RPC timeout")
@@ -51,6 +52,8 @@ const (
    respTimeout = 500 * time.Millisecond
    sendTimeout = 500 * time.Millisecond
    expiration = 20 * time.Second

    refreshInterval = 1 * time.Hour
)

// RPC packet types
@@ -309,8 +312,10 @@ func (t *udp) loop() {
        plist = list.New()
        timeout = time.NewTimer(0)
        nextTimeout *pending // head of plist when timeout was last reset
        refresh = time.NewTicker(refreshInterval)
    )
    <-timeout.C // ignore first timeout
    defer refresh.Stop()
    defer timeout.Stop()

    resetTimeout := func() {
@@ -339,6 +344,9 @@ func (t *udp) loop() {
        resetTimeout()

        select {
        case <-refresh.C:
            go t.refresh()

        case <-t.closing:
            for el := plist.Front(); el != nil; el = el.Next() {
                el.Value.(*pending).errc <- errClosed
@@ -521,6 +529,9 @@ func (req *ping) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) er
    if expired(req.Expiration) {
        return errExpired
    }
    if req.Version != Version {
        return errBadVersion
    }
    t.send(from, pongPacket, pong{
        To: makeEndpoint(from, req.From.TCP),
        ReplyTok: mac,
@@ -122,6 +122,7 @@ func TestUDP_packetErrors(t *testing.T) {
    defer test.table.Close()

    test.packetIn(errExpired, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version})
    test.packetIn(errBadVersion, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: 99, Expiration: futureExp})
    test.packetIn(errUnsolicitedReply, pongPacket, &pong{ReplyTok: []byte{}, Expiration: futureExp})
    test.packetIn(errUnknownNode, findnodePacket, &findnode{Expiration: futureExp})
    test.packetIn(errUnsolicitedReply, neighborsPacket, &neighbors{Expiration: futureExp})
@@ -38,14 +38,8 @@ type meteredConn struct {
}

// newMeteredConn creates a new metered connection, also bumping the ingress or
// egress connection meter. If the metrics system is disabled, this function
// returns the original object.
// egress connection meter.
func newMeteredConn(conn net.Conn, ingress bool) net.Conn {
    // Short circuit if metrics are disabled
    if !metrics.Enabled {
        return conn
    }
    // Otherwise bump the connection counters and wrap the connection
    if ingress {
        ingressConnectMeter.Mark(1)
    } else {
Some files were not shown because too many files have changed in this diff.