Compare commits


22 Commits

Author SHA1 Message Date
Péter Szilágyi
4bcc0a37ab Merge pull request #19473 from karalabe/geth-1.8.27
[1.8.27 backport] eth, les, light: enforce CHT checkpoints on fast-sync too
2019-04-17 15:47:54 +03:00
Péter Szilágyi
b5f92e66c6 params, swarm: release Geth v1.8.27 (noop Swarm v0.3.15) 2019-04-17 14:57:38 +03:00
Péter Szilágyi
d8787230fa eth, les, light: enforce CHT checkpoints on fast-sync too 2019-04-17 14:56:58 +03:00
Péter Szilágyi
cdae1c59ab Merge pull request #19437 from zsfelfoldi/fix-sendtx
les: fix SendTx cost calculation and verify cost table
2019-04-10 15:45:13 +03:00
Péter Szilágyi
0b00e19ed9 params, swarm: release Geth v1.8.26 (+noop Swarm v0.3.14) 2019-04-10 15:38:01 +03:00
Zsolt Felfoldi
c8d8126bd0 les: check required message types in cost table 2019-04-10 13:12:46 +02:00
Zsolt Felfoldi
0de9f32ae8 les: backported new SendTx cost calculation 2019-04-10 13:12:42 +02:00
Péter Szilágyi
14ae1246b7 Merge pull request #19416 from jmcnevin/cli-fix
Revert flag removal
2019-04-09 11:47:09 +03:00
Péter Szilágyi
dc59af8622 params, swarm: hotfix Geth v1.8.25 release to restore rpc flags 2019-04-09 10:58:00 +03:00
Jeremy McNevin
45730cfab3 cmd/geth: fix accidental --rpccorsdomain and --rpcvhosts removal 2019-04-09 10:56:50 +03:00
Péter Szilágyi
4e13a09c50 Merge pull request #19370 from karalabe/geth-1.8.24
Backport PR for the v1.8.24 maintenance release
2019-04-08 16:16:05 +03:00
Péter Szilágyi
009d2fe2d6 params, swarm: release Geth v1.8.24 (noop Swarm 0.3.12) 2019-04-08 16:06:59 +03:00
Martin Holst Swende
e872ba7a9e eth, les, geth: implement cli-configurable global gas cap for RPC calls (#19401)
* eth, les, geth: implement cli-configurable global gas cap for RPC calls

* graphql, ethapi: place gas cap in DoCall

* ethapi: reformat log message
2019-04-08 15:15:13 +03:00
Felix Lange
9d9c6b5847 p2p/discover: bump failure counter only if no nodes were provided (#19362)
This resolves a minor issue where neighbors responses containing less
than 16 nodes would bump the failure counter, removing the node. One
situation where this can happen is a private deployment where the total
number of extant nodes is less than 16.

Issue found by @jsying.
2019-04-08 14:35:50 +03:00
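Note: a minimal sketch of the guard this commit describes (illustrative names such as findnode/fails, not the verbatim patch) — the failure counter is only bumped when a peer returns no nodes at all, so short neighbors responses from small private deployments no longer evict the node:

    // Only count a lookup round as failed when nothing useful came back.
    nodes, err := udp.findnode(remote, target)
    if err != nil || len(nodes) == 0 {
        fails++ // genuine failure: no nodes provided
    } else {
        fails = 0 // any non-empty response resets the counter
    }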
Péter Szilágyi
8ca6454807 params: set Rinkeby Petersburg fork block (4th May, 2019) 2019-04-08 12:14:05 +03:00
Péter Szilágyi
0e63a70505 core: minor code polishes + rebase fixes 2019-04-08 12:04:31 +03:00
rjl493456442
f1b00cffc8 core: re-omit new log event when logs rebirth 2019-04-08 12:02:15 +03:00
Péter Szilágyi
442320a8ae travis: update builders to xenial to shadow Go releases 2019-04-08 12:00:42 +03:00
Martin Holst Swende
af401d03a3 all: simplify timestamps to uint64 (#19372)
* all: simplify timestamps to uint64

* tests: update definitions

* clef, faucet, mobile: leftover uint64 fixups

* ethash: fix tests

* graphql: update schema for timestamp

* ethash: remove unused variable
2019-04-08 12:00:42 +03:00
Péter Szilágyi
80a2a35bc3 trie: there's no point in retrieving the metaroot 2019-04-08 12:00:42 +03:00
Péter Szilágyi
fca5f9fd6f common/fdlimit: fix macos file descriptors for Go 1.12 2019-04-02 13:14:21 +03:00
Péter Szilágyi
38c30f8dd8 light, params: update CHTs, integrate CHT for Goerli too 2019-04-02 12:10:06 +03:00
54 changed files with 814 additions and 291 deletions

View File

@@ -4,7 +4,7 @@ sudo: false
 matrix:
   include:
     - os: linux
-      dist: trusty
+      dist: xenial
       sudo: required
       go: 1.10.x
       script:
@@ -16,7 +16,7 @@ matrix:
     # These are the latest Go versions.
     - os: linux
-      dist: trusty
+      dist: xenial
       sudo: required
       go: 1.11.x
       script:
@@ -43,7 +43,7 @@ matrix:
     # This builder only tests code linters on latest version of Go
     - os: linux
-      dist: trusty
+      dist: xenial
       go: 1.11.x
       env:
         - lint
@@ -55,7 +55,7 @@ matrix:
     # This builder does the Ubuntu PPA upload
     - if: type = push
       os: linux
-      dist: trusty
+      dist: xenial
       go: 1.11.x
       env:
         - ubuntu-ppa
@@ -77,7 +77,7 @@ matrix:
     # This builder does the Linux Azure uploads
     - if: type = push
       os: linux
-      dist: trusty
+      dist: xenial
       sudo: required
       go: 1.11.x
       env:
@@ -111,7 +111,7 @@ matrix:
     # This builder does the Linux Azure MIPS xgo uploads
     - if: type = push
       os: linux
-      dist: trusty
+      dist: xenial
       services:
         - docker
       go: 1.11.x
@@ -139,7 +139,7 @@ matrix:
     # This builder does the Android Maven and Azure uploads
     - if: type = push
       os: linux
-      dist: trusty
+      dist: xenial
       addons:
         apt:
           packages:
@@ -206,7 +206,7 @@ matrix:
     # This builder does the Azure archive purges to avoid accumulating junk
     - if: type = cron
       os: linux
-      dist: trusty
+      dist: xenial
       go: 1.11.x
       env:
         - azure-purge

View File

@@ -579,7 +579,7 @@ func (f *faucet) loop() {
     go func() {
         for head := range update {
             // New chain head arrived, query the current stats and stream to clients
-            timestamp := time.Unix(head.Time.Int64(), 0)
+            timestamp := time.Unix(int64(head.Time), 0)
             if time.Since(timestamp) > time.Hour {
                 log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
                 continue

View File

@@ -372,7 +372,7 @@ func copyDb(ctx *cli.Context) error {
     chain, chainDb := utils.MakeChain(ctx, stack)
     syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
-    dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
+    dl := downloader.New(syncmode, 0, chainDb, new(event.TypeMux), chain, nil, nil)
     // Create a source peer to satisfy downloader requests from
     db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)

View File

@@ -150,6 +150,7 @@ var (
     utils.WSAllowedOriginsFlag,
     utils.IPCDisabledFlag,
     utils.IPCPathFlag,
+    utils.RPCGlobalGasCap,
 }
 whisperFlags = []cli.Flag{

View File

@@ -153,6 +153,7 @@ var AppHelpFlagGroups = []flagGroup{
     utils.RPCListenAddrFlag,
     utils.RPCPortFlag,
     utils.RPCApiFlag,
+    utils.RPCGlobalGasCap,
     utils.WSEnabledFlag,
     utils.WSListenAddrFlag,
     utils.WSPortFlag,

View File

@@ -411,6 +411,10 @@ var (
         Name:  "vmdebug",
         Usage: "Record information useful for VM and contract debugging",
     }
+    RPCGlobalGasCap = cli.Uint64Flag{
+        Name:  "rpc.gascap",
+        Usage: "Sets a cap on gas that can be used in eth_call/estimateGas",
+    }
     // Logging and debug settings
     EthStatsURLFlag = cli.StringFlag{
         Name: "ethstats",
@@ -1256,6 +1260,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
     if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
         cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
     }
+    if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
+        cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name))
+    }
     // Override any default configs for hard coded networks.
     switch {
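Note: the PR description says the cap itself is enforced in DoCall. A minimal sketch of how such a clamp can look (hedged; helper name capGas is illustrative, not the verbatim backport):

    package main

    import (
        "fmt"
        "math/big"
    )

    // capGas clamps a caller-requested gas value to the node's configured
    // global cap, mirroring the behaviour the commit message describes.
    func capGas(requested uint64, rpcGasCap *big.Int) uint64 {
        if rpcGasCap != nil && requested > rpcGasCap.Uint64() {
            fmt.Println("caller gas above allowance, capping:", requested, "->", rpcGasCap)
            return rpcGasCap.Uint64()
        }
        return requested
    }

    func main() {
        gasCap := new(big.Int).SetUint64(50000000) // e.g. --rpc.gascap 50000000
        fmt.Println(capGas(100000000, gasCap))     // clamped to 50000000
        fmt.Println(capGas(21000, gasCap))         // unchanged
    }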

View File

@@ -0,0 +1,71 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package fdlimit
import "syscall"
// hardlimit is the number of file descriptors allowed at max by the kernel.
const hardlimit = 10240
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
if limit.Cur > max {
limit.Cur = max
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// MacOS can silently apply further caps, so retrieve the actually set limit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return limit.Cur, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this
// process.
func Current() (int, error) {
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return int(limit.Cur), nil
}
// Maximum retrieves the maximum number of file descriptors this process is
// allowed to request for itself.
func Maximum() (int, error) {
// Retrieve the maximum allowed by dynamic OS limits
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// Cap it to OPEN_MAX (10240) because macos is a special snowflake
if limit.Max > hardlimit {
limit.Max = hardlimit
}
return int(limit.Max), nil
}
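A small usage sketch of this package, assuming the import path github.com/ethereum/go-ethereum/common/fdlimit:

    package main

    import (
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/common/fdlimit"
    )

    func main() {
        // Query the OS ceiling (capped to 10240 on macOS per the file above),
        // then raise the soft limit towards it.
        max, err := fdlimit.Maximum()
        if err != nil {
            log.Fatal(err)
        }
        raised, err := fdlimit.Raise(uint64(max))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("file descriptor limit raised to", raised)
    }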

View File

@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

-// +build linux darwin netbsd openbsd solaris
+// +build linux netbsd openbsd solaris

 package fdlimit

View File

@@ -18,6 +18,7 @@ package fdlimit

 import "fmt"

+// hardlimit is the number of file descriptors allowed at max by the kernel.
 const hardlimit = 16384

 // Raise tries to maximize the file descriptor allowance of this process

View File

@@ -279,7 +279,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
     number := header.Number.Uint64()

     // Don't waste time checking blocks from the future
-    if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
+    if header.Time > uint64(time.Now().Unix()) {
         return consensus.ErrFutureBlock
     }
     // Checkpoint blocks need to enforce zero beneficiary
@@ -351,7 +351,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
     if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
         return consensus.ErrUnknownAncestor
     }
-    if parent.Time.Uint64()+c.config.Period > header.Time.Uint64() {
+    if parent.Time+c.config.Period > header.Time {
         return ErrInvalidTimestamp
     }
     // Retrieve the snapshot needed to verify this header and cache it
@@ -570,9 +570,9 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
     if parent == nil {
         return consensus.ErrUnknownAncestor
     }
-    header.Time = new(big.Int).Add(parent.Time, new(big.Int).SetUint64(c.config.Period))
-    if header.Time.Int64() < time.Now().Unix() {
-        header.Time = big.NewInt(time.Now().Unix())
+    header.Time = parent.Time + c.config.Period
+    if header.Time < uint64(time.Now().Unix()) {
+        header.Time = uint64(time.Now().Unix())
     }
     return nil
 }
@@ -637,7 +637,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
         }
     }
     // Sweet, the protocol permits us to sign the block, wait for our time
-    delay := time.Unix(header.Time.Int64(), 0).Sub(time.Now()) // nolint: gosimple
+    delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple
     if header.Difficulty.Cmp(diffNoTurn) == 0 {
         // It's not our turn explicitly to sign, delay it a bit
         wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime

View File

@@ -716,7 +716,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
     Difficulty: big.NewInt(167925187834220),
     GasLimit:   4015682,
     GasUsed:    0,
-    Time:       big.NewInt(1488928920),
+    Time:       1488928920,
     Extra:      []byte("www.bw.com"),
     MixDigest:  common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
     Nonce:      types.EncodeNonce(0xf400cd0006070c49),

View File

@@ -63,7 +63,6 @@ var (
 // codebase, inherently breaking if the engine is swapped out. Please put common
 // error types into the consensus package.
 var (
-    errLargeBlockTime    = errors.New("timestamp too big")
     errZeroBlockTime     = errors.New("timestamp equals parent's")
     errTooManyUncles     = errors.New("too many uncles")
     errDuplicateUncle    = errors.New("duplicate uncle")
@@ -242,20 +241,16 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
         return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
     }
     // Verify the header's timestamp
-    if uncle {
-        if header.Time.Cmp(math.MaxBig256) > 0 {
-            return errLargeBlockTime
-        }
-    } else {
-        if header.Time.Cmp(big.NewInt(time.Now().Add(allowedFutureBlockTime).Unix())) > 0 {
+    if !uncle {
+        if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) {
             return consensus.ErrFutureBlock
         }
     }
-    if header.Time.Cmp(parent.Time) <= 0 {
+    if header.Time <= parent.Time {
         return errZeroBlockTime
     }
     // Verify the block's difficulty based in it's timestamp and parent's difficulty
-    expected := ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
+    expected := ethash.CalcDifficulty(chain, header.Time, parent)

     if expected.Cmp(header.Difficulty) != 0 {
         return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
@@ -349,7 +344,7 @@ func makeDifficultyCalculator(bombDelay *big.Int) func(time uint64, parent *type
     //        ) + 2^(periodCount - 2)
     bigTime := new(big.Int).SetUint64(time)
-    bigParentTime := new(big.Int).Set(parent.Time)
+    bigParentTime := new(big.Int).SetUint64(parent.Time)

     // holds intermediate values to make the algo easier to read & audit
     x := new(big.Int)
@@ -408,7 +403,7 @@ func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
     //        ) + 2^(periodCount - 2)
     bigTime := new(big.Int).SetUint64(time)
-    bigParentTime := new(big.Int).Set(parent.Time)
+    bigParentTime := new(big.Int).SetUint64(parent.Time)

     // holds intermediate values to make the algo easier to read & audit
     x := new(big.Int)
@@ -456,7 +451,7 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
     bigParentTime := new(big.Int)

     bigTime.SetUint64(time)
-    bigParentTime.Set(parent.Time)
+    bigParentTime.SetUint64(parent.Time)

     if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
         diff.Add(parent.Difficulty, adjust)
@@ -558,7 +553,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
     if parent == nil {
         return consensus.ErrUnknownAncestor
     }
-    header.Difficulty = ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
+    header.Difficulty = ethash.CalcDifficulty(chain, header.Time, parent)
     return nil
 }

View File

@@ -76,7 +76,7 @@ func TestCalcDifficulty(t *testing.T) {
     number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
     diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
         Number:     number,
-        Time:       new(big.Int).SetUint64(test.ParentTimestamp),
+        Time:       test.ParentTimestamp,
         Difficulty: test.ParentDifficulty,
     })
     if diff.Cmp(test.CurrentDifficulty) != 0 {

View File

@@ -267,9 +267,9 @@ func (bc *BlockChain) loadLastState() error {
     blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
     fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

-    log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
-    log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
-    log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
+    log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
+    log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
+    log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

     return nil
 }
@@ -894,7 +894,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
     context := []interface{}{
         "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
-        "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
+        "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
         "size", common.StorageSize(bytes),
     }
     if stats.ignored > 0 {
@@ -1058,8 +1058,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 // accepted for future processing, and returns an error if the block is too far
 // ahead and was not added.
 func (bc *BlockChain) addFutureBlock(block *types.Block) error {
-    max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
-    if block.Time().Cmp(max) > 0 {
+    max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
+    if block.Time() > max {
         return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
     }
     bc.futureBlocks.Add(block.Hash(), block)
@@ -1391,21 +1391,25 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i
     return 0, nil, nil, nil
 }

-// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
-// to be part of the new canonical chain and accumulates potential missing transactions and post an
-// event about them
+// reorg takes two blocks, an old chain and a new chain and will reconstruct the
+// blocks and inserts them to be part of the new canonical chain and accumulates
+// potential missing transactions and post an event about them.
 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
     var (
         newChain    types.Blocks
         oldChain    types.Blocks
         commonBlock *types.Block
         deletedTxs  types.Transactions
+        addedTxs    types.Transactions
         deletedLogs []*types.Log
+        rebirthLogs []*types.Log
         // collectLogs collects the logs that were generated during the
         // processing of the block that corresponds with the given hash.
-        // These logs are later announced as deleted.
-        collectLogs = func(hash common.Hash) {
+        // These logs are later announced as deleted or reborn
+        collectLogs = func(hash common.Hash, removed bool) {
+            // Coalesce logs and set 'Removed'.
             number := bc.hc.GetBlockNumber(hash)
             if number == nil {
                 return
             }
@@ -1413,53 +1417,60 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
             receipts := rawdb.ReadReceipts(bc.db, hash, *number)
             for _, receipt := range receipts {
                 for _, log := range receipt.Logs {
-                    del := *log
-                    del.Removed = true
-                    deletedLogs = append(deletedLogs, &del)
+                    l := *log
+                    if removed {
+                        l.Removed = true
+                        deletedLogs = append(deletedLogs, &l)
+                    } else {
+                        rebirthLogs = append(rebirthLogs, &l)
+                    }
                 }
             }
         }
     )
-    // first reduce whoever is higher bound
+    // Reduce the longer chain to the same number as the shorter one
     if oldBlock.NumberU64() > newBlock.NumberU64() {
-        // reduce old chain
+        // Old chain is longer, gather all transactions and logs as deleted ones
         for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
             oldChain = append(oldChain, oldBlock)
             deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-            collectLogs(oldBlock.Hash())
+            collectLogs(oldBlock.Hash(), true)
         }
     } else {
-        // reduce new chain and append new chain blocks for inserting later on
+        // New chain is longer, stash all blocks away for subsequent insertion
        for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
            newChain = append(newChain, newBlock)
        }
    }
    if oldBlock == nil {
-        return fmt.Errorf("Invalid old chain")
+        return fmt.Errorf("invalid old chain")
    }
    if newBlock == nil {
-        return fmt.Errorf("Invalid new chain")
+        return fmt.Errorf("invalid new chain")
    }
+    // Both sides of the reorg are at the same number, reduce both until the common
+    // ancestor is found
    for {
+        // If the common ancestor was found, bail out
        if oldBlock.Hash() == newBlock.Hash() {
            commonBlock = oldBlock
            break
        }
+        // Remove an old block as well as stash away a new block
        oldChain = append(oldChain, oldBlock)
-        newChain = append(newChain, newBlock)
        deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-        collectLogs(oldBlock.Hash())
-        oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+        collectLogs(oldBlock.Hash(), true)
+        newChain = append(newChain, newBlock)
+
+        // Step back with both chains
+        oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
        if oldBlock == nil {
-            return fmt.Errorf("Invalid old chain")
+            return fmt.Errorf("invalid old chain")
        }
+        newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
        if newBlock == nil {
-            return fmt.Errorf("Invalid new chain")
+            return fmt.Errorf("invalid new chain")
        }
    }
    // Ensure the user sees large reorgs
@@ -1474,35 +1485,46 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
        log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
    }
    // Insert the new chain, taking care of the proper incremental order
-    var addedTxs types.Transactions
    for i := len(newChain) - 1; i >= 0; i-- {
-        // insert the block in the canonical way, re-writing history
+        // Insert the block in the canonical way, re-writing history
        bc.insert(newChain[i])
-        // write lookup entries for hash based transaction/receipt searches
+
+        // Collect reborn logs due to chain reorg (except head block (reverse order))
+        if i != 0 {
+            collectLogs(newChain[i].Hash(), false)
+        }
+        // Write lookup entries for hash based transaction/receipt searches
        rawdb.WriteTxLookupEntries(bc.db, newChain[i])
        addedTxs = append(addedTxs, newChain[i].Transactions()...)
    }
-    // calculate the difference between deleted and added transactions
-    diff := types.TxDifference(deletedTxs, addedTxs)
-    // When transactions get deleted from the database that means the
-    // receipts that were created in the fork must also be deleted
+    // When transactions get deleted from the database, the receipts that were
+    // created in the fork must also be deleted
    batch := bc.db.NewBatch()
-    for _, tx := range diff {
+    for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
        rawdb.DeleteTxLookupEntry(batch, tx.Hash())
    }
    batch.Write()
+
+    // If any logs need to be fired, do it now. In theory we could avoid creating
+    // this goroutine if there are no events to fire, but realistcally that only
+    // ever happens if we're reorging empty blocks, which will only happen on idle
+    // networks where performance is not an issue either way.
+    //
+    // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
+    // event ordering?
+    go func() {
        if len(deletedLogs) > 0 {
-            go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+            bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
        }
+        if len(rebirthLogs) > 0 {
+            bc.logsFeed.Send(rebirthLogs)
+        }
        if len(oldChain) > 0 {
-            go func() {
            for _, block := range oldChain {
                bc.chainSideFeed.Send(ChainSideEvent{Block: block})
            }
-            }()
        }
+    }()

    return nil
 }
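After this change the reorg emits both removed and reborn logs through the chain's feeds. A minimal consumer sketch (patterned on the tests further below; assumes the core and core/types packages):

    // drainLogFeeds subscribes to both feeds of a core.BlockChain: reborn logs
    // arrive on the regular logs feed, logs undone by a reorg on the removed feed.
    func drainLogFeeds(bc *core.BlockChain) {
        logsCh := make(chan []*types.Log, 16)        // new + reborn logs
        rmCh := make(chan core.RemovedLogsEvent, 16) // logs with Removed == true

        logsSub := bc.SubscribeLogsEvent(logsCh)
        rmSub := bc.SubscribeRemovedLogsEvent(rmCh)
        defer logsSub.Unsubscribe()
        defer rmSub.Unsubscribe()

        for {
            select {
            case logs := <-logsCh:
                _ = logs // logs from newly canonical blocks
            case ev := <-rmCh:
                _ = ev.Logs // logs from the abandoned fork
            case <-logsSub.Err():
                return
            }
        }
    }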

View File

@@ -60,7 +60,7 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
     "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
     "number", end.Number(), "hash", end.Hash(),
 }
-if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
     context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
 }
 context = append(context, []interface{}{"cache", cache}...)

View File

@@ -884,7 +884,6 @@ func TestChainTxReorgs(t *testing.T) {
 }

 func TestLogReorgs(t *testing.T) {
     var (
         key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
         addr1   = crypto.PubkeyToAddress(key1.PublicKey)
@@ -930,6 +929,213 @@ func TestLogReorgs(t *testing.T) {
     }
 }

+func TestLogRebirth(t *testing.T) {
+    var (
+        key1, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+        addr1    = crypto.PubkeyToAddress(key1.PublicKey)
+        db       = ethdb.NewMemDatabase()
+        // this code generates a log
+        code     = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+        gspec    = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+        genesis  = gspec.MustCommit(db)
+        signer   = types.NewEIP155Signer(gspec.Config.ChainID)
+        newLogCh = make(chan bool)
+    )
+
+    // listenNewLog checks whether the received logs number is equal with expected.
+    listenNewLog := func(sink chan []*types.Log, expect int) {
+        cnt := 0
+        for {
+            select {
+            case logs := <-sink:
+                cnt += len(logs)
+            case <-time.NewTimer(5 * time.Second).C:
+                // new logs timeout
+                newLogCh <- false
+                return
+            }
+            if cnt == expect {
+                break
+            } else if cnt > expect {
+                // redundant logs received
+                newLogCh <- false
+                return
+            }
+        }
+        select {
+        case <-sink:
+            // redundant logs received
+            newLogCh <- false
+        case <-time.NewTimer(100 * time.Millisecond).C:
+            newLogCh <- true
+        }
+    }
+
+    blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+    defer blockchain.Stop()
+
+    logsCh := make(chan []*types.Log)
+    blockchain.SubscribeLogsEvent(logsCh)
+
+    rmLogsCh := make(chan RemovedLogsEvent)
+    blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
+
+    chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+        if i == 1 {
+            tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+            if err != nil {
+                t.Fatalf("failed to create tx: %v", err)
+            }
+            gen.AddTx(tx)
+        }
+    })
+
+    // Spawn a goroutine to receive log events
+    go listenNewLog(logsCh, 1)
+    if _, err := blockchain.InsertChain(chain); err != nil {
+        t.Fatalf("failed to insert chain: %v", err)
+    }
+    if !<-newLogCh {
+        t.Fatalf("failed to receive new log event")
+    }
+
+    // Generate long reorg chain
+    forkChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+        if i == 1 {
+            tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+            if err != nil {
+                t.Fatalf("failed to create tx: %v", err)
+            }
+            gen.AddTx(tx)
+            // Higher block difficulty
+            gen.OffsetTime(-9)
+        }
+    })
+
+    // Spawn a goroutine to receive log events
+    go listenNewLog(logsCh, 1)
+    if _, err := blockchain.InsertChain(forkChain); err != nil {
+        t.Fatalf("failed to insert forked chain: %v", err)
+    }
+    if !<-newLogCh {
+        t.Fatalf("failed to receive new log event")
+    }
+    // Ensure removedLog events received
+    select {
+    case ev := <-rmLogsCh:
+        if len(ev.Logs) == 0 {
+            t.Error("expected logs")
+        }
+    case <-time.NewTimer(1 * time.Second).C:
+        t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
+    }
+
+    newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
+    go listenNewLog(logsCh, 1)
+    if _, err := blockchain.InsertChain(newBlocks); err != nil {
+        t.Fatalf("failed to insert forked chain: %v", err)
+    }
+    // Ensure removedLog events received
+    select {
+    case ev := <-rmLogsCh:
+        if len(ev.Logs) == 0 {
+            t.Error("expected logs")
+        }
+    case <-time.NewTimer(1 * time.Second).C:
+        t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
+    }
+    // Rebirth logs should omit a newLogEvent
+    if !<-newLogCh {
+        t.Fatalf("failed to receive new log event")
+    }
+}
+
+func TestSideLogRebirth(t *testing.T) {
+    var (
+        key1, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+        addr1    = crypto.PubkeyToAddress(key1.PublicKey)
+        db       = ethdb.NewMemDatabase()
+        // this code generates a log
+        code     = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+        gspec    = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+        genesis  = gspec.MustCommit(db)
+        signer   = types.NewEIP155Signer(gspec.Config.ChainID)
+        newLogCh = make(chan bool)
+    )
+
+    // listenNewLog checks whether the received logs number is equal with expected.
+    listenNewLog := func(sink chan []*types.Log, expect int) {
+        cnt := 0
+        for {
+            select {
+            case logs := <-sink:
+                cnt += len(logs)
+            case <-time.NewTimer(5 * time.Second).C:
+                // new logs timeout
+                newLogCh <- false
+                return
+            }
+            if cnt == expect {
+                break
+            } else if cnt > expect {
+                // redundant logs received
+                newLogCh <- false
+                return
+            }
+        }
+        select {
+        case <-sink:
+            // redundant logs received
+            newLogCh <- false
+        case <-time.NewTimer(100 * time.Millisecond).C:
+            newLogCh <- true
+        }
+    }
+
+    blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+    defer blockchain.Stop()
+
+    logsCh := make(chan []*types.Log)
+    blockchain.SubscribeLogsEvent(logsCh)
+
+    chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+        if i == 1 {
+            // Higher block difficulty
+            gen.OffsetTime(-9)
+        }
+    })
+    if _, err := blockchain.InsertChain(chain); err != nil {
+        t.Fatalf("failed to insert forked chain: %v", err)
+    }
+
+    // Generate side chain with lower difficulty
+    sideChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+        if i == 1 {
+            tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+            if err != nil {
+                t.Fatalf("failed to create tx: %v", err)
+            }
+            gen.AddTx(tx)
+        }
+    })
+    if _, err := blockchain.InsertChain(sideChain); err != nil {
+        t.Fatalf("failed to insert forked chain: %v", err)
+    }
+
+    // Generate a new block based on side chain
+    newBlocks, _ := GenerateChain(params.TestChainConfig, sideChain[len(sideChain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
+    go listenNewLog(logsCh, 1)
+    if _, err := blockchain.InsertChain(newBlocks); err != nil {
+        t.Fatalf("failed to insert forked chain: %v", err)
+    }
+    // Rebirth logs should omit a newLogEvent
+    if !<-newLogCh {
+        t.Fatalf("failed to receive new log event")
+    }
+}
+
 func TestReorgSideEvent(t *testing.T) {
     var (
         db = ethdb.NewMemDatabase()

View File

@@ -149,12 +149,12 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
 // associated difficulty. It's useful to test scenarios where forking is not
 // tied to chain length directly.
 func (b *BlockGen) OffsetTime(seconds int64) {
-    b.header.Time.Add(b.header.Time, big.NewInt(seconds))
-    if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
+    b.header.Time += uint64(seconds)
+    if b.header.Time <= b.parent.Header().Time {
         panic("block time out of range")
     }
     chainreader := &fakeChainReader{config: b.config}
-    b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header())
+    b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header())
 }

 // GenerateChain creates a chain of n blocks. The first block's
@@ -225,20 +225,20 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 }

 func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
-    var time *big.Int
-    if parent.Time() == nil {
-        time = big.NewInt(10)
+    var time uint64
+    if parent.Time() == 0 {
+        time = 10
     } else {
-        time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
+        time = parent.Time() + 10 // block time is fixed at 10 seconds
     }

     return &types.Header{
         Root:       state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())),
         ParentHash: parent.Hash(),
         Coinbase:   parent.Coinbase(),
-        Difficulty: engine.CalcDifficulty(chain, time.Uint64(), &types.Header{
+        Difficulty: engine.CalcDifficulty(chain, time, &types.Header{
             Number:     parent.Number(),
-            Time:       new(big.Int).Sub(time, big.NewInt(10)),
+            Time:       time - 10,
             Difficulty: parent.Difficulty(),
             UncleHash:  parent.UncleHash(),
         }),

View File

@@ -51,7 +51,7 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author
     Origin:      msg.From(),
     Coinbase:    beneficiary,
     BlockNumber: new(big.Int).Set(header.Number),
-    Time:        new(big.Int).Set(header.Time),
+    Time:        new(big.Int).SetUint64(header.Time),
     Difficulty:  new(big.Int).Set(header.Difficulty),
     GasLimit:    header.GasLimit,
     GasPrice:    new(big.Int).Set(msg.GasPrice()),

View File

@@ -243,7 +243,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
     head := &types.Header{
         Number:     new(big.Int).SetUint64(g.Number),
         Nonce:      types.EncodeNonce(g.Nonce),
-        Time:       new(big.Int).SetUint64(g.Timestamp),
+        Time:       g.Timestamp,
         ParentHash: g.ParentHash,
         Extra:      g.ExtraData,
         GasLimit:   g.GasLimit,

View File

@@ -286,7 +286,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
     "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
     "number", last.Number, "hash", last.Hash(),
 }
-if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute {
+if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
     context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
 }
 if stats.ignored > 0 {

View File

@@ -79,7 +79,7 @@ type Header struct {
     Number    *big.Int    `json:"number"           gencodec:"required"`
     GasLimit  uint64      `json:"gasLimit"         gencodec:"required"`
     GasUsed   uint64      `json:"gasUsed"          gencodec:"required"`
-    Time      *big.Int    `json:"timestamp"        gencodec:"required"`
+    Time      uint64      `json:"timestamp"        gencodec:"required"`
     Extra     []byte      `json:"extraData"        gencodec:"required"`
     MixDigest common.Hash `json:"mixHash"`
     Nonce     BlockNonce  `json:"nonce"`
@@ -91,7 +91,7 @@ type headerMarshaling struct {
     Number   *hexutil.Big
     GasLimit hexutil.Uint64
     GasUsed  hexutil.Uint64
-    Time     *hexutil.Big
+    Time     hexutil.Uint64
     Extra    hexutil.Bytes
     Hash     common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
 }
@@ -105,7 +105,7 @@ func (h *Header) Hash() common.Hash {
 // Size returns the approximate memory used by all internal contents. It is used
 // to approximate and limit the memory consumption of various caches.
 func (h *Header) Size() common.StorageSize {
-    return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)
+    return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8)
 }

 func rlpHash(x interface{}) (h common.Hash) {
@@ -221,9 +221,6 @@ func NewBlockWithHeader(header *Header) *Block {
 // modifying a header variable.
 func CopyHeader(h *Header) *Header {
     cpy := *h
-    if cpy.Time = new(big.Int); h.Time != nil {
-        cpy.Time.Set(h.Time)
-    }
     if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
         cpy.Difficulty.Set(h.Difficulty)
     }
@@ -286,7 +283,7 @@ func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number)
 func (b *Block) GasLimit() uint64     { return b.header.GasLimit }
 func (b *Block) GasUsed() uint64      { return b.header.GasUsed }
 func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
-func (b *Block) Time() *big.Int       { return new(big.Int).Set(b.header.Time) }
+func (b *Block) Time() uint64         { return b.header.Time }

 func (b *Block) NumberU64() uint64      { return b.header.Number.Uint64() }
 func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }

View File

@@ -48,7 +48,7 @@ func TestBlockEncoding(t *testing.T) {
     check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017"))
     check("Hash", block.Hash(), common.HexToHash("0a5843ac1cb04865017cb35a57b50b07084e5fcee39b5acadade33149f4fff9e"))
     check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4))
-    check("Time", block.Time(), big.NewInt(1426516743))
+    check("Time", block.Time(), uint64(1426516743))
     check("Size", block.Size(), common.StorageSize(len(blockEnc)))

     tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), 50000, big.NewInt(10), nil)

View File

@@ -27,7 +27,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
     Number    *hexutil.Big   `json:"number"           gencodec:"required"`
     GasLimit  hexutil.Uint64 `json:"gasLimit"         gencodec:"required"`
     GasUsed   hexutil.Uint64 `json:"gasUsed"          gencodec:"required"`
-    Time      *hexutil.Big   `json:"timestamp"        gencodec:"required"`
+    Time      hexutil.Uint64 `json:"timestamp"        gencodec:"required"`
     Extra     hexutil.Bytes  `json:"extraData"        gencodec:"required"`
     MixDigest common.Hash    `json:"mixHash"`
     Nonce     BlockNonce     `json:"nonce"`
@@ -45,7 +45,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
     enc.Number = (*hexutil.Big)(h.Number)
     enc.GasLimit = hexutil.Uint64(h.GasLimit)
     enc.GasUsed = hexutil.Uint64(h.GasUsed)
-    enc.Time = (*hexutil.Big)(h.Time)
+    enc.Time = hexutil.Uint64(h.Time)
     enc.Extra = h.Extra
     enc.MixDigest = h.MixDigest
     enc.Nonce = h.Nonce
@@ -67,7 +67,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
     Number    *hexutil.Big    `json:"number"           gencodec:"required"`
     GasLimit  *hexutil.Uint64 `json:"gasLimit"         gencodec:"required"`
     GasUsed   *hexutil.Uint64 `json:"gasUsed"          gencodec:"required"`
-    Time      *hexutil.Big    `json:"timestamp"        gencodec:"required"`
+    Time      *hexutil.Uint64 `json:"timestamp"        gencodec:"required"`
     Extra     *hexutil.Bytes  `json:"extraData"        gencodec:"required"`
     MixDigest *common.Hash    `json:"mixHash"`
     Nonce     *BlockNonce     `json:"nonce"`
@@ -123,7 +123,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
     if dec.Time == nil {
         return errors.New("missing required field 'timestamp' for Header")
     }
-    h.Time = (*big.Int)(dec.Time)
+    h.Time = uint64(*dec.Time)
     if dec.Extra == nil {
         return errors.New("missing required field 'extraData' for Header")
     }

View File

@@ -213,6 +213,10 @@ func (b *EthAPIBackend) AccountManager() *accounts.Manager {
     return b.eth.AccountManager()
 }

+func (b *EthAPIBackend) RPCGasCap() *big.Int {
+    return b.eth.config.RPCGasCap
+}
+
 func (b *EthAPIBackend) BloomStatus() (uint64, uint64) {
     sections, _, _ := b.eth.bloomIndexer.Sections()
     return params.BloomBitsBlocks, sections

View File

@@ -135,6 +135,9 @@ type Config struct {
     // Constantinople block override (TODO: remove after the fork)
     ConstantinopleOverride *big.Int

+    // RPCGasCap is the global gas cap for eth-call variants.
+    RPCGasCap *big.Int `toml:",omitempty"`
 }

 type configMarshaling struct {
View File

@@ -75,6 +75,7 @@ var (
     errUnknownPeer    = errors.New("peer is unknown or unhealthy")
     errBadPeer        = errors.New("action from bad peer ignored")
     errStallingPeer   = errors.New("peer is stalling")
+    errUnsyncedPeer   = errors.New("unsynced peer")
     errNoPeers        = errors.New("no peers to keep download active")
     errTimeout        = errors.New("timeout")
     errEmptyHeaderSet = errors.New("empty header set by peer")
@@ -99,6 +100,7 @@ type Downloader struct {
     mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
     mux  *event.TypeMux // Event multiplexer to announce sync operation events

+    checkpoint uint64   // Checkpoint block number to enforce head against (e.g. fast sync)
     genesis    uint64   // Genesis block number to limit sync to (e.g. light client CHT)
     queue      *queue   // Scheduler for selecting the hashes to download
     peers      *peerSet // Set of active peers from which download can proceed
@@ -205,15 +207,15 @@ type BlockChain interface {
 }

 // New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
+func New(mode SyncMode, checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
     if lightchain == nil {
         lightchain = chain
     }
     dl := &Downloader{
         mode:        mode,
         stateDB:     stateDb,
         mux:         mux,
+        checkpoint:  checkpoint,
         queue:       newQueue(),
         peers:       newPeerSet(),
         rttEstimate: uint64(rttMaxEstimate),
@@ -326,7 +328,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
     case nil:
     case errBusy:

-    case errTimeout, errBadPeer, errStallingPeer,
+    case errTimeout, errBadPeer, errStallingPeer, errUnsyncedPeer,
         errEmptyHeaderSet, errPeersUnavailable, errTooOld,
         errInvalidAncestor, errInvalidChain:
         log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
@@ -577,6 +579,10 @@ func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
         return nil, errBadPeer
     }
     head := headers[0]
+    if d.mode == FastSync && head.Number.Uint64() < d.checkpoint {
+        p.log.Warn("Remote head below checkpoint", "number", head.Number, "hash", head.Hash())
+        return nil, errUnsyncedPeer
+    }
     p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
     return head, nil
View File

@@ -26,7 +26,7 @@ import (
     "testing"
     "time"

-    ethereum "github.com/ethereum/go-ethereum"
+    "github.com/ethereum/go-ethereum"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/ethdb"
@@ -73,7 +73,8 @@ func newTester() *downloadTester {
     }
     tester.stateDb = ethdb.NewMemDatabase()
     tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
-    tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
+
+    tester.downloader = New(FullSync, 0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
     return tester
 }
@@ -1049,6 +1050,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
     {errUnknownPeer, false},  // Peer is unknown, was already dropped, don't double drop
     {errBadPeer, true},       // Peer was deemed bad for some reason, drop it
     {errStallingPeer, true},  // Peer was detected to be stalling, drop it
+    {errUnsyncedPeer, true},  // Peer was detected to be unsynced, drop it
     {errNoPeers, false},      // No peers to download from, soft race, no issue
     {errTimeout, true},       // No hashes received in due time, drop the peer
     {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
@@ -1567,3 +1569,39 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
         }
     }
 }
+
+// Tests that peers below a pre-configured checkpoint block are prevented from
+// being fast-synced from, avoiding potential cheap eclipse attacks.
+func TestCheckpointEnforcement62(t *testing.T)      { testCheckpointEnforcement(t, 62, FullSync) }
+func TestCheckpointEnforcement63Full(t *testing.T)  { testCheckpointEnforcement(t, 63, FullSync) }
+func TestCheckpointEnforcement63Fast(t *testing.T)  { testCheckpointEnforcement(t, 63, FastSync) }
+func TestCheckpointEnforcement64Full(t *testing.T)  { testCheckpointEnforcement(t, 64, FullSync) }
+func TestCheckpointEnforcement64Fast(t *testing.T)  { testCheckpointEnforcement(t, 64, FastSync) }
+func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }
+
+func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
+    t.Parallel()
+
+    // Create a new tester with a particular hard coded checkpoint block
+    tester := newTester()
+    defer tester.terminate()
+
+    tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
+    chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
+
+    // Attempt to sync with the peer and validate the result
+    tester.newPeer("peer", protocol, chain)
+
+    var expect error
+    if mode == FastSync {
+        expect = errUnsyncedPeer
+    }
+    if err := tester.sync("peer", nil, mode); err != expect {
+        t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
+    }
+    if mode == FastSync {
+        assertOwnChain(t, tester, 1)
+    } else {
+        assertOwnChain(t, tester, chain.len())
+    }
+}


@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
@@ -55,7 +54,7 @@ const (
) )
var ( var (
daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
) )
// errIncompatibleConfig is returned if the requested protocols and configs are // errIncompatibleConfig is returned if the requested protocols and configs are
@@ -72,6 +71,9 @@ type ProtocolManager struct {
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
checkpointNumber uint64 // Block number for the sync progress validator to cross reference
checkpointHash common.Hash // Block hash for the sync progress validator to cross reference
txpool txPool txpool txPool
blockchain *core.BlockChain blockchain *core.BlockChain
chainconfig *params.ChainConfig chainconfig *params.ChainConfig
@@ -126,6 +128,11 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
if mode == downloader.FastSync { if mode == downloader.FastSync {
manager.fastSync = uint32(1) manager.fastSync = uint32(1)
} }
// If we have trusted checkpoints, enforce them on the chain
if checkpoint, ok := params.TrustedCheckpoints[blockchain.Genesis().Hash()]; ok {
manager.checkpointNumber = (checkpoint.SectionIndex+1)*params.CHTFrequencyClient - 1
manager.checkpointHash = checkpoint.SectionHead
}
// Initiate a sub-protocol for every implemented version we can handle // Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions)) manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
for i, version := range ProtocolVersions { for i, version := range ProtocolVersions {
@@ -165,7 +172,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
return nil, errIncompatibleConfig return nil, errIncompatibleConfig
} }
// Construct the different synchronisation mechanisms // Construct the different synchronisation mechanisms
manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer) manager.downloader = downloader.New(mode, manager.checkpointNumber, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)
validator := func(header *types.Header) error { validator := func(header *types.Header) error {
return engine.VerifyHeader(blockchain, header, true) return engine.VerifyHeader(blockchain, header, true)
@@ -291,22 +298,22 @@ func (pm *ProtocolManager) handle(p *peer) error {
// after this will be sent via broadcasts. // after this will be sent via broadcasts.
pm.syncTransactions(p) pm.syncTransactions(p)
// If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork // If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse)
if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil { if pm.checkpointHash != (common.Hash{}) {
// Request the peer's DAO fork header for extra-data validation // Request the peer's checkpoint header for chain height/weight validation
if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil { if err := p.RequestHeadersByNumber(pm.checkpointNumber, 1, 0, false); err != nil {
return err return err
} }
// Start a timer to disconnect if the peer doesn't reply in time // Start a timer to disconnect if the peer doesn't reply in time
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() { p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() {
p.Log().Debug("Timed out DAO fork-check, dropping") p.Log().Warn("Checkpoint challenge timed out, dropping", "addr", p.RemoteAddr(), "type", p.Name())
pm.removePeer(p.id) pm.removePeer(p.id)
}) })
// Make sure it's cleaned up if the peer dies off // Make sure it's cleaned up if the peer dies off
defer func() { defer func() {
if p.forkDrop != nil { if p.syncDrop != nil {
p.forkDrop.Stop() p.syncDrop.Stop()
p.forkDrop = nil p.syncDrop = nil
} }
}() }()
} }
@@ -438,41 +445,33 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msg.Decode(&headers); err != nil { if err := msg.Decode(&headers); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err) return errResp(ErrDecode, "msg %v: %v", msg, err)
} }
// If no headers were received, but we're expecting a DAO fork check, maybe it's that // If no headers were received, but we're expecting a checkpoint header, consider it that
if len(headers) == 0 && p.forkDrop != nil { if len(headers) == 0 && p.syncDrop != nil {
// Possibly an empty reply to the fork header checks, sanity check TDs // Stop the timer either way, decide later to drop or not
verifyDAO := true p.syncDrop.Stop()
p.syncDrop = nil
// If we already have a DAO header, we can check the peer's TD against it. If // If we're doing a fast sync, we must enforce the checkpoint block to avoid
// the peer's ahead of this, it too must have a reply to the DAO check // eclipse attacks. Unsynced nodes are welcome to connect after we're done
if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil { // joining the network
if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash(), daoHeader.Number.Uint64())) >= 0 { if atomic.LoadUint32(&pm.fastSync) == 1 {
verifyDAO = false p.Log().Warn("Dropping unsynced node during fast sync", "addr", p.RemoteAddr(), "type", p.Name())
} return errors.New("unsynced node cannot serve fast sync")
}
// If we're seemingly on the same chain, disable the drop timer
if verifyDAO {
p.Log().Debug("Seems to be on the same side of the DAO fork")
p.forkDrop.Stop()
p.forkDrop = nil
return nil
} }
} }
// Filter out any explicitly requested headers, deliver the rest to the downloader // Filter out any explicitly requested headers, deliver the rest to the downloader
filter := len(headers) == 1 filter := len(headers) == 1
if filter { if filter {
// If it's a potential DAO fork check, validate against the rules // If it's a potential sync progress check, validate the content and advertised chain weight
if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 { if p.syncDrop != nil && headers[0].Number.Uint64() == pm.checkpointNumber {
// Disable the fork drop timer // Disable the sync drop timer
p.forkDrop.Stop() p.syncDrop.Stop()
p.forkDrop = nil p.syncDrop = nil
// Validate the header and either drop the peer or continue // Validate the header and either drop the peer or continue
if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil { if headers[0].Hash() != pm.checkpointHash {
p.Log().Debug("Verified to be on the other side of the DAO fork, dropping") return errors.New("checkpoint hash mismatch")
return err
} }
p.Log().Debug("Verified to be on the same side of the DAO fork")
return nil return nil
} }
// Otherwise if it's a whitelisted block, validate against the set // Otherwise if it's a whitelisted block, validate against the set


@@ -449,48 +449,95 @@ func testGetReceipt(t *testing.T, protocol int) {
} }
} }
// Tests that post eth protocol handshake, DAO fork-enabled clients also execute // Tests that post eth protocol handshake, clients perform a mutual checkpoint
// a DAO "challenge" verifying each others' DAO fork headers to ensure they're on // challenge to validate each other's chains. Hash mismatches, or missing ones
// compatible chains. // during a fast sync should lead to the peer getting dropped.
func TestDAOChallengeNoVsNo(t *testing.T) { testDAOChallenge(t, false, false, false) } func TestCheckpointChallenge(t *testing.T) {
func TestDAOChallengeNoVsPro(t *testing.T) { testDAOChallenge(t, false, true, false) } tests := []struct {
func TestDAOChallengeProVsNo(t *testing.T) { testDAOChallenge(t, true, false, false) } syncmode downloader.SyncMode
func TestDAOChallengeProVsPro(t *testing.T) { testDAOChallenge(t, true, true, false) } checkpoint bool
func TestDAOChallengeNoVsTimeout(t *testing.T) { testDAOChallenge(t, false, false, true) } timeout bool
func TestDAOChallengeProVsTimeout(t *testing.T) { testDAOChallenge(t, true, true, true) } empty bool
match bool
drop bool
}{
// If checkpointing is not enabled locally, don't challenge and don't drop
{downloader.FullSync, false, false, false, false, false},
{downloader.FastSync, false, false, false, false, false},
{downloader.LightSync, false, false, false, false, false},
func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool) { // If checkpointing is enabled locally and remote response is empty, only drop during fast sync
// Reduce the DAO handshake challenge timeout {downloader.FullSync, true, false, true, false, false},
if timeout { {downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer
defer func(old time.Duration) { daoChallengeTimeout = old }(daoChallengeTimeout) {downloader.LightSync, true, false, true, false, false},
daoChallengeTimeout = 500 * time.Millisecond
// If checkpointing is enabled locally and remote response mismatches, always drop
{downloader.FullSync, true, false, false, false, true},
{downloader.FastSync, true, false, false, false, true},
{downloader.LightSync, true, false, false, false, true},
// If checkpointing is enabled locally and remote response matches, never drop
{downloader.FullSync, true, false, false, true, false},
{downloader.FastSync, true, false, false, true, false},
{downloader.LightSync, true, false, false, true, false},
// If checkpointing is enabled locally and remote times out, always drop
{downloader.FullSync, true, true, false, true, true},
{downloader.FastSync, true, true, false, true, true},
{downloader.LightSync, true, true, false, true, true},
} }
// Create a DAO aware protocol manager for _, tt := range tests {
t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)
})
}
}
func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
// Reduce the checkpoint handshake challenge timeout
defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
syncChallengeTimeout = 250 * time.Millisecond
// Initialize a chain and generate a fake CHT if checkpointing is enabled
var ( var (
evmux = new(event.TypeMux)
pow = ethash.NewFaker()
db = ethdb.NewMemDatabase() db = ethdb.NewMemDatabase()
config = &params.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked} config = new(params.ChainConfig)
gspec = &core.Genesis{Config: config} genesis = (&core.Genesis{Config: config}).MustCommit(db)
genesis = gspec.MustCommit(db)
) )
blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil) // If checkpointing is enabled, create and inject a fake CHT and the corresponding
// challenge response.
var response *types.Header
if checkpoint {
index := uint64(rand.Intn(500))
number := (index+1)*params.CHTFrequencyClient - 1
response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}
cht := &params.TrustedCheckpoint{
SectionIndex: index,
SectionHead: response.Hash(),
}
params.TrustedCheckpoints[genesis.Hash()] = cht
defer delete(params.TrustedCheckpoints, genesis.Hash())
}
// Create a checkpoint aware protocol manager
blockchain, err := core.NewBlockChain(db, nil, config, ethash.NewFaker(), vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create new blockchain: %v", err) t.Fatalf("failed to create new blockchain: %v", err)
} }
pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, nil) pm, err := NewProtocolManager(config, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), new(testTxPool), ethash.NewFaker(), blockchain, db, nil)
if err != nil { if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err) t.Fatalf("failed to start test protocol manager: %v", err)
} }
pm.Start(1000) pm.Start(1000)
defer pm.Stop() defer pm.Stop()
// Connect a new peer and check that we receive the DAO challenge // Connect a new peer and check that we receive the checkpoint challenge
peer, _ := newTestPeer("peer", eth63, pm, true) peer, _ := newTestPeer("peer", eth63, pm, true)
defer peer.close() defer peer.close()
if checkpoint {
challenge := &getBlockHeadersData{ challenge := &getBlockHeadersData{
Origin: hashOrNumber{Number: config.DAOForkBlock.Uint64()}, Origin: hashOrNumber{Number: response.Number.Uint64()},
Amount: 1, Amount: 1,
Skip: 0, Skip: 0,
Reverse: false, Reverse: false,
@@ -500,28 +547,33 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
} }
// Create a block to reply to the challenge if no timeout is simulated // Create a block to reply to the challenge if no timeout is simulated
if !timeout { if !timeout {
blocks, _ := core.GenerateChain(&params.ChainConfig{}, genesis, ethash.NewFaker(), db, 1, func(i int, block *core.BlockGen) { if empty {
if remoteForked { if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{}); err != nil {
block.SetExtra(params.DAOForkBlockExtra)
}
})
if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{blocks[0].Header()}); err != nil {
t.Fatalf("failed to answer challenge: %v", err) t.Fatalf("failed to answer challenge: %v", err)
} }
time.Sleep(100 * time.Millisecond) // Sleep to avoid the verification racing with the drops } else if match {
} else { if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{response}); err != nil {
// Otherwise wait until the test timeout passes t.Fatalf("failed to answer challenge: %v", err)
time.Sleep(daoChallengeTimeout + 500*time.Millisecond)
}
// Verify that depending on fork side, the remote peer is maintained or dropped
if localForked == remoteForked && !timeout {
if peers := pm.peers.Len(); peers != 1 {
t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
} }
} else { } else {
if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{{Number: response.Number}}); err != nil {
t.Fatalf("failed to answer challenge: %v", err)
}
}
}
}
// Wait until the test timeout passes to ensure proper cleanup
time.Sleep(syncChallengeTimeout + 100*time.Millisecond)
// Verify that the remote peer is maintained or dropped
if drop {
if peers := pm.peers.Len(); peers != 0 { if peers := pm.peers.Len(); peers != 0 {
t.Fatalf("peer count mismatch: have %d, want %d", peers, 0) t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
} }
} else {
if peers := pm.peers.Len(); peers != 1 {
t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
}
} }
} }
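The fake CHT works because types.Header.Hash() is simply the keccak256 of the RLP-encoded header, so any synthetic header yields a stable SectionHead for the challenge to match. A minimal sketch:

h := &types.Header{Number: big.NewInt(1), Extra: []byte("valid")}
cht := &params.TrustedCheckpoint{SectionIndex: 0, SectionHead: h.Hash()}
// A challenge reply containing exactly h now satisfies the checkpoint check;
// any other header (or an empty reply during fast sync) gets the peer dropped.
_ = cht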


@@ -79,7 +79,7 @@ type peer struct {
rw p2p.MsgReadWriter rw p2p.MsgReadWriter
version int // Protocol version negotiated version int // Protocol version negotiated
forkDrop *time.Timer // Timed connection dropper if forks aren't validated in time syncDrop *time.Timer // Timed connection dropper if sync progress isn't validated in time
head common.Hash head common.Hash
td *big.Int td *big.Int


@@ -188,14 +188,12 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
atomic.StoreUint32(&pm.fastSync, 1) atomic.StoreUint32(&pm.fastSync, 1)
mode = downloader.FastSync mode = downloader.FastSync
} }
if mode == downloader.FastSync { if mode == downloader.FastSync {
// Make sure the peer's total difficulty we are synchronizing is higher. // Make sure the peer's total difficulty we are synchronizing is higher.
if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 { if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
return return
} }
} }
// Run the sync cycle, and disable fast sync if we've gone past the pivot block // Run the sync cycle, and disable fast sync if we've gone past the pivot block
if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil { if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
return return


@@ -557,7 +557,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
Number: header.Number, Number: header.Number,
Hash: header.Hash(), Hash: header.Hash(),
ParentHash: header.ParentHash, ParentHash: header.ParentHash,
Timestamp: header.Time, Timestamp: new(big.Int).SetUint64(header.Time),
Miner: author, Miner: author,
GasUsed: header.GasUsed, GasUsed: header.GasUsed,
GasLimit: header.GasLimit, GasLimit: header.GasLimit,


@@ -683,7 +683,7 @@ type CallArgs struct {
Data hexutil.Bytes `json:"data"` Data hexutil.Bytes `json:"data"`
} }
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) { func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr) state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
@@ -700,14 +700,18 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
} }
} }
// Set default gas & gas price if none were set // Set default gas & gas price if none were set
gas, gasPrice := uint64(args.Gas), args.GasPrice.ToInt() gas := uint64(args.Gas)
if gas == 0 { if gas == 0 {
gas = math.MaxUint64 / 2 gas = math.MaxUint64 / 2
} }
if globalGasCap != nil && globalGasCap.Uint64() < gas {
log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
gas = globalGasCap.Uint64()
}
gasPrice := args.GasPrice.ToInt()
if gasPrice.Sign() == 0 { if gasPrice.Sign() == 0 {
gasPrice = new(big.Int).SetUint64(defaultGasPrice) gasPrice = new(big.Int).SetUint64(defaultGasPrice)
} }
// Create new call message // Create new call message
msg := types.NewMessage(addr, args.To, 0, args.Value.ToInt(), gas, gasPrice, args.Data, false) msg := types.NewMessage(addr, args.To, 0, args.Value.ToInt(), gas, gasPrice, args.Data, false)
@@ -748,7 +752,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
// Call executes the given transaction on the state for the given block number. // Call executes the given transaction on the state for the given block number.
// It doesn't make any changes in the state/blockchain and is useful to execute and retrieve values. // It doesn't make any changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) { func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second) result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second, s.b.RPCGasCap())
return (hexutil.Bytes)(result), err return (hexutil.Bytes)(result), err
} }
@@ -771,13 +775,18 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
} }
hi = block.GasLimit() hi = block.GasLimit()
} }
gasCap := s.b.RPCGasCap()
if gasCap != nil && hi > gasCap.Uint64() {
log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
hi = gasCap.Uint64()
}
cap = hi cap = hi
// Create a helper to check if a gas allowance results in an executable transaction // Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool { executable := func(gas uint64) bool {
args.Gas = hexutil.Uint64(gas) args.Gas = hexutil.Uint64(gas)
_, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0) _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0, gasCap)
if err != nil || failed { if err != nil || failed {
return false return false
} }
@@ -795,7 +804,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
// Reject the transaction as invalid if it still fails at the highest allowance // Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap { if hi == cap {
if !executable(hi) { if !executable(hi) {
return 0, fmt.Errorf("gas required exceeds allowance or always failing transaction") return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap)
} }
} }
return hexutil.Uint64(hi), nil return hexutil.Uint64(hi), nil
@@ -882,7 +891,7 @@ func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]inter
"size": hexutil.Uint64(b.Size()), "size": hexutil.Uint64(b.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit), "gasLimit": hexutil.Uint64(head.GasLimit),
"gasUsed": hexutil.Uint64(head.GasUsed), "gasUsed": hexutil.Uint64(head.GasUsed),
"timestamp": (*hexutil.Big)(head.Time), "timestamp": hexutil.Uint64(head.Time),
"transactionsRoot": head.TxHash, "transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash, "receiptsRoot": head.ReceiptHash,
} }
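Both doCall and EstimateGas now apply the same one-line capping rule; a minimal sketch (the helper name is ours, not geth's):

func capGas(requested uint64, globalCap *big.Int) uint64 {
	// A nil cap means "no limit", mirroring the checks above.
	if globalCap != nil && globalCap.Uint64() < requested {
		return globalCap.Uint64()
	}
	return requested
}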


@@ -44,6 +44,7 @@ type Backend interface {
ChainDb() ethdb.Database ChainDb() ethdb.Database
EventMux() *event.TypeMux EventMux() *event.TypeMux
AccountManager() *accounts.Manager AccountManager() *accounts.Manager
RPCGasCap() *big.Int // global gas cap for eth_call over rpc: DoS protection
// BlockChain API // BlockChain API
SetHead(number uint64) SetHead(number uint64)


@@ -187,6 +187,10 @@ func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager return b.eth.accountManager
} }
func (b *LesApiBackend) RPCGasCap() *big.Int {
return b.eth.config.RPCGasCap
}
func (b *LesApiBackend) BloomStatus() (uint64, uint64) { func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
if b.eth.bloomIndexer == nil { if b.eth.bloomIndexer == nil {
return 0, 0 return 0, 0
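Operationally, the cap flows in from the eth/les service configuration; a hedged sketch of setting it programmatically (the RPCGasCap field is per this backport, the value below is purely illustrative):

cfg := eth.DefaultConfig
cfg.RPCGasCap = big.NewInt(50000000) // cap eth_call/eth_estimateGas at 50M gas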


@@ -153,9 +153,12 @@ func NewProtocolManager(chainConfig *params.ChainConfig, indexerConfig *light.In
if disableClientRemovePeer { if disableClientRemovePeer {
removePeer = func(id string) {} removePeer = func(id string) {}
} }
if lightSync { if lightSync {
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, nil, blockchain, removePeer) var checkpoint uint64
if cht, ok := params.TrustedCheckpoints[blockchain.Genesis().Hash()]; ok {
checkpoint = (cht.SectionIndex+1)*params.CHTFrequencyClient - 1
}
manager.downloader = downloader.New(downloader.LightSync, checkpoint, chainDb, manager.eventMux, nil, blockchain, removePeer)
manager.peers.notify((*downloaderPeerNotify)(manager)) manager.peers.notify((*downloaderPeerNotify)(manager))
manager.fetcher = newLightFetcher(manager) manager.fetcher = newLightFetcher(manager)
} }
@@ -324,7 +327,11 @@ func (pm *ProtocolManager) handle(p *peer) error {
} }
} }
var reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsV1Msg, SendTxMsg, SendTxV2Msg, GetTxStatusMsg, GetHeaderProofsMsg, GetProofsV2Msg, GetHelperTrieProofsMsg} var (
reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsV1Msg, SendTxMsg, SendTxV2Msg, GetTxStatusMsg, GetHeaderProofsMsg, GetProofsV2Msg, GetHelperTrieProofsMsg}
reqListV1 = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsV1Msg, SendTxMsg, GetHeaderProofsMsg}
reqListV2 = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, SendTxV2Msg, GetTxStatusMsg, GetProofsV2Msg, GetHelperTrieProofsMsg}
)
// handleMsg is invoked whenever an inbound message is received from a remote // handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error. // peer. The remote connection is torn down upon returning any error.


@@ -508,8 +508,9 @@ func TestTransactionStatusLes2(t *testing.T) {
test := func(tx *types.Transaction, send bool, expStatus txStatus) { test := func(tx *types.Transaction, send bool, expStatus txStatus) {
reqID++ reqID++
if send { if send {
cost := peer.GetRequestCost(SendTxV2Msg, 1) enc, _ := rlp.EncodeToBytes(types.Transactions{tx})
sendRequest(peer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx}) cost := peer.GetTxRelayCost(1, len(enc))
sendRequest(peer.app, SendTxV2Msg, reqID, cost, rlp.RawValue(enc))
} else { } else {
cost := peer.GetRequestCost(GetTxStatusMsg, 1) cost := peer.GetRequestCost(GetTxStatusMsg, 1)
sendRequest(peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()}) sendRequest(peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})


@@ -42,6 +42,11 @@ var (
const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam) const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
// if the total encoded size of a sent transaction batch is over txSizeCostLimit
// per transaction then the request cost is calculated as proportional to the
// encoded size instead of the transaction count
const txSizeCostLimit = 0x4000
const ( const (
announceTypeNone = iota announceTypeNone = iota
announceTypeSimple announceTypeSimple
@@ -163,7 +168,41 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
p.lock.RLock() p.lock.RLock()
defer p.lock.RUnlock() defer p.lock.RUnlock()
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount) costs := p.fcCosts[msgcode]
if costs == nil {
return 0
}
cost := costs.baseCost + costs.reqCost*uint64(amount)
if cost > p.fcServerParams.BufLimit {
cost = p.fcServerParams.BufLimit
}
return cost
}
func (p *peer) GetTxRelayCost(amount, size int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
var msgcode uint64
switch p.version {
case lpv1:
msgcode = SendTxMsg
case lpv2:
msgcode = SendTxV2Msg
default:
panic(nil)
}
costs := p.fcCosts[msgcode]
if costs == nil {
return 0
}
cost := costs.baseCost + costs.reqCost*uint64(amount)
sizeCost := costs.baseCost + costs.reqCost*uint64(size)/txSizeCostLimit
if sizeCost > cost {
cost = sizeCost
}
if cost > p.fcServerParams.BufLimit { if cost > p.fcServerParams.BufLimit {
cost = p.fcServerParams.BufLimit cost = p.fcServerParams.BufLimit
} }
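With txSizeCostLimit = 0x4000 (16 KiB), the relay cost is effectively max(count-based, size-based). Worked example: for baseCost 100, reqCost 10 and 2 transactions totalling 40000 encoded bytes, the count cost is 100 + 10*2 = 120 while the size cost is 100 + 10*40000/16384 = 124 (integer division), so the size cost applies. The core of the calculation, restated standalone:

func txRelayCost(base, req uint64, amount, size int) uint64 {
	cost := base + req*uint64(amount)
	if sizeCost := base + req*uint64(size)/txSizeCostLimit; sizeCost > cost {
		cost = sizeCost
	}
	return cost
}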
@@ -307,9 +346,9 @@ func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error
return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes) return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes)
} }
// SendTxStatus sends a batch of transactions to be added to the remote transaction pool. // SendTxs sends a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs types.Transactions) error { func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Fetching batch of transactions", "count", len(txs)) p.Log().Debug("Fetching batch of transactions", "size", len(txs))
switch p.version { switch p.version {
case lpv1: case lpv1:
return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
@@ -485,6 +524,20 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
p.fcServerParams = params p.fcServerParams = params
p.fcServer = flowcontrol.NewServerNode(params) p.fcServer = flowcontrol.NewServerNode(params)
p.fcCosts = MRC.decode() p.fcCosts = MRC.decode()
var checkList []uint64
switch p.version {
case lpv1:
checkList = reqListV1
case lpv2:
checkList = reqListV2
default:
panic(nil)
}
for _, msgCode := range checkList {
if p.fcCosts[msgCode] == nil {
return errResp(ErrUselessPeer, "peer does not support message %d", msgCode)
}
}
} }
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum} p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}


@@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
) )
type ltrInfo struct { type ltrInfo struct {
@@ -113,21 +114,22 @@ func (self *LesTxRelay) send(txs types.Transactions, count int) {
for p, list := range sendTo { for p, list := range sendTo {
pp := p pp := p
ll := list ll := list
enc, _ := rlp.EncodeToBytes(ll)
reqID := genReqID() reqID := genReqID()
rq := &distReq{ rq := &distReq{
getCost: func(dp distPeer) uint64 { getCost: func(dp distPeer) uint64 {
peer := dp.(*peer) peer := dp.(*peer)
return peer.GetRequestCost(SendTxMsg, len(ll)) return peer.GetTxRelayCost(len(ll), len(enc))
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
return dp.(*peer) == pp return dp.(*peer) == pp
}, },
request: func(dp distPeer) func() { request: func(dp distPeer) func() {
peer := dp.(*peer) peer := dp.(*peer)
cost := peer.GetRequestCost(SendTxMsg, len(ll)) cost := peer.GetTxRelayCost(len(ll), len(enc))
peer.fcServer.QueueRequest(reqID, cost) peer.fcServer.QueueRequest(reqID, cost)
return func() { peer.SendTxs(reqID, cost, ll) } return func() { peer.SendTxs(reqID, cost, enc) }
}, },
} }
self.reqDist.queue(rq) self.reqDist.queue(rq)
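The size fed into that cost is the actual wire encoding, computed once per peer batch via rlp.EncodeToBytes; a minimal usage sketch:

enc, err := rlp.EncodeToBytes(types.Transactions{tx1, tx2})
if err != nil {
	return err // encoding a well-formed transaction slice should not fail
}
cost := peer.GetTxRelayCost(2, len(enc)) // transaction count and encoded size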


@@ -100,7 +100,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
if bc.genesisBlock == nil { if bc.genesisBlock == nil {
return nil, core.ErrNoGenesis return nil, core.ErrNoGenesis
} }
if cp, ok := trustedCheckpoints[bc.genesisBlock.Hash()]; ok { if cp, ok := params.TrustedCheckpoints[bc.genesisBlock.Hash()]; ok {
bc.addTrustedCheckpoint(cp) bc.addTrustedCheckpoint(cp)
} }
if err := bc.loadLastState(); err != nil { if err := bc.loadLastState(); err != nil {
@@ -157,7 +157,7 @@ func (self *LightChain) loadLastState() error {
// Issue a status log and return // Issue a status log and return
header := self.hc.CurrentHeader() header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64()) headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0))) log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(header.Time), 0)))
return nil return nil
} }
@@ -488,7 +488,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
// Ensure the chain didn't move past the latest block while retrieving it // Ensure the chain didn't move past the latest block while retrieving it
if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() { if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0))) log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(int64(header.Time), 0)))
self.hc.SetCurrentHeader(header) self.hc.SetCurrentHeader(header)
} }
return true return true
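With header.Time now a plain uint64, converting to a time.Time for display is an explicit cast, as in the log lines above:

ts := header.Time // uint64 seconds since the Unix epoch
age := common.PrettyAge(time.Unix(int64(ts), 0))
log.Info("Loaded header", "age", age)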


@@ -104,13 +104,6 @@ var (
} }
) )
// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{
params.MainnetGenesisHash: params.MainnetTrustedCheckpoint,
params.TestnetGenesisHash: params.TestnetTrustedCheckpoint,
params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint,
}
var ( var (
ErrNoTrustedCht = errors.New("no trusted canonical hash trie") ErrNoTrustedCht = errors.New("no trusted canonical hash trie")
ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie") ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")


@@ -823,8 +823,8 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
tstart := time.Now() tstart := time.Now()
parent := w.chain.CurrentBlock() parent := w.chain.CurrentBlock()
if parent.Time().Cmp(new(big.Int).SetInt64(timestamp)) >= 0 { if parent.Time() >= uint64(timestamp) {
timestamp = parent.Time().Int64() + 1 timestamp = int64(parent.Time() + 1)
} }
// this will ensure we're not going off too far in the future // this will ensure we're not going off too far in the future
if now := time.Now().Unix(); timestamp > now+1 { if now := time.Now().Unix(); timestamp > now+1 {
@@ -839,7 +839,7 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
Number: num.Add(num, common.Big1), Number: num.Add(num, common.Big1),
GasLimit: core.CalcGasLimit(parent, w.gasFloor, w.gasCeil), GasLimit: core.CalcGasLimit(parent, w.gasFloor, w.gasCeil),
Extra: w.extra, Extra: w.extra,
Time: big.NewInt(timestamp), Time: uint64(timestamp),
} }
// Only set the coinbase if our consensus engine is running (avoid spurious block rewards) // Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
if w.isRunning() { if w.isRunning() {


@@ -109,7 +109,7 @@ func (h *Header) GetDifficulty() *BigInt { return &BigInt{h.header.Difficulty} }
func (h *Header) GetNumber() int64 { return h.header.Number.Int64() } func (h *Header) GetNumber() int64 { return h.header.Number.Int64() }
func (h *Header) GetGasLimit() int64 { return int64(h.header.GasLimit) } func (h *Header) GetGasLimit() int64 { return int64(h.header.GasLimit) }
func (h *Header) GetGasUsed() int64 { return int64(h.header.GasUsed) } func (h *Header) GetGasUsed() int64 { return int64(h.header.GasUsed) }
func (h *Header) GetTime() int64 { return h.header.Time.Int64() } func (h *Header) GetTime() int64 { return int64(h.header.Time) }
func (h *Header) GetExtra() []byte { return h.header.Extra } func (h *Header) GetExtra() []byte { return h.header.Extra }
func (h *Header) GetMixDigest() *Hash { return &Hash{h.header.MixDigest} } func (h *Header) GetMixDigest() *Hash { return &Hash{h.header.MixDigest} }
func (h *Header) GetNonce() *Nonce { return &Nonce{h.header.Nonce} } func (h *Header) GetNonce() *Nonce { return &Nonce{h.header.Nonce} }
@@ -180,7 +180,7 @@ func (b *Block) GetDifficulty() *BigInt { return &BigInt{b.block.Difficu
func (b *Block) GetNumber() int64 { return b.block.Number().Int64() } func (b *Block) GetNumber() int64 { return b.block.Number().Int64() }
func (b *Block) GetGasLimit() int64 { return int64(b.block.GasLimit()) } func (b *Block) GetGasLimit() int64 { return int64(b.block.GasLimit()) }
func (b *Block) GetGasUsed() int64 { return int64(b.block.GasUsed()) } func (b *Block) GetGasUsed() int64 { return int64(b.block.GasUsed()) }
func (b *Block) GetTime() int64 { return b.block.Time().Int64() } func (b *Block) GetTime() int64 { return int64(b.block.Time()) }
func (b *Block) GetExtra() []byte { return b.block.Extra() } func (b *Block) GetExtra() []byte { return b.block.Extra() }
func (b *Block) GetMixDigest() *Hash { return &Hash{b.block.MixDigest()} } func (b *Block) GetMixDigest() *Hash { return &Hash{b.block.MixDigest()} }
func (b *Block) GetNonce() int64 { return int64(b.block.Nonce()) } func (b *Block) GetNonce() int64 { return int64(b.block.Nonce()) }


@@ -313,7 +313,7 @@ func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
// Avoid recording failures on shutdown. // Avoid recording failures on shutdown.
reply <- nil reply <- nil
return return
} else if err != nil || len(r) == 0 { } else if len(r) == 0 {
fails++ fails++
tab.db.UpdateFindFails(n.ID(), n.IP(), fails) tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err) log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)


@@ -28,8 +28,18 @@ var (
MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
) )
// TrustedCheckpoints associates each known checkpoint with the genesis hash of
// the chain it belongs to.
var TrustedCheckpoints = map[common.Hash]*TrustedCheckpoint{
MainnetGenesisHash: MainnetTrustedCheckpoint,
TestnetGenesisHash: TestnetTrustedCheckpoint,
RinkebyGenesisHash: RinkebyTrustedCheckpoint,
GoerliGenesisHash: GoerliTrustedCheckpoint,
}
var ( var (
// MainnetChainConfig is the chain parameters to run a node on the main network. // MainnetChainConfig is the chain parameters to run a node on the main network.
MainnetChainConfig = &ChainConfig{ MainnetChainConfig = &ChainConfig{
@@ -50,10 +60,10 @@ var (
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{ MainnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "mainnet", Name: "mainnet",
SectionIndex: 216, SectionIndex: 227,
SectionHead: common.HexToHash("0xae3e551c8d60d06fd411a8e6008e90625d3bb0cbbf664b65d5ed90b318553541"), SectionHead: common.HexToHash("0xa2e0b25d72c2fc6e35a7f853cdacb193b4b4f95c606accf7f8fa8415283582c7"),
CHTRoot: common.HexToHash("0xeea7d2ab3545a37deecc66fc43c9556ae337c3ea1c6893e401428207bdb8e434"), CHTRoot: common.HexToHash("0xf69bdd4053b95b61a27b106a0e86103d791edd8574950dc96aa351ab9b9f1aa0"),
BloomRoot: common.HexToHash("0xb0d4176d160d67b99a9f963281e52bce0583a566b74b4497fe3ed24ae04004ff"), BloomRoot: common.HexToHash("0xec1b454d4c6322c78ccedf76ac922a8698c3cac4d98748a84af4995b7bd3d744"),
} }
// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network. // TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@@ -75,10 +85,10 @@ var (
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{ TestnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "testnet", Name: "testnet",
SectionIndex: 148, SectionIndex: 161,
SectionHead: common.HexToHash("0x4d3181bedb6aa96a6f3efa866c71f7802400d0fb4a6906946c453630d850efc0"), SectionHead: common.HexToHash("0x5378afa734e1feafb34bcca1534c4d96952b754579b96a4afb23d5301ecececc"),
CHTRoot: common.HexToHash("0x25df2f9d63a5f84b2852988f0f0f7af5a7877da061c11b85c812780b5a27a5ec"), CHTRoot: common.HexToHash("0x1cf2b071e7443a62914362486b613ff30f60cea0d9c268ed8c545f876a3ee60c"),
BloomRoot: common.HexToHash("0x0584834e5222471a06c669d210e302ca602780eaaddd04634fd65471c2a91419"), BloomRoot: common.HexToHash("0x5ac25c84bd18a9cbe878d4609a80220f57f85037a112644532412ba0d498a31b"),
} }
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@@ -93,7 +103,7 @@ var (
EIP158Block: big.NewInt(3), EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1035301), ByzantiumBlock: big.NewInt(1035301),
ConstantinopleBlock: big.NewInt(3660663), ConstantinopleBlock: big.NewInt(3660663),
PetersburgBlock: big.NewInt(9999999), //TODO! Insert Rinkeby block number PetersburgBlock: big.NewInt(4321234),
Clique: &CliqueConfig{ Clique: &CliqueConfig{
Period: 15, Period: 15,
Epoch: 30000, Epoch: 30000,
@@ -103,10 +113,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{ RinkebyTrustedCheckpoint = &TrustedCheckpoint{
Name: "rinkeby", Name: "rinkeby",
SectionIndex: 113, SectionIndex: 125,
SectionHead: common.HexToHash("0xb812f3095af3af1cb2de7d7c2086ee807736a7315992c461b0986699185daf77"), SectionHead: common.HexToHash("0x8a738386f6bb34add15846f8f49c4c519a2f32519096e792b9f43bcb407c831c"),
CHTRoot: common.HexToHash("0x5416d0924925eb835987ad3d1f059ecc66778c51959c8246a7a35b22ec5f3109"), CHTRoot: common.HexToHash("0xa1e5720a9bad4dce794f129e4ac6744398197b652868011486a6f89c8ec84a75"),
BloomRoot: common.HexToHash("0xcf74ca2c14e843b366561dab4fc64237bf6bb335119cbc97d723f3b501863470"), BloomRoot: common.HexToHash("0xa3048fe8b7e30f77f11bc755a88478363d7d3e71c2bdfe4e8ab9e269cd804ba2"),
} }
// GoerliChainConfig contains the chain parameters to run a node on the Görli test network. // GoerliChainConfig contains the chain parameters to run a node on the Görli test network.
@@ -130,10 +140,10 @@ var (
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{ GoerliTrustedCheckpoint = &TrustedCheckpoint{
Name: "goerli", Name: "goerli",
SectionIndex: 0, SectionIndex: 9,
SectionHead: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), SectionHead: common.HexToHash("0x8e223d827391eee53b07cb8ee057dbfa11c93e0b45352188c783affd7840a921"),
CHTRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), CHTRoot: common.HexToHash("0xe0a817ac69b36c1e437c5b0cff9e764853f5115702b5f66d451b665d6afb7e78"),
BloomRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), BloomRoot: common.HexToHash("0x50d672aeb655b723284969c7c1201fb6ca003c23ed144bcb9f2d1b30e2971c1b"),
} }
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced // AllEthashProtocolChanges contains every protocol change (EIPs) introduced
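Client code keys the newly exported map by genesis hash, so private networks simply have no entry and skip checkpoint enforcement; a minimal lookup sketch:

if cp, ok := params.TrustedCheckpoints[params.GoerliGenesisHash]; ok {
	fmt.Println("goerli checkpoint section", cp.SectionIndex, "CHT root", cp.CHTRoot.Hex())
}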


@@ -23,7 +23,7 @@ import (
const ( const (
VersionMajor = 1 // Major version component of the current release VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release VersionMinor = 8 // Minor version component of the current release
VersionPatch = 23 // Patch version component of the current release VersionPatch = 27 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string VersionMeta = "stable" // Version metadata to append to the version string
) )


@@ -23,7 +23,7 @@ import (
const ( const (
VersionMajor = 0 // Major version component of the current release VersionMajor = 0 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release VersionMinor = 3 // Minor version component of the current release
VersionPatch = 11 // Patch version component of the current release VersionPatch = 15 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string VersionMeta = "stable" // Version metadata to append to the version string
) )


@@ -82,7 +82,7 @@ type btHeader struct {
Difficulty *big.Int Difficulty *big.Int
GasLimit uint64 GasLimit uint64
GasUsed uint64 GasUsed uint64
Timestamp *big.Int Timestamp uint64
} }
type btHeaderMarshaling struct { type btHeaderMarshaling struct {
@@ -91,7 +91,7 @@ type btHeaderMarshaling struct {
Difficulty *math.HexOrDecimal256 Difficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64 GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64 GasUsed math.HexOrDecimal64
Timestamp *math.HexOrDecimal256 Timestamp math.HexOrDecimal64
} }
func (t *BlockTest) Run() error { func (t *BlockTest) Run() error {
@@ -146,7 +146,7 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{ return &core.Genesis{
Config: config, Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(), Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp.Uint64(), Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash, ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData, ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit, GasLimit: t.json.Genesis.GasLimit,
@@ -248,7 +248,7 @@ func validateHeader(h *btHeader, h2 *types.Header) error {
if h.GasUsed != h2.GasUsed { if h.GasUsed != h2.GasUsed {
return fmt.Errorf("GasUsed: want: %d have: %d", h.GasUsed, h2.GasUsed) return fmt.Errorf("GasUsed: want: %d have: %d", h.GasUsed, h2.GasUsed)
} }
if h.Timestamp.Cmp(h2.Time) != 0 { if h.Timestamp != h2.Time {
return fmt.Errorf("Timestamp: want: %v have: %v", h.Timestamp, h2.Time) return fmt.Errorf("Timestamp: want: %v have: %v", h.Timestamp, h2.Time)
} }
return nil return nil


@@ -30,18 +30,18 @@ import (
//go:generate gencodec -type DifficultyTest -field-override difficultyTestMarshaling -out gen_difficultytest.go //go:generate gencodec -type DifficultyTest -field-override difficultyTestMarshaling -out gen_difficultytest.go
type DifficultyTest struct { type DifficultyTest struct {
ParentTimestamp *big.Int `json:"parentTimestamp"` ParentTimestamp uint64 `json:"parentTimestamp"`
ParentDifficulty *big.Int `json:"parentDifficulty"` ParentDifficulty *big.Int `json:"parentDifficulty"`
UncleHash common.Hash `json:"parentUncles"` UncleHash common.Hash `json:"parentUncles"`
CurrentTimestamp *big.Int `json:"currentTimestamp"` CurrentTimestamp uint64 `json:"currentTimestamp"`
CurrentBlockNumber uint64 `json:"currentBlockNumber"` CurrentBlockNumber uint64 `json:"currentBlockNumber"`
CurrentDifficulty *big.Int `json:"currentDifficulty"` CurrentDifficulty *big.Int `json:"currentDifficulty"`
} }
type difficultyTestMarshaling struct { type difficultyTestMarshaling struct {
ParentTimestamp *math.HexOrDecimal256 ParentTimestamp math.HexOrDecimal64
ParentDifficulty *math.HexOrDecimal256 ParentDifficulty *math.HexOrDecimal256
CurrentTimestamp *math.HexOrDecimal256 CurrentTimestamp math.HexOrDecimal64
CurrentDifficulty *math.HexOrDecimal256 CurrentDifficulty *math.HexOrDecimal256
UncleHash common.Hash UncleHash common.Hash
CurrentBlockNumber math.HexOrDecimal64 CurrentBlockNumber math.HexOrDecimal64
@@ -56,7 +56,7 @@ func (test *DifficultyTest) Run(config *params.ChainConfig) error {
UncleHash: test.UncleHash, UncleHash: test.UncleHash,
} }
actual := ethash.CalcDifficulty(config, test.CurrentTimestamp.Uint64(), parent) actual := ethash.CalcDifficulty(config, test.CurrentTimestamp, parent)
exp := test.CurrentDifficulty exp := test.CurrentDifficulty
if actual.Cmp(exp) != 0 { if actual.Cmp(exp) != 0 {
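After the switch, difficulty tests feed uint64 timestamps straight through; a hedged sketch of the parent construction implied by the hunk above:

parent := &types.Header{
	Number:     new(big.Int).SetUint64(test.CurrentBlockNumber - 1),
	Time:       test.ParentTimestamp, // plain uint64, no .Uint64() call anymore
	Difficulty: test.ParentDifficulty,
	UncleHash:  test.UncleHash,
}
actual := ethash.CalcDifficulty(config, test.CurrentTimestamp, parent)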


@@ -14,6 +14,7 @@ import (
var _ = (*btHeaderMarshaling)(nil) var _ = (*btHeaderMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (b btHeader) MarshalJSON() ([]byte, error) { func (b btHeader) MarshalJSON() ([]byte, error) {
type btHeader struct { type btHeader struct {
Bloom types.Bloom Bloom types.Bloom
@@ -31,7 +32,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
Difficulty *math.HexOrDecimal256 Difficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64 GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64 GasUsed math.HexOrDecimal64
Timestamp *math.HexOrDecimal256 Timestamp math.HexOrDecimal64
} }
var enc btHeader var enc btHeader
enc.Bloom = b.Bloom enc.Bloom = b.Bloom
@@ -49,10 +50,11 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
enc.Difficulty = (*math.HexOrDecimal256)(b.Difficulty) enc.Difficulty = (*math.HexOrDecimal256)(b.Difficulty)
enc.GasLimit = math.HexOrDecimal64(b.GasLimit) enc.GasLimit = math.HexOrDecimal64(b.GasLimit)
enc.GasUsed = math.HexOrDecimal64(b.GasUsed) enc.GasUsed = math.HexOrDecimal64(b.GasUsed)
enc.Timestamp = (*math.HexOrDecimal256)(b.Timestamp) enc.Timestamp = math.HexOrDecimal64(b.Timestamp)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
// UnmarshalJSON unmarshals from JSON.
func (b *btHeader) UnmarshalJSON(input []byte) error { func (b *btHeader) UnmarshalJSON(input []byte) error {
type btHeader struct { type btHeader struct {
Bloom *types.Bloom Bloom *types.Bloom
@@ -70,7 +72,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
Difficulty *math.HexOrDecimal256 Difficulty *math.HexOrDecimal256
GasLimit *math.HexOrDecimal64 GasLimit *math.HexOrDecimal64
GasUsed *math.HexOrDecimal64 GasUsed *math.HexOrDecimal64
Timestamp *math.HexOrDecimal256 Timestamp *math.HexOrDecimal64
} }
var dec btHeader var dec btHeader
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@@ -122,7 +124,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
b.GasUsed = uint64(*dec.GasUsed) b.GasUsed = uint64(*dec.GasUsed)
} }
if dec.Timestamp != nil { if dec.Timestamp != nil {
b.Timestamp = (*big.Int)(dec.Timestamp) b.Timestamp = uint64(*dec.Timestamp)
} }
return nil return nil
} }


@@ -12,31 +12,33 @@ import (
var _ = (*difficultyTestMarshaling)(nil) var _ = (*difficultyTestMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (d DifficultyTest) MarshalJSON() ([]byte, error) { func (d DifficultyTest) MarshalJSON() ([]byte, error) {
type DifficultyTest struct { type DifficultyTest struct {
ParentTimestamp *math.HexOrDecimal256 `json:"parentTimestamp"` ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
UncleHash common.Hash `json:"parentUncles"` UncleHash common.Hash `json:"parentUncles"`
CurrentTimestamp *math.HexOrDecimal256 `json:"currentTimestamp"` CurrentTimestamp math.HexOrDecimal64 `json:"currentTimestamp"`
CurrentBlockNumber math.HexOrDecimal64 `json:"currentBlockNumber"` CurrentBlockNumber math.HexOrDecimal64 `json:"currentBlockNumber"`
CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"` CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
} }
var enc DifficultyTest var enc DifficultyTest
enc.ParentTimestamp = (*math.HexOrDecimal256)(d.ParentTimestamp) enc.ParentTimestamp = math.HexOrDecimal64(d.ParentTimestamp)
enc.ParentDifficulty = (*math.HexOrDecimal256)(d.ParentDifficulty) enc.ParentDifficulty = (*math.HexOrDecimal256)(d.ParentDifficulty)
enc.UncleHash = d.UncleHash enc.UncleHash = d.UncleHash
enc.CurrentTimestamp = (*math.HexOrDecimal256)(d.CurrentTimestamp) enc.CurrentTimestamp = math.HexOrDecimal64(d.CurrentTimestamp)
enc.CurrentBlockNumber = math.HexOrDecimal64(d.CurrentBlockNumber) enc.CurrentBlockNumber = math.HexOrDecimal64(d.CurrentBlockNumber)
enc.CurrentDifficulty = (*math.HexOrDecimal256)(d.CurrentDifficulty) enc.CurrentDifficulty = (*math.HexOrDecimal256)(d.CurrentDifficulty)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
// UnmarshalJSON unmarshals from JSON.
func (d *DifficultyTest) UnmarshalJSON(input []byte) error { func (d *DifficultyTest) UnmarshalJSON(input []byte) error {
type DifficultyTest struct { type DifficultyTest struct {
ParentTimestamp *math.HexOrDecimal256 `json:"parentTimestamp"` ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
UncleHash *common.Hash `json:"parentUncles"` UncleHash *common.Hash `json:"parentUncles"`
CurrentTimestamp *math.HexOrDecimal256 `json:"currentTimestamp"` CurrentTimestamp *math.HexOrDecimal64 `json:"currentTimestamp"`
CurrentBlockNumber *math.HexOrDecimal64 `json:"currentBlockNumber"` CurrentBlockNumber *math.HexOrDecimal64 `json:"currentBlockNumber"`
CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"` CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
} }
@@ -45,7 +47,7 @@ func (d *DifficultyTest) UnmarshalJSON(input []byte) error {
return err return err
} }
if dec.ParentTimestamp != nil { if dec.ParentTimestamp != nil {
d.ParentTimestamp = (*big.Int)(dec.ParentTimestamp) d.ParentTimestamp = uint64(*dec.ParentTimestamp)
} }
if dec.ParentDifficulty != nil { if dec.ParentDifficulty != nil {
d.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) d.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
@@ -54,7 +56,7 @@ func (d *DifficultyTest) UnmarshalJSON(input []byte) error {
d.UncleHash = *dec.UncleHash d.UncleHash = *dec.UncleHash
} }
if dec.CurrentTimestamp != nil { if dec.CurrentTimestamp != nil {
d.CurrentTimestamp = (*big.Int)(dec.CurrentTimestamp) d.CurrentTimestamp = uint64(*dec.CurrentTimestamp)
} }
if dec.CurrentBlockNumber != nil { if dec.CurrentBlockNumber != nil {
d.CurrentBlockNumber = uint64(*dec.CurrentBlockNumber) d.CurrentBlockNumber = uint64(*dec.CurrentBlockNumber)

View File

@@ -17,6 +17,7 @@
package trie package trie
import ( import (
"errors"
"fmt" "fmt"
"io" "io"
"sync" "sync"
@@ -391,6 +392,10 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node {
// Node retrieves an encoded cached trie node from memory. If it cannot be found // Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content. // cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) { func (db *Database) Node(hash common.Hash) ([]byte, error) {
// It doesn't make sense to retrieve the metaroot
if hash == (common.Hash{}) {
return nil, errors.New("not found")
}
// Retrieve the node from the clean cache if available // Retrieve the node from the clean cache if available
if db.cleans != nil { if db.cleans != nil {
if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {

trie/database_test.go (new file, 33 lines)

@@ -0,0 +1,33 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)
// Tests that the trie database returns a missing trie node error if attempting
// to retrieve the meta root.
func TestDatabaseMetarootFetch(t *testing.T) {
db := NewDatabase(ethdb.NewMemDatabase())
if _, err := db.Node(common.Hash{}); err == nil {
t.Fatalf("metaroot retrieval succeeded")
}
}
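One clarification of ours, not part of the change: the all-zero hash guarded against above is the "metaroot" sentinel and is distinct from the root of an empty trie, which is a real, retrievable node:

metaroot := common.Hash{} // sentinel, never a stored node
// keccak256(rlp("")): the canonical empty-trie root, a legitimate lookup key
emptyRoot := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
fmt.Println(metaroot != emptyRoot) // true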