Merge pull request #19370 from karalabe/geth-1.8.24

Backport PR for the v1.8.24 maintenance release

commit 4e13a09c50

.travis.yml (16 changed lines)
@@ -4,7 +4,7 @@ sudo: false
 matrix:
   include:
     - os: linux
-      dist: trusty
+      dist: xenial
      sudo: required
      go: 1.10.x
      script:
@@ -16,7 +16,7 @@ matrix:

    # These are the latest Go versions.
    - os: linux
-      dist: trusty
+      dist: xenial
      sudo: required
      go: 1.11.x
      script:
@@ -43,7 +43,7 @@ matrix:

    # This builder only tests code linters on latest version of Go
    - os: linux
-      dist: trusty
+      dist: xenial
      go: 1.11.x
      env:
        - lint
@@ -55,7 +55,7 @@ matrix:
    # This builder does the Ubuntu PPA upload
    - if: type = push
      os: linux
-      dist: trusty
+      dist: xenial
      go: 1.11.x
      env:
        - ubuntu-ppa
@@ -77,7 +77,7 @@ matrix:
    # This builder does the Linux Azure uploads
    - if: type = push
      os: linux
-      dist: trusty
+      dist: xenial
      sudo: required
      go: 1.11.x
      env:
@@ -111,7 +111,7 @@ matrix:
    # This builder does the Linux Azure MIPS xgo uploads
    - if: type = push
      os: linux
-      dist: trusty
+      dist: xenial
      services:
        - docker
      go: 1.11.x
@@ -139,7 +139,7 @@ matrix:
    # This builder does the Android Maven and Azure uploads
    - if: type = push
      os: linux
-      dist: trusty
+      dist: xenial
      addons:
        apt:
          packages:
@@ -206,7 +206,7 @@ matrix:
    # This builder does the Azure archive purges to avoid accumulating junk
    - if: type = cron
      os: linux
-      dist: trusty
+      dist: xenial
      go: 1.11.x
      env:
        - azure-purge
@@ -579,7 +579,7 @@ func (f *faucet) loop() {
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
- timestamp := time.Unix(head.Time.Int64(), 0)
+ timestamp := time.Unix(int64(head.Time), 0)
if time.Since(timestamp) > time.Hour {
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
continue
@@ -125,8 +125,6 @@ var (
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.ConstantinopleOverrideFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
utils.MetricsEnabledFlag,
utils.FakePoWFlag,
@@ -150,6 +148,7 @@ var (
utils.WSAllowedOriginsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
+ utils.RPCGlobalGasCap,
}

whisperFlags = []cli.Flag{
@@ -153,6 +153,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.RPCListenAddrFlag,
utils.RPCPortFlag,
utils.RPCApiFlag,
+ utils.RPCGlobalGasCap,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
utils.WSPortFlag,
@@ -411,6 +411,10 @@ var (
Name: "vmdebug",
Usage: "Record information useful for VM and contract debugging",
}
+ RPCGlobalGasCap = cli.Uint64Flag{
+ Name: "rpc.gascap",
+ Usage: "Sets a cap on gas that can be used in eth_call/estimateGas",
+ }
// Logging and debug settings
EthStatsURLFlag = cli.StringFlag{
Name: "ethstats",
@@ -1256,6 +1260,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
}
+ if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
+ cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name))
+ }

// Override any default configs for hard coded networks.
switch {
common/fdlimit/fdlimit_darwin.go (new file, 71 lines)

@@ -0,0 +1,71 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fdlimit

import "syscall"

// hardlimit is the number of file descriptors allowed at max by the kernel.
const hardlimit = 10240

// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
	// Get the current limit
	var limit syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	// Try to update the limit to the max allowance
	limit.Cur = limit.Max
	if limit.Cur > max {
		limit.Cur = max
	}
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	// MacOS can silently apply further caps, so retrieve the actually set limit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	return limit.Cur, nil
}

// Current retrieves the number of file descriptors allowed to be opened by this
// process.
func Current() (int, error) {
	var limit syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	return int(limit.Cur), nil
}

// Maximum retrieves the maximum number of file descriptors this process is
// allowed to request for itself.
func Maximum() (int, error) {
	// Retrieve the maximum allowed by dynamic OS limits
	var limit syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	// Cap it to OPEN_MAX (10240) because macos is a special snowflake
	if limit.Max > hardlimit {
		limit.Max = hardlimit
	}
	return int(limit.Max), nil
}
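A minimal usage sketch (not part of this diff) of the API defined in the file above, showing how a process might raise its descriptor allowance at startup. Only the three functions shown above are used; the surrounding program is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/fdlimit"
)

func main() {
	// Query the hard ceiling the OS will let this process request.
	max, err := fdlimit.Maximum()
	if err != nil {
		log.Fatalf("failed to query fd limit: %v", err)
	}
	// Try to raise the soft limit up to that ceiling; the returned value is
	// what the kernel actually granted (macOS may cap it at 10240).
	granted, err := fdlimit.Raise(uint64(max))
	if err != nil {
		log.Fatalf("failed to raise fd limit: %v", err)
	}
	cur, _ := fdlimit.Current()
	fmt.Printf("fd limit raised to %d (current: %d)\n", granted, cur)
}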
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

- // +build linux darwin netbsd openbsd solaris
+ // +build linux netbsd openbsd solaris

package fdlimit
@@ -18,6 +18,7 @@ package fdlimit

import "fmt"

// hardlimit is the number of file descriptors allowed at max by the kernel.
const hardlimit = 16384

// Raise tries to maximize the file descriptor allowance of this process
@@ -279,7 +279,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
number := header.Number.Uint64()

// Don't waste time checking blocks from the future
- if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
+ if header.Time > uint64(time.Now().Unix()) {
return consensus.ErrFutureBlock
}
// Checkpoint blocks need to enforce zero beneficiary
@@ -351,7 +351,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
return consensus.ErrUnknownAncestor
}
- if parent.Time.Uint64()+c.config.Period > header.Time.Uint64() {
+ if parent.Time+c.config.Period > header.Time {
return ErrInvalidTimestamp
}
// Retrieve the snapshot needed to verify this header and cache it
@@ -570,9 +570,9 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
if parent == nil {
return consensus.ErrUnknownAncestor
}
- header.Time = new(big.Int).Add(parent.Time, new(big.Int).SetUint64(c.config.Period))
- if header.Time.Int64() < time.Now().Unix() {
- header.Time = big.NewInt(time.Now().Unix())
+ header.Time = parent.Time + c.config.Period
+ if header.Time < uint64(time.Now().Unix()) {
+ header.Time = uint64(time.Now().Unix())
}
return nil
}
@@ -637,7 +637,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
}
}
// Sweet, the protocol permits us to sign the block, wait for our time
- delay := time.Unix(header.Time.Int64(), 0).Sub(time.Now()) // nolint: gosimple
+ delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple
if header.Difficulty.Cmp(diffNoTurn) == 0 {
// It's not our turn explicitly to sign, delay it a bit
wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime
@@ -716,7 +716,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
Difficulty: big.NewInt(167925187834220),
GasLimit: 4015682,
GasUsed: 0,
- Time: big.NewInt(1488928920),
+ Time: 1488928920,
Extra: []byte("www.bw.com"),
MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
Nonce: types.EncodeNonce(0xf400cd0006070c49),
@@ -63,7 +63,6 @@ var (
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
- errLargeBlockTime = errors.New("timestamp too big")
errZeroBlockTime = errors.New("timestamp equals parent's")
errTooManyUncles = errors.New("too many uncles")
errDuplicateUncle = errors.New("duplicate uncle")
@@ -242,20 +241,16 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
}
// Verify the header's timestamp
- if uncle {
- if header.Time.Cmp(math.MaxBig256) > 0 {
- return errLargeBlockTime
- }
- } else {
- if header.Time.Cmp(big.NewInt(time.Now().Add(allowedFutureBlockTime).Unix())) > 0 {
+ if !uncle {
+ if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) {
return consensus.ErrFutureBlock
}
}
- if header.Time.Cmp(parent.Time) <= 0 {
+ if header.Time <= parent.Time {
return errZeroBlockTime
}
// Verify the block's difficulty based in it's timestamp and parent's difficulty
- expected := ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
+ expected := ethash.CalcDifficulty(chain, header.Time, parent)

if expected.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
@@ -349,7 +344,7 @@ func makeDifficultyCalculator(bombDelay *big.Int) func(time uint64, parent *type
// ) + 2^(periodCount - 2)

bigTime := new(big.Int).SetUint64(time)
- bigParentTime := new(big.Int).Set(parent.Time)
+ bigParentTime := new(big.Int).SetUint64(parent.Time)

// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
@@ -408,7 +403,7 @@ func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
// ) + 2^(periodCount - 2)

bigTime := new(big.Int).SetUint64(time)
- bigParentTime := new(big.Int).Set(parent.Time)
+ bigParentTime := new(big.Int).SetUint64(parent.Time)

// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
@@ -456,7 +451,7 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
bigParentTime := new(big.Int)

bigTime.SetUint64(time)
- bigParentTime.Set(parent.Time)
+ bigParentTime.SetUint64(parent.Time)

if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
diff.Add(parent.Difficulty, adjust)
@@ -558,7 +553,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
if parent == nil {
return consensus.ErrUnknownAncestor
}
- header.Difficulty = ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
+ header.Difficulty = ethash.CalcDifficulty(chain, header.Time, parent)
return nil
}
@@ -76,7 +76,7 @@ func TestCalcDifficulty(t *testing.T) {
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
Number: number,
- Time: new(big.Int).SetUint64(test.ParentTimestamp),
+ Time: test.ParentTimestamp,
Difficulty: test.ParentDifficulty,
})
if diff.Cmp(test.CurrentDifficulty) != 0 {
@@ -267,9 +267,9 @@ func (bc *BlockChain) loadLastState() error {
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

- log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
- log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
- log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
+ log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
+ log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
+ log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

return nil
}
@@ -894,7 +894,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [

context := []interface{}{
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
- "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
+ "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
"size", common.StorageSize(bytes),
}
if stats.ignored > 0 {
@@ -1058,8 +1058,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time().Cmp(max) > 0 {
+ max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
+ if block.Time() > max {
return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
@@ -1391,21 +1391,25 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i
return 0, nil, nil, nil
}

- // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
- // to be part of the new canonical chain and accumulates potential missing transactions and post an
- // event about them
+ // reorg takes two blocks, an old chain and a new chain and will reconstruct the
+ // blocks and inserts them to be part of the new canonical chain and accumulates
+ // potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
newChain types.Blocks
oldChain types.Blocks
commonBlock *types.Block

deletedTxs types.Transactions
addedTxs types.Transactions

deletedLogs []*types.Log
rebirthLogs []*types.Log

// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
- // These logs are later announced as deleted.
- collectLogs = func(hash common.Hash) {
+ // Coalesce logs and set 'Removed'.
+ // These logs are later announced as deleted or reborn
+ collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return
@@ -1413,53 +1417,60 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
receipts := rawdb.ReadReceipts(bc.db, hash, *number)
for _, receipt := range receipts {
for _, log := range receipt.Logs {
- del := *log
- del.Removed = true
- deletedLogs = append(deletedLogs, &del)
+ l := *log
+ if removed {
+ l.Removed = true
+ deletedLogs = append(deletedLogs, &l)
+ } else {
+ rebirthLogs = append(rebirthLogs, &l)
+ }
}
}
}
)

- // first reduce whoever is higher bound
+ // Reduce the longer chain to the same number as the shorter one
if oldBlock.NumberU64() > newBlock.NumberU64() {
- // reduce old chain
+ // Old chain is longer, gather all transactions and logs as deleted ones
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

- collectLogs(oldBlock.Hash())
+ collectLogs(oldBlock.Hash(), true)
}
} else {
- // reduce new chain and append new chain blocks for inserting later on
+ // New chain is longer, stash all blocks away for subsequent insertion
for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
newChain = append(newChain, newBlock)
}
}
if oldBlock == nil {
- return fmt.Errorf("Invalid old chain")
+ return fmt.Errorf("invalid old chain")
}
if newBlock == nil {
- return fmt.Errorf("Invalid new chain")
+ return fmt.Errorf("invalid new chain")
}

+ // Both sides of the reorg are at the same number, reduce both until the common
+ // ancestor is found
for {
+ // If the common ancestor was found, bail out
if oldBlock.Hash() == newBlock.Hash() {
commonBlock = oldBlock
break
}

+ // Remove an old block as well as stash away a new block
oldChain = append(oldChain, oldBlock)
newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
- collectLogs(oldBlock.Hash())
+ collectLogs(oldBlock.Hash(), true)

- oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+ newChain = append(newChain, newBlock)
+
+ // Step back with both chains
+ oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
if oldBlock == nil {
- return fmt.Errorf("Invalid old chain")
+ return fmt.Errorf("invalid old chain")
}
+ newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if newBlock == nil {
- return fmt.Errorf("Invalid new chain")
+ return fmt.Errorf("invalid new chain")
}
}
// Ensure the user sees large reorgs
@@ -1474,35 +1485,46 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
}
// Insert the new chain, taking care of the proper incremental order
- var addedTxs types.Transactions
for i := len(newChain) - 1; i >= 0; i-- {
- // insert the block in the canonical way, re-writing history
+ // Insert the block in the canonical way, re-writing history
bc.insert(newChain[i])
- // write lookup entries for hash based transaction/receipt searches

+ // Collect reborn logs due to chain reorg (except head block (reverse order))
+ if i != 0 {
+ collectLogs(newChain[i].Hash(), false)
+ }
+ // Write lookup entries for hash based transaction/receipt searches
rawdb.WriteTxLookupEntries(bc.db, newChain[i])
addedTxs = append(addedTxs, newChain[i].Transactions()...)
}
- // calculate the difference between deleted and added transactions
- diff := types.TxDifference(deletedTxs, addedTxs)
- // When transactions get deleted from the database that means the
- // receipts that were created in the fork must also be deleted
+ // When transactions get deleted from the database, the receipts that were
+ // created in the fork must also be deleted
batch := bc.db.NewBatch()
- for _, tx := range diff {
+ for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
rawdb.DeleteTxLookupEntry(batch, tx.Hash())
}
batch.Write()

+ // If any logs need to be fired, do it now. In theory we could avoid creating
+ // this goroutine if there are no events to fire, but realistcally that only
+ // ever happens if we're reorging empty blocks, which will only happen on idle
+ // networks where performance is not an issue either way.
+ //
+ // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
+ // event ordering?
+ go func() {
if len(deletedLogs) > 0 {
- go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
}
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(rebirthLogs)
+ }
if len(oldChain) > 0 {
- go func() {
for _, block := range oldChain {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
- }()
}

+ }()
return nil
}
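The reorg above now pushes removed-log and reborn-log events through the chain's feeds from a single goroutine. A small sketch (not part of the diff) of how a consumer might watch both feeds, using the subscription methods exercised by the tests later in this change; the channel sizes and loop structure are illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// watchReorgLogs subscribes to both log feeds fed by the reorg path above.
func watchReorgLogs(bc *core.BlockChain) {
	logsCh := make(chan []*types.Log, 16)
	rmLogsCh := make(chan core.RemovedLogsEvent, 16)

	logsSub := bc.SubscribeLogsEvent(logsCh)
	rmSub := bc.SubscribeRemovedLogsEvent(rmLogsCh)
	defer logsSub.Unsubscribe()
	defer rmSub.Unsubscribe()

	for {
		select {
		case logs := <-logsCh:
			// New and reborn logs, emitted for the new canonical chain.
			fmt.Println("live logs:", len(logs))
		case ev := <-rmLogsCh:
			// Logs reverted by the reorg arrive with Removed set to true.
			fmt.Println("removed logs:", len(ev.Logs))
		}
	}
}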
@@ -60,7 +60,7 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
}
- if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+ if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)
@@ -884,7 +884,6 @@ func TestChainTxReorgs(t *testing.T) {
}

func TestLogReorgs(t *testing.T) {

var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
@@ -930,6 +929,213 @@ func TestLogReorgs(t *testing.T) {
}
}

func TestLogRebirth(t *testing.T) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
db = ethdb.NewMemDatabase()

// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
)

// listenNewLog checks whether the received logs number is equal with expected.
listenNewLog := func(sink chan []*types.Log, expect int) {
cnt := 0
for {
select {
case logs := <-sink:
cnt += len(logs)
case <-time.NewTimer(5 * time.Second).C:
// new logs timeout
newLogCh <- false
return
}
if cnt == expect {
break
} else if cnt > expect {
// redundant logs received
newLogCh <- false
return
}
}
select {
case <-sink:
// redundant logs received
newLogCh <- false
case <-time.NewTimer(100 * time.Millisecond).C:
newLogCh <- true
}
}

blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()

logsCh := make(chan []*types.Log)
blockchain.SubscribeLogsEvent(logsCh)

rmLogsCh := make(chan RemovedLogsEvent)
blockchain.SubscribeRemovedLogsEvent(rmLogsCh)

chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
}
})

// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
}

// Generate long reorg chain
forkChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
// Higher block difficulty
gen.OffsetTime(-9)
}
})

// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
if _, err := blockchain.InsertChain(forkChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}

newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
go listenNewLog(logsCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}
// Rebirth logs should omit a newLogEvent
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
}
}

func TestSideLogRebirth(t *testing.T) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
db = ethdb.NewMemDatabase()

// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
)

// listenNewLog checks whether the received logs number is equal with expected.
listenNewLog := func(sink chan []*types.Log, expect int) {
cnt := 0
for {
select {
case logs := <-sink:
cnt += len(logs)
case <-time.NewTimer(5 * time.Second).C:
// new logs timeout
newLogCh <- false
return
}
if cnt == expect {
break
} else if cnt > expect {
// redundant logs received
newLogCh <- false
return
}
}
select {
case <-sink:
// redundant logs received
newLogCh <- false
case <-time.NewTimer(100 * time.Millisecond).C:
newLogCh <- true
}
}

blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()

logsCh := make(chan []*types.Log)
blockchain.SubscribeLogsEvent(logsCh)

chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
// Higher block difficulty
gen.OffsetTime(-9)
}
})
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}

// Generate side chain with lower difficulty
sideChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
}
})
if _, err := blockchain.InsertChain(sideChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}

// Generate a new block based on side chain
newBlocks, _ := GenerateChain(params.TestChainConfig, sideChain[len(sideChain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
go listenNewLog(logsCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// Rebirth logs should omit a newLogEvent
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
}
}

func TestReorgSideEvent(t *testing.T) {
var (
db = ethdb.NewMemDatabase()
@@ -149,12 +149,12 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
// associated difficulty. It's useful to test scenarios where forking is not
// tied to chain length directly.
func (b *BlockGen) OffsetTime(seconds int64) {
- b.header.Time.Add(b.header.Time, big.NewInt(seconds))
- if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
+ b.header.Time += uint64(seconds)
+ if b.header.Time <= b.parent.Header().Time {
panic("block time out of range")
}
chainreader := &fakeChainReader{config: b.config}
- b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header())
+ b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header())
}

// GenerateChain creates a chain of n blocks. The first block's
@@ -225,20 +225,20 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}

func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
- var time *big.Int
- if parent.Time() == nil {
- time = big.NewInt(10)
+ var time uint64
+ if parent.Time() == 0 {
+ time = 10
} else {
- time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
+ time = parent.Time() + 10 // block time is fixed at 10 seconds
}

return &types.Header{
Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
- Difficulty: engine.CalcDifficulty(chain, time.Uint64(), &types.Header{
+ Difficulty: engine.CalcDifficulty(chain, time, &types.Header{
Number: parent.Number(),
- Time: new(big.Int).Sub(time, big.NewInt(10)),
+ Time: time - 10,
Difficulty: parent.Difficulty(),
UncleHash: parent.UncleHash(),
}),
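OffsetTime now does plain uint64 arithmetic on the header time and recomputes the difficulty through the uint64 CalcDifficulty signature. A hedged, self-contained sketch (not part of the diff) of how the tests above lean on it to make a two-block fork out-weigh an equally long canonical chain; the test name, values and variable names are illustrative.

package core_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func TestHeavierFork(t *testing.T) {
	db := ethdb.NewMemDatabase()
	gspec := &core.Genesis{Config: params.TestChainConfig, Alloc: core.GenesisAlloc{}}
	genesis := gspec.MustCommit(db)

	chain, _ := core.NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
	defer chain.Stop()

	// Canonical chain: two blocks with the default 10 second spacing.
	canon, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *core.BlockGen) {})
	if _, err := chain.InsertChain(canon); err != nil {
		t.Fatalf("failed to insert canonical chain: %v", err)
	}
	// Fork: same length, but the second block is stamped 9 seconds "earlier",
	// which raises its difficulty so the fork out-weighs the canonical chain.
	fork, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *core.BlockGen) {
		if i == 1 {
			gen.OffsetTime(-9)
		}
	})
	if _, err := chain.InsertChain(fork); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
}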
@@ -51,7 +51,7 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author
Origin: msg.From(),
Coinbase: beneficiary,
BlockNumber: new(big.Int).Set(header.Number),
- Time: new(big.Int).Set(header.Time),
+ Time: new(big.Int).SetUint64(header.Time),
Difficulty: new(big.Int).Set(header.Difficulty),
GasLimit: header.GasLimit,
GasPrice: new(big.Int).Set(msg.GasPrice()),
@@ -243,7 +243,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
- Time: new(big.Int).SetUint64(g.Timestamp),
+ Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
@@ -286,7 +286,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
"number", last.Number, "hash", last.Hash(),
}
- if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute {
+ if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
if stats.ignored > 0 {
@@ -79,7 +79,7 @@ type Header struct {
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
- Time *big.Int `json:"timestamp" gencodec:"required"`
+ Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
@@ -91,7 +91,7 @@ type headerMarshaling struct {
Number *hexutil.Big
GasLimit hexutil.Uint64
GasUsed hexutil.Uint64
- Time *hexutil.Big
+ Time hexutil.Uint64
Extra hexutil.Bytes
Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
}
@@ -105,7 +105,7 @@ func (h *Header) Hash() common.Hash {
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {
- return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)
+ return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8)
}

func rlpHash(x interface{}) (h common.Hash) {
@@ -221,9 +221,6 @@ func NewBlockWithHeader(header *Header) *Block {
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h
- if cpy.Time = new(big.Int); h.Time != nil {
- cpy.Time.Set(h.Time)
- }
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
cpy.Difficulty.Set(h.Difficulty)
}
@@ -286,7 +283,7 @@ func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number)
func (b *Block) GasLimit() uint64 { return b.header.GasLimit }
func (b *Block) GasUsed() uint64 { return b.header.GasUsed }
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
- func (b *Block) Time() *big.Int { return new(big.Int).Set(b.header.Time) }
+ func (b *Block) Time() uint64 { return b.header.Time }

func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
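The Header.Time change from *big.Int to uint64 above is the root of most edits in this PR. A small sketch (not part of the diff) of the conversion patterns the rest of the codebase adopts; the header values are illustrative.

package main

import (
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// The timestamp is now a plain uint64, so no big.Int allocation is needed.
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1), Time: 1488928920}

	// Comparisons become ordinary integer comparisons...
	if header.Time > uint64(time.Now().Unix()) {
		fmt.Println("block is from the future")
	}
	// ...wall-clock conversions cast to int64 first...
	fmt.Println("age:", time.Since(time.Unix(int64(header.Time), 0)))

	// ...and callers that still need a big.Int (e.g. the EVM context) wrap it explicitly.
	evmTime := new(big.Int).SetUint64(header.Time)
	fmt.Println("EVM time:", evmTime)
}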
@@ -48,7 +48,7 @@ func TestBlockEncoding(t *testing.T) {
check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017"))
check("Hash", block.Hash(), common.HexToHash("0a5843ac1cb04865017cb35a57b50b07084e5fcee39b5acadade33149f4fff9e"))
check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4))
- check("Time", block.Time(), big.NewInt(1426516743))
+ check("Time", block.Time(), uint64(1426516743))
check("Size", block.Size(), common.StorageSize(len(blockEnc)))

tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), 50000, big.NewInt(10), nil)
@@ -27,7 +27,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time *hexutil.Big `json:"timestamp" gencodec:"required"`
+ Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
@@ -45,7 +45,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.Number = (*hexutil.Big)(h.Number)
enc.GasLimit = hexutil.Uint64(h.GasLimit)
enc.GasUsed = hexutil.Uint64(h.GasUsed)
- enc.Time = (*hexutil.Big)(h.Time)
+ enc.Time = hexutil.Uint64(h.Time)
enc.Extra = h.Extra
enc.MixDigest = h.MixDigest
enc.Nonce = h.Nonce
@@ -67,7 +67,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time *hexutil.Big `json:"timestamp" gencodec:"required"`
+ Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
@@ -123,7 +123,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.Time == nil {
return errors.New("missing required field 'timestamp' for Header")
}
- h.Time = (*big.Int)(dec.Time)
+ h.Time = uint64(*dec.Time)
if dec.Extra == nil {
return errors.New("missing required field 'extraData' for Header")
}
@@ -213,6 +213,10 @@ func (b *EthAPIBackend) AccountManager() *accounts.Manager {
return b.eth.AccountManager()
}

+ func (b *EthAPIBackend) RPCGasCap() *big.Int {
+ return b.eth.config.RPCGasCap
+ }

func (b *EthAPIBackend) BloomStatus() (uint64, uint64) {
sections, _, _ := b.eth.bloomIndexer.Sections()
return params.BloomBitsBlocks, sections
@@ -135,6 +135,9 @@ type Config struct {

// Constantinople block override (TODO: remove after the fork)
ConstantinopleOverride *big.Int

+ // RPCGasCap is the global gas cap for eth-call variants.
+ RPCGasCap *big.Int `toml:",omitempty"`
}

type configMarshaling struct {
@@ -557,7 +557,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
Number: header.Number,
Hash: header.Hash(),
ParentHash: header.ParentHash,
- Timestamp: header.Time,
+ Timestamp: new(big.Int).SetUint64(header.Time),
Miner: author,
GasUsed: header.GasUsed,
GasLimit: header.GasLimit,
@@ -683,7 +683,7 @@ type CallArgs struct {
Data hexutil.Bytes `json:"data"`
}

- func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) {
+ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())

state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
@@ -700,14 +700,18 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
}
}
// Set default gas & gas price if none were set
- gas, gasPrice := uint64(args.Gas), args.GasPrice.ToInt()
+ gas := uint64(args.Gas)
if gas == 0 {
gas = math.MaxUint64 / 2
}
+ if globalGasCap != nil && globalGasCap.Uint64() < gas {
+ log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
+ gas = globalGasCap.Uint64()
+ }
+ gasPrice := args.GasPrice.ToInt()
if gasPrice.Sign() == 0 {
gasPrice = new(big.Int).SetUint64(defaultGasPrice)
}

// Create new call message
msg := types.NewMessage(addr, args.To, 0, args.Value.ToInt(), gas, gasPrice, args.Data, false)

@@ -748,7 +752,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
// Call executes the given transaction on the state for the given block number.
// It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
- result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second)
+ result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second, s.b.RPCGasCap())
return (hexutil.Bytes)(result), err
}

@@ -771,13 +775,18 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
}
hi = block.GasLimit()
}
+ gasCap := s.b.RPCGasCap()
+ if gasCap != nil && hi > gasCap.Uint64() {
+ log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
+ hi = gasCap.Uint64()
+ }
cap = hi

// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
args.Gas = hexutil.Uint64(gas)

- _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0)
+ _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0, gasCap)
if err != nil || failed {
return false
}
@@ -795,7 +804,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
- return 0, fmt.Errorf("gas required exceeds allowance or always failing transaction")
+ return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap)
}
}
return hexutil.Uint64(hi), nil
@@ -882,7 +891,7 @@ func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]inter
"size": hexutil.Uint64(b.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit),
"gasUsed": hexutil.Uint64(head.GasUsed),
- "timestamp": (*hexutil.Big)(head.Time),
+ "timestamp": hexutil.Uint64(head.Time),
"transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash,
}
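The cap is applied before the call message is built, so both eth_call and eth_estimateGas respect the new "rpc.gascap" flag. A standalone sketch (not part of the diff) of the same clamping rule with a tiny usage example; the helper name is made up for this illustration.

package main

import (
	"fmt"
	"math/big"
)

// capGas mirrors the rule added to doCall and EstimateGas above: if a global
// cap is configured and the requested gas exceeds it, the request is clamped.
func capGas(requested uint64, globalGasCap *big.Int) uint64 {
	if globalGasCap != nil && globalGasCap.Uint64() < requested {
		fmt.Printf("caller gas above allowance, capping: requested=%d cap=%s\n", requested, globalGasCap)
		return globalGasCap.Uint64()
	}
	return requested
}

func main() {
	gasCap := big.NewInt(25000000) // e.g. a node started with --rpc.gascap 25000000
	fmt.Println(capGas(50000000, gasCap)) // 25000000
	fmt.Println(capGas(21000, gasCap))    // 21000
	fmt.Println(capGas(50000000, nil))    // 50000000: no cap configured
}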
@@ -44,6 +44,7 @@ type Backend interface {
ChainDb() ethdb.Database
EventMux() *event.TypeMux
AccountManager() *accounts.Manager
+ RPCGasCap() *big.Int // global gas cap for eth_call over rpc: DoS protection

// BlockChain API
SetHead(number uint64)
@@ -187,6 +187,10 @@ func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager
}

+ func (b *LesApiBackend) RPCGasCap() *big.Int {
+ return b.eth.config.RPCGasCap
+ }

func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
if b.eth.bloomIndexer == nil {
return 0, 0
@@ -157,7 +157,7 @@ func (self *LightChain) loadLastState() error {
// Issue a status log and return
header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
- log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0)))
+ log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(header.Time), 0)))

return nil
}
@@ -488,7 +488,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {

// Ensure the chain didn't move past the latest block while retrieving it
if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
- log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0)))
+ log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(int64(header.Time), 0)))
self.hc.SetCurrentHeader(header)
}
return true
@@ -109,6 +109,7 @@ var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{
params.MainnetGenesisHash: params.MainnetTrustedCheckpoint,
params.TestnetGenesisHash: params.TestnetTrustedCheckpoint,
params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint,
+ params.GoerliGenesisHash: params.GoerliTrustedCheckpoint,
}

var (
@@ -823,8 +823,8 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
tstart := time.Now()
parent := w.chain.CurrentBlock()

- if parent.Time().Cmp(new(big.Int).SetInt64(timestamp)) >= 0 {
- timestamp = parent.Time().Int64() + 1
+ if parent.Time() >= uint64(timestamp) {
+ timestamp = int64(parent.Time() + 1)
}
// this will ensure we're not going off too far in the future
if now := time.Now().Unix(); timestamp > now+1 {
@@ -839,7 +839,7 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
Number: num.Add(num, common.Big1),
GasLimit: core.CalcGasLimit(parent, w.gasFloor, w.gasCeil),
Extra: w.extra,
- Time: big.NewInt(timestamp),
+ Time: uint64(timestamp),
}
// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
if w.isRunning() {
@@ -109,7 +109,7 @@ func (h *Header) GetDifficulty() *BigInt { return &BigInt{h.header.Difficulty} }
func (h *Header) GetNumber() int64 { return h.header.Number.Int64() }
func (h *Header) GetGasLimit() int64 { return int64(h.header.GasLimit) }
func (h *Header) GetGasUsed() int64 { return int64(h.header.GasUsed) }
- func (h *Header) GetTime() int64 { return h.header.Time.Int64() }
+ func (h *Header) GetTime() int64 { return int64(h.header.Time) }
func (h *Header) GetExtra() []byte { return h.header.Extra }
func (h *Header) GetMixDigest() *Hash { return &Hash{h.header.MixDigest} }
func (h *Header) GetNonce() *Nonce { return &Nonce{h.header.Nonce} }
@@ -180,7 +180,7 @@ func (b *Block) GetDifficulty() *BigInt { return &BigInt{b.block.Difficu
func (b *Block) GetNumber() int64 { return b.block.Number().Int64() }
func (b *Block) GetGasLimit() int64 { return int64(b.block.GasLimit()) }
func (b *Block) GetGasUsed() int64 { return int64(b.block.GasUsed()) }
- func (b *Block) GetTime() int64 { return b.block.Time().Int64() }
+ func (b *Block) GetTime() int64 { return int64(b.block.Time()) }
func (b *Block) GetExtra() []byte { return b.block.Extra() }
func (b *Block) GetMixDigest() *Hash { return &Hash{b.block.MixDigest()} }
func (b *Block) GetNonce() int64 { return int64(b.block.Nonce()) }
@@ -313,7 +313,7 @@ func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
// Avoid recording failures on shutdown.
reply <- nil
return
- } else if err != nil || len(r) == 0 {
+ } else if len(r) == 0 {
fails++
tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
@@ -28,6 +28,7 @@ var (
MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
+ GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
)

var (
@@ -50,10 +51,10 @@ var (
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "mainnet",
- SectionIndex: 216,
- SectionHead: common.HexToHash("0xae3e551c8d60d06fd411a8e6008e90625d3bb0cbbf664b65d5ed90b318553541"),
- CHTRoot: common.HexToHash("0xeea7d2ab3545a37deecc66fc43c9556ae337c3ea1c6893e401428207bdb8e434"),
- BloomRoot: common.HexToHash("0xb0d4176d160d67b99a9f963281e52bce0583a566b74b4497fe3ed24ae04004ff"),
+ SectionIndex: 227,
+ SectionHead: common.HexToHash("0xa2e0b25d72c2fc6e35a7f853cdacb193b4b4f95c606accf7f8fa8415283582c7"),
+ CHTRoot: common.HexToHash("0xf69bdd4053b95b61a27b106a0e86103d791edd8574950dc96aa351ab9b9f1aa0"),
+ BloomRoot: common.HexToHash("0xec1b454d4c6322c78ccedf76ac922a8698c3cac4d98748a84af4995b7bd3d744"),
}

// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@@ -75,10 +76,10 @@ var (
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "testnet",
- SectionIndex: 148,
- SectionHead: common.HexToHash("0x4d3181bedb6aa96a6f3efa866c71f7802400d0fb4a6906946c453630d850efc0"),
- CHTRoot: common.HexToHash("0x25df2f9d63a5f84b2852988f0f0f7af5a7877da061c11b85c812780b5a27a5ec"),
- BloomRoot: common.HexToHash("0x0584834e5222471a06c669d210e302ca602780eaaddd04634fd65471c2a91419"),
+ SectionIndex: 161,
+ SectionHead: common.HexToHash("0x5378afa734e1feafb34bcca1534c4d96952b754579b96a4afb23d5301ecececc"),
+ CHTRoot: common.HexToHash("0x1cf2b071e7443a62914362486b613ff30f60cea0d9c268ed8c545f876a3ee60c"),
+ BloomRoot: common.HexToHash("0x5ac25c84bd18a9cbe878d4609a80220f57f85037a112644532412ba0d498a31b"),
}

// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@@ -93,7 +94,7 @@ var (
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1035301),
ConstantinopleBlock: big.NewInt(3660663),
- PetersburgBlock: big.NewInt(9999999), //TODO! Insert Rinkeby block number
+ PetersburgBlock: big.NewInt(4321234),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
@@ -103,10 +104,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
Name: "rinkeby",
- SectionIndex: 113,
- SectionHead: common.HexToHash("0xb812f3095af3af1cb2de7d7c2086ee807736a7315992c461b0986699185daf77"),
- CHTRoot: common.HexToHash("0x5416d0924925eb835987ad3d1f059ecc66778c51959c8246a7a35b22ec5f3109"),
- BloomRoot: common.HexToHash("0xcf74ca2c14e843b366561dab4fc64237bf6bb335119cbc97d723f3b501863470"),
+ SectionIndex: 125,
+ SectionHead: common.HexToHash("0x8a738386f6bb34add15846f8f49c4c519a2f32519096e792b9f43bcb407c831c"),
+ CHTRoot: common.HexToHash("0xa1e5720a9bad4dce794f129e4ac6744398197b652868011486a6f89c8ec84a75"),
+ BloomRoot: common.HexToHash("0xa3048fe8b7e30f77f11bc755a88478363d7d3e71c2bdfe4e8ab9e269cd804ba2"),
}

// GoerliChainConfig contains the chain parameters to run a node on the Görli test network.
@@ -130,10 +131,10 @@ var (
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{
Name: "goerli",
- SectionIndex: 0,
- SectionHead: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
- CHTRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
- BloomRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ SectionIndex: 9,
+ SectionHead: common.HexToHash("0x8e223d827391eee53b07cb8ee057dbfa11c93e0b45352188c783affd7840a921"),
+ CHTRoot: common.HexToHash("0xe0a817ac69b36c1e437c5b0cff9e764853f5115702b5f66d451b665d6afb7e78"),
+ BloomRoot: common.HexToHash("0x50d672aeb655b723284969c7c1201fb6ca003c23ed144bcb9f2d1b30e2971c1b"),
}

// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
- VersionPatch = 23 // Patch version component of the current release
+ VersionPatch = 24 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 0 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 11 // Patch version component of the current release
+ VersionPatch = 12 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
@ -82,7 +82,7 @@ type btHeader struct {
Difficulty *big.Int
GasLimit uint64
GasUsed uint64
Timestamp *big.Int
Timestamp uint64
}

type btHeaderMarshaling struct {
@ -91,7 +91,7 @@ type btHeaderMarshaling struct {
Difficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Timestamp *math.HexOrDecimal256
Timestamp math.HexOrDecimal64
}

func (t *BlockTest) Run() error {
@ -146,7 +146,7 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
@ -248,7 +248,7 @@ func validateHeader(h *btHeader, h2 *types.Header) error {
if h.GasUsed != h2.GasUsed {
return fmt.Errorf("GasUsed: want: %d have: %d", h.GasUsed, h2.GasUsed)
}
if h.Timestamp.Cmp(h2.Time) != 0 {
if h.Timestamp != h2.Time {
return fmt.Errorf("Timestamp: want: %v have: %v", h.Timestamp, h2.Time)
}
return nil
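These test-harness hunks follow the wider migration of block timestamps from *big.Int to uint64: once the field is a plain integer, the header check can use != instead of big.Int's Cmp. A minimal before/after sketch with made-up values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Before: *big.Int timestamps had to be compared via Cmp.
	oldWant, oldHave := big.NewInt(1548854791), big.NewInt(1548854791)
	fmt.Println(oldWant.Cmp(oldHave) != 0) // false: values match

	// After: uint64 timestamps compare directly.
	var want, have uint64 = 1548854791, 1548854791
	fmt.Println(want != have) // false: values match
}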
@ -30,18 +30,18 @@ import (
//go:generate gencodec -type DifficultyTest -field-override difficultyTestMarshaling -out gen_difficultytest.go

type DifficultyTest struct {
ParentTimestamp *big.Int `json:"parentTimestamp"`
ParentTimestamp uint64 `json:"parentTimestamp"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
UncleHash common.Hash `json:"parentUncles"`
CurrentTimestamp *big.Int `json:"currentTimestamp"`
CurrentTimestamp uint64 `json:"currentTimestamp"`
CurrentBlockNumber uint64 `json:"currentBlockNumber"`
CurrentDifficulty *big.Int `json:"currentDifficulty"`
}

type difficultyTestMarshaling struct {
ParentTimestamp *math.HexOrDecimal256
ParentTimestamp math.HexOrDecimal64
ParentDifficulty *math.HexOrDecimal256
CurrentTimestamp *math.HexOrDecimal256
CurrentTimestamp math.HexOrDecimal64
CurrentDifficulty *math.HexOrDecimal256
UncleHash common.Hash
CurrentBlockNumber math.HexOrDecimal64
@ -56,7 +56,7 @@ func (test *DifficultyTest) Run(config *params.ChainConfig) error {
UncleHash: test.UncleHash,
}

actual := ethash.CalcDifficulty(config, test.CurrentTimestamp.Uint64(), parent)
actual := ethash.CalcDifficulty(config, test.CurrentTimestamp, parent)
exp := test.CurrentDifficulty

if actual.Cmp(exp) != 0 {
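With CurrentTimestamp already a uint64, the .Uint64() conversion disappears from the CalcDifficulty call. A standalone usage sketch of that signature; the parent header values below are made up purely for illustration:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Hypothetical parent header; only the fields the difficulty formula reads are set.
	parent := &types.Header{
		Number:     big.NewInt(3660662),
		Time:       1550000000,
		Difficulty: big.NewInt(2000000000),
		UncleHash:  types.EmptyUncleHash,
	}
	// The child timestamp is now passed as a plain uint64.
	diff := ethash.CalcDifficulty(params.MainnetChainConfig, parent.Time+13, parent)
	fmt.Println(diff)
}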
@ -14,6 +14,7 @@ import (

var _ = (*btHeaderMarshaling)(nil)

// MarshalJSON marshals as JSON.
func (b btHeader) MarshalJSON() ([]byte, error) {
type btHeader struct {
Bloom types.Bloom
@ -31,7 +32,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
Difficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Timestamp *math.HexOrDecimal256
Timestamp math.HexOrDecimal64
}
var enc btHeader
enc.Bloom = b.Bloom
@ -49,10 +50,11 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
enc.Difficulty = (*math.HexOrDecimal256)(b.Difficulty)
enc.GasLimit = math.HexOrDecimal64(b.GasLimit)
enc.GasUsed = math.HexOrDecimal64(b.GasUsed)
enc.Timestamp = (*math.HexOrDecimal256)(b.Timestamp)
enc.Timestamp = math.HexOrDecimal64(b.Timestamp)
return json.Marshal(&enc)
}

// UnmarshalJSON unmarshals from JSON.
func (b *btHeader) UnmarshalJSON(input []byte) error {
type btHeader struct {
Bloom *types.Bloom
@ -70,7 +72,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
Difficulty *math.HexOrDecimal256
GasLimit *math.HexOrDecimal64
GasUsed *math.HexOrDecimal64
Timestamp *math.HexOrDecimal256
Timestamp *math.HexOrDecimal64
}
var dec btHeader
if err := json.Unmarshal(input, &dec); err != nil {
@ -122,7 +124,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
b.GasUsed = uint64(*dec.GasUsed)
}
if dec.Timestamp != nil {
b.Timestamp = (*big.Int)(dec.Timestamp)
b.Timestamp = uint64(*dec.Timestamp)
}
return nil
}
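The regenerated gencodec output swaps *math.HexOrDecimal256 for math.HexOrDecimal64 on the timestamp fields, so JSON round-trips keep accepting hex or decimal quantities while the in-memory value is a plain uint64. A small self-contained sketch of that round-trip (the header type here is illustrative, not one of the generated types):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

type header struct {
	Timestamp math.HexOrDecimal64 `json:"timestamp"`
}

func main() {
	// Marshals as a hex quantity...
	out, _ := json.Marshal(header{Timestamp: 4096})
	fmt.Println(string(out)) // {"timestamp":"0x1000"}

	// ...and unmarshals from hex (or decimal) back into a uint64.
	var h header
	_ = json.Unmarshal([]byte(`{"timestamp":"0x1000"}`), &h)
	fmt.Println(uint64(h.Timestamp)) // 4096
}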
@ -12,31 +12,33 @@ import (

var _ = (*difficultyTestMarshaling)(nil)

// MarshalJSON marshals as JSON.
func (d DifficultyTest) MarshalJSON() ([]byte, error) {
type DifficultyTest struct {
ParentTimestamp *math.HexOrDecimal256 `json:"parentTimestamp"`
ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
UncleHash common.Hash `json:"parentUncles"`
CurrentTimestamp *math.HexOrDecimal256 `json:"currentTimestamp"`
CurrentTimestamp math.HexOrDecimal64 `json:"currentTimestamp"`
CurrentBlockNumber math.HexOrDecimal64 `json:"currentBlockNumber"`
CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
}
var enc DifficultyTest
enc.ParentTimestamp = (*math.HexOrDecimal256)(d.ParentTimestamp)
enc.ParentTimestamp = math.HexOrDecimal64(d.ParentTimestamp)
enc.ParentDifficulty = (*math.HexOrDecimal256)(d.ParentDifficulty)
enc.UncleHash = d.UncleHash
enc.CurrentTimestamp = (*math.HexOrDecimal256)(d.CurrentTimestamp)
enc.CurrentTimestamp = math.HexOrDecimal64(d.CurrentTimestamp)
enc.CurrentBlockNumber = math.HexOrDecimal64(d.CurrentBlockNumber)
enc.CurrentDifficulty = (*math.HexOrDecimal256)(d.CurrentDifficulty)
return json.Marshal(&enc)
}

// UnmarshalJSON unmarshals from JSON.
func (d *DifficultyTest) UnmarshalJSON(input []byte) error {
type DifficultyTest struct {
ParentTimestamp *math.HexOrDecimal256 `json:"parentTimestamp"`
ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
UncleHash *common.Hash `json:"parentUncles"`
CurrentTimestamp *math.HexOrDecimal256 `json:"currentTimestamp"`
CurrentTimestamp *math.HexOrDecimal64 `json:"currentTimestamp"`
CurrentBlockNumber *math.HexOrDecimal64 `json:"currentBlockNumber"`
CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
}
@ -45,7 +47,7 @@ func (d *DifficultyTest) UnmarshalJSON(input []byte) error {
return err
}
if dec.ParentTimestamp != nil {
d.ParentTimestamp = (*big.Int)(dec.ParentTimestamp)
d.ParentTimestamp = uint64(*dec.ParentTimestamp)
}
if dec.ParentDifficulty != nil {
d.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
@ -54,7 +56,7 @@ func (d *DifficultyTest) UnmarshalJSON(input []byte) error {
d.UncleHash = *dec.UncleHash
}
if dec.CurrentTimestamp != nil {
d.CurrentTimestamp = (*big.Int)(dec.CurrentTimestamp)
d.CurrentTimestamp = uint64(*dec.CurrentTimestamp)
}
if dec.CurrentBlockNumber != nil {
d.CurrentBlockNumber = uint64(*dec.CurrentBlockNumber)
@ -17,6 +17,7 @@
package trie

import (
"errors"
"fmt"
"io"
"sync"
@ -391,6 +392,10 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node {
// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
// It doesn't make sense to retrieve the metaroot
if hash == (common.Hash{}) {
return nil, errors.New("not found")
}
// Retrieve the node from the clean cache if available
if db.cleans != nil {
if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
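The new guard makes a lookup of the all-zero hash fail fast: that hash is reserved for the database's internal metaroot rather than any real trie node, so it should never resolve through the caches or disk (the unit test added below exercises exactly this path). A standalone usage sketch from outside the package, assuming the same constructors the test uses:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(ethdb.NewMemDatabase())
	// The metaroot (all-zero hash) is reported as missing instead of being resolved.
	if _, err := db.Node(common.Hash{}); err != nil {
		fmt.Println("metaroot lookup fails as expected:", err)
	}
}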
33
trie/database_test.go
Normal file
@ -0,0 +1,33 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)

// Tests that the trie database returns a missing trie node error if attempting
// to retrieve the meta root.
func TestDatabaseMetarootFetch(t *testing.T) {
db := NewDatabase(ethdb.NewMemDatabase())
if _, err := db.Node(common.Hash{}); err == nil {
t.Fatalf("metaroot retrieval succeeded")
}
}