all: blindly swap out glog to our log15, logs need rework
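The change is mechanical: every glog call site is rewritten against the new log package, keeping the old message formatting through fmt.Sprintf and mapping glog verbosity levels onto leveled calls (Error -> Error, Warn -> Warn, Info -> Info, Debug -> Debug, Detail -> Trace). An illustrative sketch of the pattern applied throughout; the variable names below are placeholders, not identifiers from this commit:

    // Before: glog with an explicit verbosity check
    glog.V(logger.Detail).Infof("%v: fetching %d headers from #%d", peer, count, from)

    // After: leveled log15-style call, message format preserved via fmt.Sprintf
    log.Trace(fmt.Sprintf("%v: fetching %d headers from #%d", peer, count, from))

    // Guarded, expensive-to-format messages move into lazily evaluated closures
    log.Debug("", "msg", log.Lazy{Fn: func() string {
        return fmt.Sprintf("Peer %s: queued block #%d, total %v", peer, number, queued)
    }})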
@@ -37,8 +37,7 @@ import (
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/core/vm"
 "github.com/ethereum/go-ethereum/internal/ethapi"
-"github.com/ethereum/go-ethereum/logger"
-"github.com/ethereum/go-ethereum/logger/glog"
+"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/miner"
 "github.com/ethereum/go-ethereum/params"
 "github.com/ethereum/go-ethereum/rlp"
@@ -113,7 +112,7 @@ func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
 if work, err = s.agent.GetWork(); err == nil {
 return
 }
-glog.V(logger.Debug).Infof("%v", err)
+log.Debug(fmt.Sprintf("%v", err))
 return work, fmt.Errorf("mining not ready")
 }

@@ -40,8 +40,7 @@ import (
 "github.com/ethereum/go-ethereum/ethdb"
 "github.com/ethereum/go-ethereum/event"
 "github.com/ethereum/go-ethereum/internal/ethapi"
-"github.com/ethereum/go-ethereum/logger"
-"github.com/ethereum/go-ethereum/logger/glog"
+"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/miner"
 "github.com/ethereum/go-ethereum/node"
 "github.com/ethereum/go-ethereum/p2p"
@@ -184,7 +183,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 return nil, err
 }

-glog.V(logger.Info).Infof("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId)
+log.Info(fmt.Sprintf("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId))

 if !config.SkipBcVersionCheck {
 bcVersion := core.GetBlockChainVersion(chainDb)
@@ -202,7 +201,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 if err != nil {
 return nil, err
 }
-glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
 }

 if config.ChainConfig == nil {
@@ -212,7 +211,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {

 eth.chainConfig = config.ChainConfig

-glog.V(logger.Info).Infoln("Chain config:", eth.chainConfig)
+log.Info(fmt.Sprint("Chain config:", eth.chainConfig))

 eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.EventMux(), vm.Config{EnablePreimageRecording: config.EnablePreimageRecording})
 if err != nil {
@@ -273,7 +272,7 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
 if err != nil {
 return err
 }
-glog.V(logger.Info).Infof("Successfully wrote custom genesis block: %x", block.Hash())
+log.Info(fmt.Sprintf("Successfully wrote custom genesis block: %x", block.Hash()))
 }
 // Load up a test setup if directly injected
 if config.TestGenesisState != nil {
@@ -292,13 +291,13 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
 func CreatePoW(config *Config) (pow.PoW, error) {
 switch {
 case config.PowFake:
-glog.V(logger.Info).Infof("ethash used in fake mode")
+log.Info(fmt.Sprintf("ethash used in fake mode"))
 return pow.PoW(core.FakePow{}), nil
 case config.PowTest:
-glog.V(logger.Info).Infof("ethash used in test mode")
+log.Info(fmt.Sprintf("ethash used in test mode"))
 return ethash.NewForTesting()
 case config.PowShared:
-glog.V(logger.Info).Infof("ethash used in shared mode")
+log.Info(fmt.Sprintf("ethash used in shared mode"))
 return ethash.NewShared(), nil
 default:
 return ethash.New(), nil
@@ -382,7 +381,7 @@ func (s *Ethereum) StartMining(threads int) error {
 eb, err := s.Etherbase()
 if err != nil {
 err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
-glog.V(logger.Error).Infoln(err)
+log.Error(fmt.Sprint(err))
 return err
 }
 go s.miner.Start(eb, threads)
@@ -470,14 +469,14 @@ func (self *Ethereum) StartAutoDAG() {
 return // already started
 }
 go func() {
-glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir)
+log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir))
 var nextEpoch uint64
 timer := time.After(0)
 self.autodagquit = make(chan bool)
 for {
 select {
 case <-timer:
-glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir)
+log.Info(fmt.Sprintf("checking DAG (ethash dir: %s)", ethash.DefaultDir))
 currentBlock := self.BlockChain().CurrentBlock().NumberU64()
 thisEpoch := currentBlock / epochLength
 if nextEpoch <= thisEpoch {
@@ -486,19 +485,19 @@ func (self *Ethereum) StartAutoDAG() {
 previousDag, previousDagFull := dagFiles(thisEpoch - 1)
 os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
 os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
-glog.V(logger.Info).Infof("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag)
+log.Info(fmt.Sprintf("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag))
 }
 nextEpoch = thisEpoch + 1
 dag, _ := dagFiles(nextEpoch)
 if _, err := os.Stat(dag); os.IsNotExist(err) {
-glog.V(logger.Info).Infof("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag)
+log.Info(fmt.Sprintf("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag))
 err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
 if err != nil {
-glog.V(logger.Error).Infof("Error generating DAG for epoch %d (%s)", nextEpoch, dag)
+log.Error(fmt.Sprintf("Error generating DAG for epoch %d (%s)", nextEpoch, dag))
 return
 }
 } else {
-glog.V(logger.Error).Infof("DAG for epoch %d (%s)", nextEpoch, dag)
+log.Error(fmt.Sprintf("DAG for epoch %d (%s)", nextEpoch, dag))
 }
 }
 }
@@ -516,7 +515,7 @@ func (self *Ethereum) StopAutoDAG() {
 close(self.autodagquit)
 self.autodagquit = nil
 }
-glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir)
+log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir))
 }

 // dagFiles(epoch) returns the two alternative DAG filenames (not a path)

@@ -25,8 +25,7 @@ import (

 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/core/types"
-"github.com/ethereum/go-ethereum/logger"
-"github.com/ethereum/go-ethereum/logger/glog"
+"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/rlp"
 )

@@ -66,9 +65,9 @@ func sendBadBlockReport(block *types.Block, err error) {
 client := http.Client{Timeout: 8 * time.Second}
 resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
 if err != nil {
-glog.V(logger.Debug).Infoln(err)
+log.Debug(fmt.Sprint(err))
 return
 }
-glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
+log.Debug(fmt.Sprintf("Bad Block Report posted (%d)", resp.StatusCode))
 resp.Body.Close()
 }

@@ -28,8 +28,7 @@ import (
 "github.com/ethereum/go-ethereum/core"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/ethdb"
-"github.com/ethereum/go-ethereum/logger"
-"github.com/ethereum/go-ethereum/logger/glog"
+"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/rlp"
 )

@@ -50,7 +49,7 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
 return nil // empty database, nothing to do
 }

-glog.V(logger.Info).Infof("Upgrading chain database to use sequential keys")
+log.Info(fmt.Sprintf("Upgrading chain database to use sequential keys"))

 stopChn := make(chan struct{})
 stoppedChn := make(chan struct{})
@@ -73,11 +72,11 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
 err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn)
 }
 if err == nil && !stopped {
-glog.V(logger.Info).Infof("Database conversion successful")
+log.Info(fmt.Sprintf("Database conversion successful"))
 db.Put(useSequentialKeys, []byte{42})
 }
 if err != nil {
-glog.V(logger.Error).Infof("Database conversion failed: %v", err)
+log.Error(fmt.Sprintf("Database conversion failed: %v", err))
 }
 close(stoppedChn)
 }()
@@ -106,7 +105,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
 it.Release()
 it = db.(*ethdb.LDBDatabase).NewIterator()
 it.Seek(keyPtr)
-glog.V(logger.Info).Infof("converting %d canonical numbers...", cnt)
+log.Info(fmt.Sprintf("converting %d canonical numbers...", cnt))
 }
 number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64()
 newKey := []byte("h12345678n")
@@ -125,7 +124,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
 it.Next()
 }
 if cnt > 0 {
-glog.V(logger.Info).Infof("converted %d canonical numbers...", cnt)
+log.Info(fmt.Sprintf("converted %d canonical numbers...", cnt))
 }
 return nil, false
 }
@@ -149,7 +148,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
 it.Release()
 it = db.(*ethdb.LDBDatabase).NewIterator()
 it.Seek(keyPtr)
-glog.V(logger.Info).Infof("converting %d blocks...", cnt)
+log.Info(fmt.Sprintf("converting %d blocks...", cnt))
 }
 // convert header, body, td and block receipts
 var keyPrefix [38]byte
@@ -177,7 +176,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
 }
 }
 if cnt > 0 {
-glog.V(logger.Info).Infof("converted %d blocks...", cnt)
+log.Info(fmt.Sprintf("converted %d blocks...", cnt))
 }
 return nil, false
 }
@@ -204,7 +203,7 @@ func upgradeSequentialOrphanedReceipts(db ethdb.Database, stopFn func() bool) (e
 it.Next()
 }
 if cnt > 0 {
-glog.V(logger.Info).Infof("removed %d orphaned block receipts...", cnt)
+log.Info(fmt.Sprintf("removed %d orphaned block receipts...", cnt))
 }
 return nil, false
 }
@@ -267,7 +266,7 @@ func upgradeChainDatabase(db ethdb.Database) error {
 return nil
 }
 // At least some of the database is still the old format, upgrade (skip the head block!)
-glog.V(logger.Info).Info("Old database detected, upgrading...")
+log.Info(fmt.Sprint("Old database detected, upgrading..."))

 if db, ok := db.(*ethdb.LDBDatabase); ok {
 blockPrefix := []byte("block-hash-")
@@ -343,7 +342,7 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
 }

 tstart := time.Now()
-glog.V(logger.Info).Infoln("upgrading db log bloom bins")
+log.Info(fmt.Sprint("upgrading db log bloom bins"))
 for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
 hash := core.GetCanonicalHash(db, i)
 if (hash == common.Hash{}) {
@@ -351,6 +350,6 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
 }
 core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
 }
-glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
+log.Info(fmt.Sprint("upgrade completed in", time.Since(tstart)))
 return nil
 }

@@ -33,8 +33,7 @@ import (
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/ethdb"
 "github.com/ethereum/go-ethereum/event"
-"github.com/ethereum/go-ethereum/logger"
-"github.com/ethereum/go-ethereum/logger/glog"
+"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/params"
 "github.com/ethereum/go-ethereum/trie"
 "github.com/rcrowley/go-metrics"
@@ -249,9 +248,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {

-glog.V(logger.Detail).Infoln("Registering peer", id)
+log.Trace(fmt.Sprint("Registering peer", id))
 if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
-glog.V(logger.Error).Infoln("Register failed:", err)
+log.Error(fmt.Sprint("Register failed:", err))
 return err
 }
 d.qosReduceConfidence()
@@ -264,9 +263,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
 // Unregister the peer from the active peer set and revoke any fetch tasks
-glog.V(logger.Detail).Infoln("Unregistering peer", id)
+log.Trace(fmt.Sprint("Unregistering peer", id))
 if err := d.peers.Unregister(id); err != nil {
-glog.V(logger.Error).Infoln("Unregister failed:", err)
+log.Error(fmt.Sprint("Unregister failed:", err))
 return err
 }
 d.queue.Revoke(id)
@@ -285,24 +284,24 @@ func (d *Downloader) UnregisterPeer(id string) error {
 // Synchronise tries to sync up our local block chain with a remote peer, both
 // adding various sanity checks as well as wrapping it with various log entries.
 func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
-glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)
+log.Trace(fmt.Sprintf("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td))

 err := d.synchronise(id, head, td, mode)
 switch err {
 case nil:
-glog.V(logger.Detail).Infof("Synchronisation completed")
+log.Trace(fmt.Sprintf("Synchronisation completed"))

 case errBusy:
-glog.V(logger.Detail).Infof("Synchronisation already in progress")
+log.Trace(fmt.Sprintf("Synchronisation already in progress"))

 case errTimeout, errBadPeer, errStallingPeer,
 errEmptyHeaderSet, errPeersUnavailable, errTooOld,
 errInvalidAncestor, errInvalidChain:
-glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
+log.Debug(fmt.Sprintf("Removing peer %v: %v", id, err))
 d.dropPeer(id)

 default:
-glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
+log.Warn(fmt.Sprintf("Synchronisation failed: %v", err))
 }
 return err
 }
@@ -323,7 +322,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode

 // Post a user notification of the sync (only once per session)
 if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
-glog.V(logger.Info).Infoln("Block synchronisation started")
+log.Info(fmt.Sprint("Block synchronisation started"))
 }
 // Reset the queue, peer set and wake channels to clean any internal leftover state
 d.queue.Reset()
@@ -388,9 +387,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 return errTooOld
 }

-glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
+log.Debug(fmt.Sprintf("Synchronising with the network using: %s [eth/%d]", p.id, p.version))
 defer func(start time.Time) {
-glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
+log.Debug(fmt.Sprintf("Synchronisation terminated after %v", time.Since(start)))
 }(time.Now())

 // Look up the sync boundaries: the common ancestor and the target block
@@ -438,7 +437,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 origin = 0
 }
 }
-glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
+log.Debug(fmt.Sprintf("Fast syncing until pivot block #%d", pivot))
 }
 d.queue.Prepare(origin+1, d.mode, pivot, latest)
 if d.syncInitHook != nil {
@@ -523,7 +522,7 @@ func (d *Downloader) Terminate() {
 // fetchHeight retrieves the head header of the remote peer to aid in estimating
 // the total time a pending synchronisation would take.
 func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
-glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
+log.Debug(fmt.Sprintf("%v: retrieving remote chain height", p))

 // Request the advertised remote head block and wait for the response
 head, _ := p.currentHead()
@@ -538,19 +537,19 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 case packet := <-d.headerCh:
 // Discard anything not from the origin peer
 if packet.PeerId() != p.id {
-glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
+log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
 break
 }
 // Make sure the peer actually gave something valid
 headers := packet.(*headerPack).headers
 if len(headers) != 1 {
-glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
+log.Debug(fmt.Sprintf("%v: invalid number of head headers: %d != 1", p, len(headers)))
 return nil, errBadPeer
 }
 return headers[0], nil

 case <-timeout:
-glog.V(logger.Debug).Infof("%v: head header timeout", p)
+log.Debug(fmt.Sprintf("%v: head header timeout", p))
 return nil, errTimeout

 case <-d.bodyCh:
@@ -567,7 +566,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 // In the rare scenario when we ended up on a long reorganisation (i.e. none of
 // the head links match), we do a binary search to find the common ancestor.
 func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
-glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height)
+log.Debug(fmt.Sprintf("%v: looking for common ancestor (remote height %d)", p, height))

 // Figure out the valid ancestor range to prevent rewrite attacks
 floor, ceil := int64(-1), d.headHeader().Number.Uint64()
@@ -608,19 +607,19 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 case packet := <-d.headerCh:
 // Discard anything not from the origin peer
 if packet.PeerId() != p.id {
-glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
+log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
 break
 }
 // Make sure the peer actually gave something valid
 headers := packet.(*headerPack).headers
 if len(headers) == 0 {
-glog.V(logger.Warn).Infof("%v: empty head header set", p)
+log.Warn(fmt.Sprintf("%v: empty head header set", p))
 return 0, errEmptyHeaderSet
 }
 // Make sure the peer's reply conforms to the request
 for i := 0; i < len(headers); i++ {
 if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
-glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number)
+log.Warn(fmt.Sprintf("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number))
 return 0, errInvalidChain
 }
 }
@@ -637,7 +636,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {

 // If every header is known, even future ones, the peer straight out lied about its head
 if number > height && i == limit-1 {
-glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number)
+log.Warn(fmt.Sprintf("%v: lied about chain head: reported %d, found above %d", p, height, number))
 return 0, errStallingPeer
 }
 break
@@ -645,7 +644,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 }

 case <-timeout:
-glog.V(logger.Debug).Infof("%v: head header timeout", p)
+log.Debug(fmt.Sprintf("%v: head header timeout", p))
 return 0, errTimeout

 case <-d.bodyCh:
@@ -657,10 +656,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 // If the head fetch already found an ancestor, return
 if !common.EmptyHash(hash) {
 if int64(number) <= floor {
-glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
+log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor))
 return 0, errInvalidAncestor
 }
-glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
+log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, number, hash[:4]))
 return number, nil
 }
 // Ancestor not found, we need to binary search over our chain
@@ -684,13 +683,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 case packer := <-d.headerCh:
 // Discard anything not from the origin peer
 if packer.PeerId() != p.id {
-glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId())
+log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packer.PeerId()))
 break
 }
 // Make sure the peer actually gave something valid
 headers := packer.(*headerPack).headers
 if len(headers) != 1 {
-glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers))
+log.Debug(fmt.Sprintf("%v: invalid search header set (%d)", p, len(headers)))
 return 0, errBadPeer
 }
 arrived = true
@@ -702,13 +701,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 }
 header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
 if header.Number.Uint64() != check {
-glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check)
+log.Debug(fmt.Sprintf("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check))
 return 0, errBadPeer
 }
 start = check

 case <-timeout:
-glog.V(logger.Debug).Infof("%v: search header timeout", p)
+log.Debug(fmt.Sprintf("%v: search header timeout", p))
 return 0, errTimeout

 case <-d.bodyCh:
@@ -720,10 +719,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 }
 // Ensure valid ancestry and return
 if int64(start) <= floor {
-glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
+log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor))
 return 0, errInvalidAncestor
 }
-glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
+log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, start, hash[:4]))
 return start, nil
 }

@@ -736,8 +735,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 // can fill in the skeleton - not even the origin peer - it's assumed invalid and
 // the origin is dropped.
 func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
-glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from)
-defer glog.V(logger.Debug).Infof("%v: header download terminated", p)
+log.Debug(fmt.Sprintf("%v: directing header downloads from #%d", p, from))
+defer log.Debug(fmt.Sprintf("%v: header download terminated", p))

 // Create a timeout timer, and the associated header fetcher
 skeleton := true // Skeleton assembly phase or finishing up
@@ -751,10 +750,10 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 timeout.Reset(d.requestTTL())

 if skeleton {
-glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from)
+log.Trace(fmt.Sprintf("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from))
 go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
 } else {
-glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from)
+log.Trace(fmt.Sprintf("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from))
 go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
 }
 }
@@ -769,7 +768,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 case packet := <-d.headerCh:
 // Make sure the active peer is giving us the skeleton headers
 if packet.PeerId() != p.id {
-glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId())
+log.Debug(fmt.Sprintf("Received skeleton headers from incorrect peer (%s)", packet.PeerId()))
 break
 }
 headerReqTimer.UpdateSince(request)
@@ -783,7 +782,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 }
 // If no more headers are inbound, notify the content fetchers and return
 if packet.Items() == 0 {
-glog.V(logger.Debug).Infof("%v: no available headers", p)
+log.Debug(fmt.Sprintf("%v: no available headers", p))
 select {
 case d.headerProcCh <- nil:
 return nil
@@ -797,7 +796,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 if skeleton {
 filled, proced, err := d.fillHeaderSkeleton(from, headers)
 if err != nil {
-glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err)
+log.Debug(fmt.Sprintf("%v: skeleton chain invalid: %v", p, err))
 return errInvalidChain
 }
 headers = filled[proced:]
@@ -805,7 +804,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 }
 // Insert all the new headers and fetch the next batch
 if len(headers) > 0 {
-glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
+log.Trace(fmt.Sprintf("%v: schedule %d headers from #%d", p, len(headers), from))
 select {
 case d.headerProcCh <- headers:
 case <-d.cancelCh:
@@ -817,7 +816,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {

 case <-timeout.C:
 // Header retrieval timed out, consider the peer bad and drop
-glog.V(logger.Debug).Infof("%v: header request timed out", p)
+log.Debug(fmt.Sprintf("%v: header request timed out", p))
 headerTimeoutMeter.Mark(1)
 d.dropPeer(p.id)

@@ -847,7 +846,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 // The method returs the entire filled skeleton and also the number of headers
 // already forwarded for processing.
 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
-glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from)
+log.Debug(fmt.Sprintf("Filling up skeleton from #%d", from))
 d.queue.ScheduleSkeleton(from, skeleton)

 var (
@@ -868,7 +867,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
 nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")

-glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err)
+log.Debug(fmt.Sprintf("Skeleton fill terminated: %v", err))

 filled, proced := d.queue.RetrieveHeaders()
 return filled, proced, err
@@ -878,7 +877,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 // available peers, reserving a chunk of blocks for each, waiting for delivery
 // and also periodically checking for timeouts.
 func (d *Downloader) fetchBodies(from uint64) error {
-glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
+log.Debug(fmt.Sprintf("Downloading block bodies from #%d", from))

 var (
 deliver = func(packet dataPack) (int, error) {
@@ -894,7 +893,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
 d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")

-glog.V(logger.Debug).Infof("Block body download terminated: %v", err)
+log.Debug(fmt.Sprintf("Block body download terminated: %v", err))
 return err
 }

@@ -902,7 +901,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 // available peers, reserving a chunk of receipts for each, waiting for delivery
 // and also periodically checking for timeouts.
 func (d *Downloader) fetchReceipts(from uint64) error {
-glog.V(logger.Debug).Infof("Downloading receipts from #%d", from)
+log.Debug(fmt.Sprintf("Downloading receipts from #%d", from))

 var (
 deliver = func(packet dataPack) (int, error) {
@@ -918,7 +917,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
 d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")

-glog.V(logger.Debug).Infof("Receipt download terminated: %v", err)
+log.Debug(fmt.Sprintf("Receipt download terminated: %v", err))
 return err
 }

@@ -926,7 +925,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 // available peers, reserving a chunk of nodes for each, waiting for delivery and
 // also periodically checking for timeouts.
 func (d *Downloader) fetchNodeData() error {
-glog.V(logger.Debug).Infof("Downloading node state data")
+log.Debug(fmt.Sprintf("Downloading node state data"))

 var (
 deliver = func(packet dataPack) (int, error) {
@@ -934,12 +933,12 @@ func (d *Downloader) fetchNodeData() error {
 return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
 // If the peer returned old-requested data, forgive
 if err == trie.ErrNotRequested {
-glog.V(logger.Debug).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId())
+log.Debug(fmt.Sprintf("peer %s: replied to stale state request, forgiving", packet.PeerId()))
 return
 }
 if err != nil {
 // If the node data processing failed, the root hash is very wrong, abort
-glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err)
+log.Error(fmt.Sprintf("peer %d: state processing failed: %v", packet.PeerId(), err))
 d.cancel()
 return
 }
@@ -958,12 +957,12 @@ func (d *Downloader) fetchNodeData() error {

 // If real database progress was made, reset any fast-sync pivot failure
 if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
-glog.V(logger.Debug).Infof("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails))
+log.Debug(fmt.Sprintf("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails)))
 atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
 }
 // Log a message to the user and return
 if delivered > 0 {
-glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending)
+log.Info(fmt.Sprintf("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending))
 }
 })
 }
@@ -980,7 +979,7 @@ func (d *Downloader) fetchNodeData() error {
 d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
 d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")

-glog.V(logger.Debug).Infof("Node state data download terminated: %v", err)
+log.Debug(fmt.Sprintf("Node state data download terminated: %v", err))
 return err
 }

@@ -1045,11 +1044,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 // Issue a log to the user to see what's going on
 switch {
 case err == nil && packet.Items() == 0:
-glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
+log.Trace(fmt.Sprintf("%s: no %s delivered", peer, strings.ToLower(kind)))
 case err == nil:
-glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
+log.Trace(fmt.Sprintf("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)))
 default:
-glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err)
+log.Trace(fmt.Sprintf("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err))
 }
 }
 // Blocks assembled, try to update the progress
@@ -1092,10 +1091,10 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
 // how response times reacts, to it always requests one more than the minimum (i.e. min 2).
 if fails > 2 {
-glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
+log.Trace(fmt.Sprintf("%s: %s delivery timeout", peer, strings.ToLower(kind)))
 setIdle(peer, 0)
 } else {
-glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind))
+log.Debug(fmt.Sprintf("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)))
 d.dropPeer(pid)
 }
 }
@@ -1103,7 +1102,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 // If there's nothing more to fetch, wait or terminate
 if pending() == 0 {
 if !inFlight() && finished {
-glog.V(logger.Debug).Infof("%s fetching completed", kind)
+log.Debug(fmt.Sprintf("%s fetching completed", kind))
 return nil
 }
 break
@@ -1131,15 +1130,15 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 if request == nil {
 continue
 }
-if glog.V(logger.Detail) {
+log.Trace("", "msg", log.Lazy{Fn: func() string {
 if request.From > 0 {
-glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
+return fmt.Sprintf("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
 } else if len(request.Headers) > 0 {
-glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
+return fmt.Sprintf("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
 } else {
-glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
+return fmt.Sprintf("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
 }
-}
+}})
 // Fetch the chunk and make sure any errors return the hashes to the queue
 if fetchHook != nil {
 fetchHook(request.Headers)
@@ -1194,8 +1193,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 if d.headBlock != nil {
 curBlock = d.headBlock().Number()
 }
-glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
-len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock)
+log.Warn(fmt.Sprintf("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
+len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock))

 // If we're already past the pivot point, this could be an attack, thread carefully
 if rollback[len(rollback)-1].Number.Uint64() > pivot {
@@ -1203,7 +1202,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 if atomic.LoadUint32(&d.fsPivotFails) == 0 {
 for _, header := range rollback {
 if header.Number.Uint64() == pivot {
-glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4])
+log.Warn(fmt.Sprintf("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]))
 d.fsPivotLock = header
 }
 }
@@ -1299,7 +1298,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 if n > 0 {
 rollback = append(rollback, chunk[:n]...)
 }
-glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err)
+log.Debug(fmt.Sprintf("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err))
 return errInvalidChain
 }
 // All verifications passed, store newly found uncertain headers
@@ -1311,7 +1310,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 // If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
 if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
 if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
-glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4])
+log.Warn(fmt.Sprintf("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]))
 return errInvalidChain
 }
 }
@@ -1328,7 +1327,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 // Otherwise insert the headers for content retrieval
 inserts := d.queue.Schedule(chunk, origin)
 if len(inserts) != len(chunk) {
-glog.V(logger.Debug).Infof("stale headers")
+log.Debug(fmt.Sprintf("stale headers"))
 return errBadPeer
 }
 }
@@ -1359,10 +1358,10 @@ func (d *Downloader) processContent() error {
 d.chainInsertHook(results)
 }
 // Actually import the blocks
-if glog.V(logger.Debug) {
+log.Debug("", "msg", log.Lazy{Fn: func() string {
 first, last := results[0].Header, results[len(results)-1].Header
-glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
-}
+return fmt.Sprintf("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
+}})
 for len(results) != 0 {
 // Check for any termination requests
 select {
@@ -1396,14 +1395,14 @@ func (d *Downloader) processContent() error {
 case len(receipts) > 0:
 index, err = d.insertReceipts(blocks, receipts)
 if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
-glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4])
+log.Debug(fmt.Sprintf("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]))
 index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
 }
 default:
 index, err = d.insertBlocks(blocks)
 }
 if err != nil {
-glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
+log.Debug(fmt.Sprintf("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err))
 return errInvalidChain
 }
 // Shift the results to the next batch
@@ -1471,7 +1470,7 @@ func (d *Downloader) qosTuner() {
 atomic.StoreUint64(&d.rttConfidence, conf)

 // Log the new QoS values and sleep until the next RTT
-glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
+log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
 select {
 case <-d.quitCh:
 return
@@ -1501,7 +1500,7 @@ func (d *Downloader) qosReduceConfidence() {
 atomic.StoreUint64(&d.rttConfidence, conf)

 rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
-glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
+log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
 }

 // requestRTT returns the current target round trip time for a download request

@@ -30,8 +30,7 @@ import (
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/crypto"
 "github.com/ethereum/go-ethereum/ethdb"
-"github.com/ethereum/go-ethereum/logger"
-"github.com/ethereum/go-ethereum/logger/glog"
+"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/trie"
 "github.com/rcrowley/go-metrics"
 "gopkg.in/karalabe/cookiejar.v2/collections/prque"
@@ -365,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 // Make sure chain order is honoured and preserved throughout
 hash := header.Hash()
 if header.Number == nil || header.Number.Uint64() != from {
-glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from)
+log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from))
 break
 }
 if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
-glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4])
+log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4]))
 break
 }
 // Make sure no duplicate requests are executed
 if _, ok := q.blockTaskPool[hash]; ok {
-glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4])
+log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4]))
 continue
 }
 if _, ok := q.receiptTaskPool[hash]; ok {
-glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4])
+log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]))
 continue
 }
 // Queue the header for content retrieval
@@ -392,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 }
 if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
 // Pivoting point of the fast sync, switch the state retrieval to this
-glog.V(logger.Debug).Infof("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4])
+log.Debug(fmt.Sprintf("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4]))

 q.stateTaskIndex = 0
 q.stateTaskPool = make(map[common.Hash]int)
@@ -873,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 accepted := len(headers) == MaxHeaderFetch
 if accepted {
 if headers[0].Number.Uint64() != request.From {
-glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
+log.Trace(fmt.Sprintf("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From))
 accepted = false
 } else if headers[len(headers)-1].Hash() != target {
-glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
+log.Trace(fmt.Sprintf("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]))
 accepted = false
 }
 }
@@ -884,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 for i, header := range headers[1:] {
 hash := header.Hash()
 if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
-glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
+log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want))
 accepted = false
 break
 }
 if headers[i].Hash() != header.ParentHash {
-glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4])
+log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4]))
 accepted = false
 break
 }
@@ -897,7 +896,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 }
 // If the batch of headers wasn't accepted, mark as unavailable
 if !accepted {
-glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)
+log.Trace(fmt.Sprintf("Peer %s: skeleton filling from header #%d not accepted", id, request.From))

 miss := q.headerPeerMiss[id]
 if miss == nil {
@@ -924,7 +923,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh

 select {
 case headerProcCh <- process:
-glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
+log.Trace(fmt.Sprintf("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number))
 q.headerProced += len(process)
 default:
 }

@ -26,8 +26,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
||||
)
|
||||
|
||||
@ -221,7 +220,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
|
||||
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
|
||||
// returning those that should be handled differently.
|
||||
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
|
||||
glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))
|
||||
log.Trace(fmt.Sprintf("[eth/62] filtering %d headers", len(headers)))
|
||||
|
||||
// Send the filter channel to the fetcher
|
||||
filter := make(chan *headerFilterTask)
|
||||
@ -249,7 +248,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
|
||||
// FilterBodies extracts all the block bodies that were explicitly requested by
|
||||
// the fetcher, returning those that should be handled differently.
|
||||
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
|
||||
glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))
|
||||
log.Trace(fmt.Sprintf("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)))
|
||||
|
||||
// Send the filter channel to the fetcher
|
||||
filter := make(chan *bodyFilterTask)
|
||||
@ -324,14 +323,14 @@ func (f *Fetcher) loop() {
|
||||
|
||||
count := f.announces[notification.origin] + 1
|
||||
if count > hashLimit {
|
||||
glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
|
||||
log.Debug(fmt.Sprintf("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit))
|
||||
propAnnounceDOSMeter.Mark(1)
|
||||
break
|
||||
}
|
||||
// If we have a valid block number, check that it's potentially useful
|
||||
if notification.number > 0 {
|
||||
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
|
||||
glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
|
||||
log.Debug(fmt.Sprintf("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist))
|
||||
propAnnounceDropMeter.Mark(1)
|
||||
break
|
||||
}
|
||||
@ -381,13 +380,15 @@ func (f *Fetcher) loop() {
|
||||
}
|
||||
// Send out all block header requests
|
||||
for peer, hashes := range request {
|
||||
if glog.V(logger.Detail) && len(hashes) > 0 {
|
||||
list := "["
|
||||
for _, hash := range hashes {
|
||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||
}
|
||||
list = list[:len(list)-2] + "]"
|
||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
|
||||
if len(hashes) > 0 {
|
||||
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||
list := "["
|
||||
for _, hash := range hashes {
|
||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||
}
|
||||
list = list[:len(list)-2] + "]"
|
||||
return fmt.Sprintf("[eth/62] Peer %s: fetching headers %s", peer, list)
|
||||
}})
|
||||
}
|
||||
// Create a closure of the fetch and schedule in on a new thread
|
||||
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
|
||||
@ -421,14 +422,16 @@ func (f *Fetcher) loop() {
|
||||
}
|
||||
// Send out all block body requests
|
||||
for peer, hashes := range request {
|
||||
if glog.V(logger.Detail) && len(hashes) > 0 {
|
||||
list := "["
|
||||
for _, hash := range hashes {
|
||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||
}
|
||||
list = list[:len(list)-2] + "]"
|
||||
if len(hashes) > 0 {
|
||||
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
||||
list := "["
|
||||
for _, hash := range hashes {
|
||||
list += fmt.Sprintf("%x…, ", hash[:4])
|
||||
}
|
||||
list = list[:len(list)-2] + "]"
|
||||
|
||||
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
|
||||
return fmt.Sprintf("[eth/62] Peer %s: fetching bodies %s", peer, list)
|
||||
}})
|
||||
}
|
||||
// Create a closure of the fetch and schedule in on a new thread
if f.completingHook != nil {
@ -462,7 +465,7 @@ func (f *Fetcher) loop() {
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
// If the delivered header does not match the promised number, drop the announcer
if header.Number.Uint64() != announce.number {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
log.Trace(fmt.Sprintf("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()))
f.dropPeer(announce.origin)
f.forgetHash(hash)
continue
@ -474,7 +477,7 @@ func (f *Fetcher) loop() {

// If the block is empty (header only), short circuit into the final import queue
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))

block := types.NewBlockWithHeader(header)
block.ReceivedAt = task.time
@ -486,7 +489,7 @@ func (f *Fetcher) loop() {
// Otherwise add to the list of blocks needing completion
incomplete = append(incomplete, announce)
} else {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
f.forgetHash(hash)
}
} else {
@ -617,14 +620,14 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit))
propBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist))
propBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
@ -641,9 +644,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
if f.queueChangeHook != nil {
f.queueChangeHook(op.block.Hash(), true)
}
if glog.V(logger.Debug) {
glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
}
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
}})
}
}

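The enqueue hunk above is the one place in this commit where the change is not a plain glog-to-log swap: the old `if glog.V(logger.Debug) { ... }` guard becomes a log.Lazy value, so the fmt.Sprintf only runs when a handler actually accepts Debug records. A minimal sketch of that pattern; blockNum and queueSize are placeholder variables, not names from the repository:

    // The closure is only invoked if the Debug record passes the handler's
    // level filter, mirroring the old glog.V(logger.Debug) verbosity check.
    log.Debug("", "msg", log.Lazy{Fn: func() string {
        return fmt.Sprintf("queued block #%d, total %v", blockNum, queueSize)
    }})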
@ -654,14 +657,14 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
hash := block.Hash()

// Run the import on a new thread
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4])
log.Debug(fmt.Sprintf("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]))
go func() {
defer func() { f.done <- hash }()

// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
if parent == nil {
glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4])
log.Debug(fmt.Sprintf("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]))
return
}
// Quickly validate the header and propagate the block if it passes
@ -676,13 +679,13 @@ func (f *Fetcher) insert(peer string, block *types.Block) {

default:
// Something went very wrong, drop the peer
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
log.Debug(fmt.Sprintf("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err))
f.dropPeer(peer)
return
}
// Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil {
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err)
log.Warn(fmt.Sprintf("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err))
return
}
// If import succeeded, broadcast the block

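Across all of the files in this commit the conversion follows one mechanical rule: the glog verbosity level picks the log15 level (logger.Detail → log.Trace, logger.Debug → log.Debug, logger.Info → log.Info, logger.Warn → log.Warn, logger.Error → log.Error), and the printf-style message is preserved by wrapping it in fmt.Sprintf. An illustrative before/after pair, not a literal line from the diff; hash and n are placeholders:

    // before
    glog.V(logger.Detail).Infof("propagated block %x to %d peers", hash[:4], n)
    // after
    log.Trace(fmt.Sprintf("propagated block %x to %d peers", hash[:4], n))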
@ -17,6 +17,7 @@
package gasprice

import (
"fmt"
"math/big"
"math/rand"
"sync"
@ -25,8 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/log"
)

const (
@ -176,7 +176,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) {
self.lastBase = newBase
self.lastBaseMutex.Unlock()

glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", i, newBase.Int64())
log.Trace(fmt.Sprintf("Processed block #%v, base price is %v\n", i, newBase.Int64()))
}

// returns the lowers possible price with which a tx was or could have been included

@ -33,8 +33,7 @@ import (
"github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
@ -116,7 +115,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
}
// Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
log.Info(fmt.Sprintf("blockchain not empty, fast sync disabled"))
fastSync = false
}
if fastSync {
@ -179,7 +178,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled")
log.Debug(fmt.Sprint("Bad Block Reporting is enabled"))
manager.badBlockReportingEnabled = true
}

@ -200,12 +199,12 @@ func (pm *ProtocolManager) removePeer(id string) {
if peer == nil {
return
}
glog.V(logger.Debug).Infoln("Removing peer", id)
log.Debug(fmt.Sprint("Removing peer", id))

// Unregister the peer from the downloader and Ethereum peer set
pm.downloader.UnregisterPeer(id)
if err := pm.peers.Unregister(id); err != nil {
glog.V(logger.Error).Infoln("Removal failed:", err)
log.Error(fmt.Sprint("Removal failed:", err))
}
// Hard disconnect at the networking layer
if peer != nil {
@ -227,7 +226,7 @@ func (pm *ProtocolManager) Start() {
}

func (pm *ProtocolManager) Stop() {
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
log.Info(fmt.Sprint("Stopping ethereum protocol handler..."))

pm.txSub.Unsubscribe() // quits txBroadcastLoop
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
@ -248,7 +247,7 @@ func (pm *ProtocolManager) Stop() {
// Wait for all peer handler goroutines and the loops to come down.
pm.wg.Wait()

glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
log.Info(fmt.Sprint("Ethereum protocol handler stopped"))
}

func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@ -262,21 +261,21 @@ func (pm *ProtocolManager) handle(p *peer) error {
return p2p.DiscTooManyPeers
}

glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))

// Execute the Ethereum handshake
td, head, genesis := pm.blockchain.Status()
if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
return err
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
// Register the peer locally
glog.V(logger.Detail).Infof("%v: adding peer", p)
log.Trace(fmt.Sprintf("%v: adding peer", p))
if err := pm.peers.Register(p); err != nil {
glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
return err
}
defer pm.removePeer(p.id)
@ -297,7 +296,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
}
// Start a timer to disconnect if the peer doesn't reply in time
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
glog.V(logger.Debug).Infof("%v: timed out DAO fork-check, dropping", p)
log.Debug(fmt.Sprintf("%v: timed out DAO fork-check, dropping", p))
pm.removePeer(p.id)
})
// Make sure it's cleaned up if the peer dies off
@ -311,7 +310,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
// main loop. handle incoming messages.
for {
if err := pm.handleMsg(p); err != nil {
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
return err
}
}
@ -387,7 +386,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
)
if next <= current {
infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
glog.V(logger.Warn).Infof("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos)
log.Warn(fmt.Sprintf("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos))
unknown = true
} else {
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
@ -435,7 +434,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
// If we're seemingly on the same chain, disable the drop timer
if verifyDAO {
glog.V(logger.Debug).Infof("%v: seems to be on the same side of the DAO fork", p)
log.Debug(fmt.Sprintf("%v: seems to be on the same side of the DAO fork", p))
p.forkDrop.Stop()
p.forkDrop = nil
return nil
@ -452,10 +451,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {

// Validate the header and either drop the peer or continue
if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
glog.V(logger.Debug).Infof("%v: verified to be on the other side of the DAO fork, dropping", p)
log.Debug(fmt.Sprintf("%v: verified to be on the other side of the DAO fork, dropping", p))
return err
}
glog.V(logger.Debug).Infof("%v: verified to be on the same side of the DAO fork", p)
log.Debug(fmt.Sprintf("%v: verified to be on the same side of the DAO fork", p))
return nil
}
// Irrelevant of the fork checks, send the header to the fetcher just in case
@ -464,7 +463,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
if err != nil {
glog.V(logger.Debug).Infoln(err)
log.Debug(fmt.Sprint(err))
}
}

@ -517,7 +516,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
if err != nil {
glog.V(logger.Debug).Infoln(err)
log.Debug(fmt.Sprint(err))
}
}

@ -556,7 +555,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
// Deliver all to the downloader
if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err)
log.Debug(fmt.Sprintf("failed to deliver node state data: %v", err))
}

case p.version >= eth63 && msg.Code == GetReceiptsMsg:
@ -587,7 +586,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
// If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil {
glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
} else {
receipts = append(receipts, encoded)
bytes += len(encoded)
@ -603,7 +602,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
// Deliver all to the downloader
if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err)
log.Debug(fmt.Sprintf("failed to deliver receipts: %v", err))
}

case msg.Code == NewBlockHashesMsg:
@ -696,7 +695,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
} else {
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
log.Error(fmt.Sprintf("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]))
return
}
// Send the block to a subset of our peers
@ -704,14 +703,14 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
for _, peer := range transfer {
peer.SendNewBlock(block, td)
}
glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
log.Trace(fmt.Sprintf("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)))
}
// Otherwise if the block is indeed in out own chain, announce it
if pm.blockchain.HasBlock(hash) {
for _, peer := range peers {
peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
}
glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
log.Trace(fmt.Sprintf("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)))
}
}

@ -724,7 +723,7 @@ func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction)
for _, peer := range peers {
peer.SendTransactions(types.Transactions{tx})
}
glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers")
log.Trace(fmt.Sprint("broadcast tx to", len(peers), "peers"))
}

// Mined broadcast loop

eth/peer.go
@ -25,8 +25,7 @@ import (

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
"gopkg.in/fatih/set.v0"
@ -192,41 +191,41 @@ func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
// RequestHeaders is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher.
func (p *peer) RequestOneHeader(hash common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash)
log.Debug(fmt.Sprintf("%v fetching a single header: %x", p, hash))
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
}

// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
}

// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestNodeData(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes))
log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(hashes)))
return p2p.Send(p.rw, GetNodeDataMsg, hashes)
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
return p2p.Send(p.rw, GetReceiptsMsg, hashes)
}


@ -30,8 +30,7 @@ import (
)

func init() {
// glog.SetToStderr(true)
// glog.SetV(6)
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat())))
}

var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

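In the test helper above, the new commented-out line replaces the old glog.SetToStderr/glog.SetV toggles: uncommented, it routes everything down to trace level to stderr while the tests run. A minimal sketch of the enabled form, assuming the test file imports os and the go-ethereum log package:

    func init() {
        // Emit every record down to trace level on stderr during the tests.
        log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace,
            log.StreamHandler(os.Stderr, log.TerminalFormat())))
    }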
eth/sync.go
@ -17,6 +17,7 @@
package eth

import (
"fmt"
"math/rand"
"sync/atomic"
"time"
@ -24,8 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
)

@ -87,7 +87,7 @@ func (pm *ProtocolManager) txsyncLoop() {
delete(pending, s.p.ID())
}
// Send the pack in the background.
glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size)
log.Trace(fmt.Sprintf("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size))
sending = true
go func() { done <- pack.p.SendTransactions(pack.txs) }()
}
@ -117,7 +117,7 @@ func (pm *ProtocolManager) txsyncLoop() {
sending = false
// Stop tracking peers that cause send failures.
if err != nil {
glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err)
log.Debug(fmt.Sprintf("%v: tx send failed: %v", pack.p.Peer, err))
delete(pending, pack.p.ID())
}
// Schedule the next send.
@ -187,7 +187,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
if atomic.LoadUint32(&pm.fastSync) == 1 {
// Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
log.Info(fmt.Sprintf("fast sync complete, auto disabling"))
atomic.StoreUint32(&pm.fastSync, 0)
}
}