cmd, eth: separate out FakePeer for future reuse

This commit is contained in:
Péter Szilágyi
2017-10-10 15:51:09 +03:00
parent 345332906c
commit 35767dfd0c
3 changed files with 192 additions and 152 deletions

View File

@@ -19,7 +19,6 @@ package main
import (
"encoding/json"
"fmt"
"math/big"
"os"
"runtime"
"strconv"
@@ -97,13 +96,15 @@ if already existing.`,
copydbCommand = cli.Command{
Action: utils.MigrateFlags(copyDb),
Name: "copydb",
Usage: "Copy from one chain DB into another using the downloader",
ArgsUsage: "<filename>",
Usage: "Create a local chain from a target chaindata folder",
ArgsUsage: "<sourceChaindataDir>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.SyncModeFlag,
utils.FakePoWFlag,
utils.TestnetFlag,
utils.RinkebyFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -286,172 +287,44 @@ func exportChain(ctx *cli.Context) error {
return nil
}
// localPeer is a downloader-facing shim that satisfies sync requests directly
// from a local chain database instead of a remote network peer.
//
// NOTE: field order matters — it is constructed with a positional struct
// literal elsewhere in this file.
type localPeer struct {
	chainDb ethdb.Database         // source key-value store the data is read from
	hc      *core.HeaderChain      // header chain used to resolve hashes and numbers
	dl      *downloader.Downloader // downloader the retrieved data is delivered into
}
// Head retrieves the hash and number of the current head header of the
// backing local header chain.
func (lp *localPeer) Head() (common.Hash, *big.Int) {
	current := lp.hc.CurrentHeader()
	return current.Hash(), current.Number
}
// RequestHeadersByHash serves a downloader header retrieval request rooted at
// the given hash, walking the local header chain and delivering the collected
// batch through the downloader under the "local" peer id.
//
// amount caps the batch size, skip is the gap between consecutive headers and
// reverse selects the traversal direction. Always returns nil: a gap in the
// local chain simply terminates the batch early rather than erroring.
func (lp *localPeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
	var (
		headers []*types.Header
		unknown bool
	)
	for !unknown && len(headers) < amount {
		// Resolve the next anchor header; a miss ends the response.
		origin := lp.hc.GetHeaderByHash(hash)
		if origin == nil {
			break
		}
		number := origin.Number.Uint64()
		headers = append(headers, origin)
		if reverse {
			// Step skip+1 parents back to find the next (older) anchor hash.
			for i := 0; i < int(skip)+1; i++ {
				if header := lp.hc.GetHeader(hash, number); header != nil {
					hash = header.ParentHash
					number--
				} else {
					unknown = true
					break
				}
			}
		} else {
			// Jump skip+1 blocks forward, then verify the candidate links back
			// to the current hash through its ancestor list before accepting it.
			var (
				current = origin.Number.Uint64()
				next    = current + uint64(skip) + 1
			)
			if header := lp.hc.GetHeaderByNumber(next); header != nil {
				if lp.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
					hash = header.Hash()
				} else {
					unknown = true
				}
			} else {
				unknown = true
			}
		}
	}
	lp.dl.DeliverHeaders("local", headers)
	return nil
}
// RequestHeadersByNumber serves a downloader header retrieval request anchored
// at the given block number, delivering at most amount headers spaced skip
// blocks apart (ascending or descending per reverse) through the downloader
// under the "local" peer id. Always returns nil; a missing header or an
// underflow past genesis just ends the batch early.
func (lp *localPeer) RequestHeadersByNumber(num uint64, amount int, skip int, reverse bool) error {
	var (
		batch []*types.Header
		done  bool
	)
	for !done && len(batch) < amount {
		header := lp.hc.GetHeaderByNumber(num)
		if header == nil {
			break
		}
		// Advance the cursor for the next iteration; going below zero in
		// reverse mode terminates the batch after this header.
		switch {
		case !reverse:
			num += uint64(skip + 1)
		case num >= uint64(skip+1):
			num -= uint64(skip + 1)
		default:
			done = true
		}
		batch = append(batch, header)
	}
	lp.dl.DeliverHeaders("local", batch)
	return nil
}
// RequestBodies serves a downloader block body retrieval request, loading the
// transactions and uncle lists for each requested hash from the local
// database and delivering them under the "local" peer id.
//
// Hashes whose block cannot be found locally are skipped rather than
// dereferenced, so a partially present source store cannot cause a panic.
// Always returns nil.
func (lp *localPeer) RequestBodies(hashes []common.Hash) error {
	var (
		transactions [][]*types.Transaction
		uncles       [][]*types.Header
	)
	for _, hash := range hashes {
		block := core.GetBlock(lp.chainDb, hash, lp.hc.GetBlockNumber(hash))
		if block == nil {
			// Fix: GetBlock returns nil for unknown blocks; the original code
			// would nil-dereference on block.Transactions() here.
			continue
		}
		transactions = append(transactions, block.Transactions())
		uncles = append(uncles, block.Uncles())
	}
	lp.dl.DeliverBodies("local", transactions, uncles)
	return nil
}
// RequestReceipts serves a downloader receipt retrieval request, looking up
// each requested block's receipts in the local database and delivering them
// under the "local" peer id. Always returns nil.
func (lp *localPeer) RequestReceipts(hashes []common.Hash) error {
	var results [][]*types.Receipt
	for _, h := range hashes {
		number := lp.hc.GetBlockNumber(h)
		results = append(results, core.GetBlockReceipts(lp.chainDb, h, number))
	}
	lp.dl.DeliverReceipts("local", results)
	return nil
}
// RequestNodeData serves a downloader node data retrieval request, reading the
// raw database entry for each requested hash straight from the backing
// key-value store and delivering whatever was found under the "local" peer
// id. Entries that cannot be read are silently omitted. Always returns nil.
func (lp *localPeer) RequestNodeData(hashes []common.Hash) error {
	var entries [][]byte
	for _, h := range hashes {
		blob, err := lp.chainDb.Get(h.Bytes())
		if err != nil {
			continue // unknown entry, omit from the response
		}
		entries = append(entries, blob)
	}
	lp.dl.DeliverNodeData("local", entries)
	return nil
}
func copyDb(ctx *cli.Context) error {
// Ensure we have a source chain directory to copy
if len(ctx.Args()) != 1 {
utils.Fatalf("This command requires an argument.")
utils.Fatalf("Source chaindata directory path argument missing")
}
// Initialize a new chain for the running node to sync into
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
start := time.Now()
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
mux := new(event.TypeMux)
dl := downloader.New(syncmode, chainDb, mux, chain, nil, nil)
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
var err error
filename := ctx.Args().First()
cache := ctx.GlobalInt(utils.CacheFlag.Name)
handles := 256
localdb, err := ethdb.NewLDBDatabase(filename, cache, handles)
// Create a source peer to satisfy downloader requests from
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
if err != nil {
return err
}
hc, err := core.NewHeaderChain(localdb, chain.Config(), chain.Engine(), func() bool { return false })
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
if err != nil {
return err
}
peer := &localPeer{localdb, hc, dl}
if err := dl.RegisterPeer("local", 63, peer); err != nil {
peer := downloader.NewFakePeer("local", db, hc, dl)
if err = dl.RegisterPeer("local", 63, peer); err != nil {
return err
}
// Synchronise with the simulated peer
start := time.Now()
currentHeader := hc.CurrentHeader()
if err := dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
return err
}
for dl.Synchronising() {
time.Sleep(10 * time.Millisecond)
}
fmt.Printf("Database copy done in %v\n", time.Since(start))
fmt.Printf("Database copy done in %v", time.Since(start))
// Compact the entire database to remove any sync overhead
start = time.Now()
fmt.Println("Compacting entire database...")
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {