Compare commits
66 Commits

3e0641923d
74925e547f
7afdf792ab
c28fd9c079
4baa574410
9f45d6efae
cbbc54c495
7cee2509c0
48b484c5ac
06125bff89
9fea1a5cf5
e401f5ff10
6a53ce29a4
8f24097836
4b9c0ea76d
3bb8a4ed3f
983cb25a07
68754f3931
5d4512b113
d21303f9dd
4fde0cabc1
4a04127ce3
2de37f28e0
5a88a7cf5b
1d25039ff5
8ead45c20b
82a9e11058
b35e4fce99
e24e05dd01
90dedea40f
c0c01612e9
b2b14e6ce3
290d6bd903
9c2ac6fbd5
a00dc5095b
ff90894636
9e04c5ec83
abf2d7d74f
1976bb3df0
350a0490ab
37564ceda6
28c5a8a54b
298a19bbc6
c47052a580
93da0cf8a1
79ce5537ab
8e7bee9b56
f538259187
b1be979443
e997f92caf
56434bfa89
6793ffa12b
5413df1dfa
c374447401
105922180f
3a57eecc69
997b55236e
4c268e65a0
0b53e485d8
9e22e912e3
123864fc05
7163a6664e
4366c45e4e
3a52c4dcf2
722b742780
508891e64b
build/ci.go (11 changes)

@@ -215,9 +215,9 @@ func doInstall(cmdline []string) {
     var minor int
     fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)

-    if minor < 11 {
+    if minor < 13 {
         log.Println("You have Go version", runtime.Version())
-        log.Println("go-ethereum requires at least Go version 1.11 and cannot")
+        log.Println("go-ethereum requires at least Go version 1.13 and cannot")
         log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
         os.Exit(1)
     }
@@ -233,6 +233,7 @@ func doInstall(cmdline []string) {
     if runtime.GOARCH == "arm64" {
         goinstall.Args = append(goinstall.Args, "-p", "1")
     }
+    goinstall.Args = append(goinstall.Args, "-trimpath")
     goinstall.Args = append(goinstall.Args, "-v")
     goinstall.Args = append(goinstall.Args, packages...)
     build.MustRun(goinstall)
@@ -241,6 +242,7 @@ func doInstall(cmdline []string) {

     // Seems we are cross compiling, work around forbidden GOBIN
     goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
+    goinstall.Args = append(goinstall.Args, "-trimpath")
     goinstall.Args = append(goinstall.Args, "-v")
     goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...)
     goinstall.Args = append(goinstall.Args, packages...)
@@ -884,11 +886,12 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
         "PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
     }
     for _, e := range os.Environ() {
-        if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") {
+        if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") {
             continue
         }
         cmd.Env = append(cmd.Env, e)
     }
+    cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
     return cmd
 }
@@ -957,7 +960,7 @@ func doXCodeFramework(cmdline []string) {

     if *local {
         // If we're building locally, use the build folder and stop afterwards
-        bind.Dir, _ = filepath.Abs(GOBIN)
+        bind.Dir = GOBIN
         build.MustRun(bind)
         return
     }
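The first hunk gates compilation on the Go minor version by scanning it out of `runtime.Version()`. A standalone sketch of that check, runnable on its own (the threshold of 13 mirrors the diff; everything else is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"runtime"
	"strings"
)

func main() {
	// runtime.Version() returns e.g. "go1.13.8"; strip the "go1." prefix
	// and scan the leading integer to get the minor version.
	var minor int
	fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)

	if minor < 13 {
		log.Println("You have Go version", runtime.Version())
		log.Println("go-ethereum requires at least Go version 1.13 and cannot")
		log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
		os.Exit(1)
	}
	fmt.Println("toolchain OK:", runtime.Version())
}
```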
@@ -94,7 +94,7 @@ with minimal requirements.

 On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it.

-[qubes-client.py](qubes/client/qubes-client.py):
+[qubes-client.py](qubes/qubes-client.py):

 ```python
@@ -21,6 +21,7 @@ import (
     "encoding/base64"
     "encoding/hex"
     "fmt"
+    "io"
     "io/ioutil"
     "net"
     "os"
@@ -69,22 +70,30 @@ func enrdump(ctx *cli.Context) error {
     if err != nil {
         return fmt.Errorf("INVALID: %v", err)
     }
-    fmt.Print(dumpRecord(r))
+    dumpRecord(os.Stdout, r)
     return nil
 }

 // dumpRecord creates a human-readable description of the given node record.
-func dumpRecord(r *enr.Record) string {
-    out := new(bytes.Buffer)
-    if n, err := enode.New(enode.ValidSchemes, r); err != nil {
+func dumpRecord(out io.Writer, r *enr.Record) {
+    n, err := enode.New(enode.ValidSchemes, r)
+    if err != nil {
         fmt.Fprintf(out, "INVALID: %v\n", err)
     } else {
         fmt.Fprintf(out, "Node ID: %v\n", n.ID())
+        dumpNodeURL(out, n)
     }
     kv := r.AppendElements(nil)[1:]
     fmt.Fprintf(out, "Record has sequence number %d and %d key/value pairs.\n", r.Seq(), len(kv)/2)
     fmt.Fprint(out, dumpRecordKV(kv, 2))
-    return out.String()
+}
+
+func dumpNodeURL(out io.Writer, n *enode.Node) {
+    var key enode.Secp256k1
+    if n.Load(&key) != nil {
+        return // no secp256k1 public key
+    }
+    fmt.Fprintf(out, "URLv4: %s\n", n.URLv4())
 }

 func dumpRecordKV(kv []interface{}, indent int) string {
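The `dumpRecord` rewrite swaps string building for an `io.Writer` parameter, so the command can stream to `os.Stdout` while a test captures the same output in a buffer. A minimal sketch of that pattern, with a hypothetical `describe` helper standing in for `dumpRecord`:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// describe writes a human-readable summary to out. Taking io.Writer
// instead of returning a string avoids an intermediate buffer and lets
// the caller pick the destination.
func describe(out io.Writer, id string, pairs int) {
	fmt.Fprintf(out, "Node ID: %v\n", id)
	fmt.Fprintf(out, "Record has %d key/value pairs.\n", pairs)
}

func main() {
	// Production path: stream straight to stdout.
	describe(os.Stdout, "enr:-example", 4)

	// Test path: capture into a bytes.Buffer and assert on its contents.
	var buf bytes.Buffer
	describe(&buf, "enr:-example", 4)
	fmt.Print(buf.String())
}
```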
@@ -235,23 +235,20 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
     if err != nil {
         return nil, err
     }

     // Assemble the Ethereum light client protocol
-    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-        cfg := eth.DefaultConfig
-        cfg.SyncMode = downloader.LightSync
-        cfg.NetworkId = network
-        cfg.Genesis = genesis
-        return les.New(ctx, &cfg)
-    }); err != nil {
-        return nil, err
+    cfg := eth.DefaultConfig
+    cfg.SyncMode = downloader.LightSync
+    cfg.NetworkId = network
+    cfg.Genesis = genesis
+    lesBackend, err := les.New(stack, &cfg)
+    if err != nil {
+        return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err)
     }

     // Assemble the ethstats monitoring and reporting service'
     if stats != "" {
-        if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-            var serv *les.LightEthereum
-            ctx.Service(&serv)
-            return ethstats.New(stats, nil, serv)
-        }); err != nil {
+        if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
             return nil, err
         }
     }
@@ -268,7 +265,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
     // Attach to the client and retrieve and interesting metadatas
     api, err := stack.Attach()
     if err != nil {
-        stack.Stop()
+        stack.Close()
         return nil, err
     }
     client := ethclient.NewClient(api)
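This hunk shows the refactor that recurs throughout the compare: instead of registering a constructor callback via `stack.Register` and later fishing the service back out, the service is built directly against the stack, and the returned handle (`lesBackend` above) is passed to dependents such as ethstats. A toy sketch of that shape, using hypothetical `Stack`/`Service` types rather than the real node API:

```go
package main

import "fmt"

// Stack is a stand-in for a node that tracks its services.
type Stack struct{ services []*Service }

// Service is a stand-in for something like the LES backend.
type Service struct{ name string }

// NewService constructs a service against the stack and returns the
// instance, so the caller keeps a direct handle to it.
func NewService(stack *Stack, name string) (*Service, error) {
	s := &Service{name: name}
	stack.services = append(stack.services, s)
	return s, nil
}

func main() {
	stack := new(Stack)

	// Construct directly and keep the handle...
	les, err := NewService(stack, "les")
	if err != nil {
		panic(err)
	}
	// ...so a dependent service can receive it without the old
	// lookup-by-type dance inside a registration callback.
	stats, _ := NewService(stack, "ethstats("+les.name+")")
	fmt.Println(len(stack.services), stats.name)
}
```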
@@ -49,7 +49,7 @@
     <div class="row">
         <div class="col-lg-8 col-lg-offset-2">
             <div class="input-group">
-                <input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address...">
+                <input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address..."/>
                 <span class="input-group-btn">
                     <button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
                     <ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
@@ -239,8 +239,8 @@ func initGenesis(ctx *cli.Context) error {
     if err := json.NewDecoder(file).Decode(genesis); err != nil {
         utils.Fatalf("invalid genesis file: %v", err)
     }
-    // Open an initialise both full and light databases
-    stack := makeFullNode(ctx)
+    // Open and initialise both full and light databases
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     for _, name := range []string{"chaindata", "lightchaindata"} {
@@ -277,7 +277,8 @@ func importChain(ctx *cli.Context) error {
     utils.SetupMetrics(ctx)
     // Start system runtime metrics collection
     go metrics.CollectProcessMetrics(3 * time.Second)
-    stack := makeFullNode(ctx)
+
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     chain, db := utils.MakeChain(ctx, stack, false)
@@ -371,7 +372,8 @@ func exportChain(ctx *cli.Context) error {
     if len(ctx.Args()) < 1 {
         utils.Fatalf("This command requires an argument.")
     }
-    stack := makeFullNode(ctx)
+
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     chain, _ := utils.MakeChain(ctx, stack, true)
@@ -406,7 +408,8 @@ func importPreimages(ctx *cli.Context) error {
     if len(ctx.Args()) < 1 {
         utils.Fatalf("This command requires an argument.")
     }
-    stack := makeFullNode(ctx)
+
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     db := utils.MakeChainDatabase(ctx, stack)
@@ -424,7 +427,8 @@ func exportPreimages(ctx *cli.Context) error {
     if len(ctx.Args()) < 1 {
         utils.Fatalf("This command requires an argument.")
     }
-    stack := makeFullNode(ctx)
+
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     db := utils.MakeChainDatabase(ctx, stack)
@@ -446,7 +450,7 @@ func copyDb(ctx *cli.Context) error {
         utils.Fatalf("Source ancient chain directory path argument missing")
     }
     // Initialize a new chain for the running node to sync into
-    stack := makeFullNode(ctx)
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     chain, chainDb := utils.MakeChain(ctx, stack, false)
@@ -554,7 +558,7 @@ func confirmAndRemoveDB(database string, kind string) {
 }

 func dump(ctx *cli.Context) error {
-    stack := makeFullNode(ctx)
+    stack, _ := makeConfigNode(ctx)
     defer stack.Close()

     chain, chainDb := utils.MakeChain(ctx, stack, true)
@@ -28,6 +28,7 @@ import (

     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/eth"
+    "github.com/ethereum/go-ethereum/internal/ethapi"
     "github.com/ethereum/go-ethereum/node"
     "github.com/ethereum/go-ethereum/params"
     whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
@@ -104,6 +105,7 @@ func defaultNodeConfig() node.Config {
     return cfg
 }

+// makeConfigNode loads geth configuration and creates a blank node instance.
 func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
     // Load defaults.
     cfg := gethConfig{
@@ -144,9 +146,11 @@ func enableWhisper(ctx *cli.Context) bool {
     return false
 }

-func makeFullNode(ctx *cli.Context) *node.Node {
+// makeFullNode loads geth configuration and creates the Ethereum backend.
+func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
     stack, cfg := makeConfigNode(ctx)
-    utils.RegisterEthService(stack, &cfg.Eth)
+
+    backend := utils.RegisterEthService(stack, &cfg.Eth)

     // Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
     shhEnabled := enableWhisper(ctx)
@@ -165,13 +169,13 @@ func makeFullNode(ctx *cli.Context) *node.Node {
     }
     // Configure GraphQL if requested
     if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
-        utils.RegisterGraphQLService(stack, cfg.Node.GraphQLEndpoint(), cfg.Node.GraphQLCors, cfg.Node.GraphQLVirtualHosts, cfg.Node.HTTPTimeouts)
+        utils.RegisterGraphQLService(stack, backend, cfg.Node)
     }
     // Add the Ethereum Stats daemon if requested.
     if cfg.Ethstats.URL != "" {
-        utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
+        utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
     }
-    return stack
+    return stack, backend
 }

 // dumpConfig is the dumpconfig command.
@@ -78,12 +78,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Cons
 func localConsole(ctx *cli.Context) error {
     // Create and start the node based on the CLI flags
     prepare(ctx)
-    node := makeFullNode(ctx)
-    startNode(ctx, node)
-    defer node.Close()
+    stack, backend := makeFullNode(ctx)
+    startNode(ctx, stack, backend)
+    defer stack.Close()

     // Attach to the newly started node and start the JavaScript console
-    client, err := node.Attach()
+    client, err := stack.Attach()
     if err != nil {
         utils.Fatalf("Failed to attach to the inproc geth: %v", err)
     }
@@ -190,12 +190,12 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
 // everything down.
 func ephemeralConsole(ctx *cli.Context) error {
     // Create and start the node based on the CLI flags
-    node := makeFullNode(ctx)
-    startNode(ctx, node)
-    defer node.Close()
+    stack, backend := makeFullNode(ctx)
+    startNode(ctx, stack, backend)
+    defer stack.Close()

     // Attach to the newly started node and start the JavaScript console
-    client, err := node.Attach()
+    client, err := stack.Attach()
     if err != nil {
         utils.Fatalf("Failed to attach to the inproc geth: %v", err)
     }
@@ -119,8 +119,7 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc
     } else {
         // Force chain initialization
         args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir}
-        geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...)
-        geth.WaitExit()
+        runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...).WaitExit()
     }
     // Retrieve the DAO config flag from the database
     path := filepath.Join(datadir, "geth", "chaindata")
@@ -36,8 +36,8 @@ import (
     "github.com/ethereum/go-ethereum/eth/downloader"
     "github.com/ethereum/go-ethereum/ethclient"
     "github.com/ethereum/go-ethereum/internal/debug"
+    "github.com/ethereum/go-ethereum/internal/ethapi"
     "github.com/ethereum/go-ethereum/internal/flags"
-    "github.com/ethereum/go-ethereum/les"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/node"
@@ -108,6 +108,8 @@ var (
     utils.CacheFlag,
     utils.CacheDatabaseFlag,
     utils.CacheTrieFlag,
+    utils.CacheTrieJournalFlag,
+    utils.CacheTrieRejournalFlag,
     utils.CacheGCFlag,
     utils.CacheSnapshotFlag,
     utils.CacheNoPrefetchFlag,
@@ -169,8 +171,6 @@ var (
     utils.LegacyRPCCORSDomainFlag,
     utils.LegacyRPCVirtualHostsFlag,
     utils.GraphQLEnabledFlag,
-    utils.GraphQLListenAddrFlag,
-    utils.GraphQLPortFlag,
     utils.GraphQLCORSDomainFlag,
     utils.GraphQLVirtualHostsFlag,
    utils.HTTPApiFlag,
@@ -348,18 +348,20 @@ func geth(ctx *cli.Context) error {
     if args := ctx.Args(); len(args) > 0 {
         return fmt.Errorf("invalid command: %q", args[0])
     }

     prepare(ctx)
-    node := makeFullNode(ctx)
-    defer node.Close()
-    startNode(ctx, node)
-    node.Wait()
+    stack, backend := makeFullNode(ctx)
+    defer stack.Close()
+
+    startNode(ctx, stack, backend)
+    stack.Wait()
     return nil
 }

 // startNode boots up the system node and all registered protocols, after which
 // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
 // miner.
-func startNode(ctx *cli.Context, stack *node.Node) {
+func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) {
     debug.Memsize.Add("node", stack)

     // Start up the node itself
@@ -379,25 +381,6 @@ func startNode(ctx *cli.Context, stack *node.Node) {
     }
     ethClient := ethclient.NewClient(rpcClient)

-    // Set contract backend for ethereum service if local node
-    // is serving LES requests.
-    if ctx.GlobalInt(utils.LegacyLightServFlag.Name) > 0 || ctx.GlobalInt(utils.LightServeFlag.Name) > 0 {
-        var ethService *eth.Ethereum
-        if err := stack.Service(&ethService); err != nil {
-            utils.Fatalf("Failed to retrieve ethereum service: %v", err)
-        }
-        ethService.SetContractBackend(ethClient)
-    }
-    // Set contract backend for les service if local node is
-    // running as a light client.
-    if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
-        var lesService *les.LightEthereum
-        if err := stack.Service(&lesService); err != nil {
-            utils.Fatalf("Failed to retrieve light ethereum service: %v", err)
-        }
-        lesService.SetContractBackend(ethClient)
-    }
-
     go func() {
         // Open any wallets already attached
         for _, wallet := range stack.AccountManager().Wallets() {
@@ -449,7 +432,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
             if timestamp := time.Unix(int64(done.Latest.Time), 0); time.Since(timestamp) < 10*time.Minute {
                 log.Info("Synchronisation completed", "latestnum", done.Latest.Number, "latesthash", done.Latest.Hash(),
                     "age", common.PrettyAge(timestamp))
-                stack.Stop()
+                stack.Close()
             }
         }
     }()
@@ -461,24 +444,24 @@ func startNode(ctx *cli.Context, stack *node.Node) {
         if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
             utils.Fatalf("Light clients do not support mining")
         }
-        var ethereum *eth.Ethereum
-        if err := stack.Service(&ethereum); err != nil {
+        ethBackend, ok := backend.(*eth.EthAPIBackend)
+        if !ok {
             utils.Fatalf("Ethereum service not running: %v", err)
         }

         // Set the gas price to the limits from the CLI and start mining
         gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
         if ctx.GlobalIsSet(utils.LegacyMinerGasPriceFlag.Name) && !ctx.GlobalIsSet(utils.MinerGasPriceFlag.Name) {
             gasprice = utils.GlobalBig(ctx, utils.LegacyMinerGasPriceFlag.Name)
         }
-        ethereum.TxPool().SetGasPrice(gasprice)
+        ethBackend.TxPool().SetGasPrice(gasprice)
         // start mining
         threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name)
         if ctx.GlobalIsSet(utils.LegacyMinerThreadsFlag.Name) && !ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
             threads = ctx.GlobalInt(utils.LegacyMinerThreadsFlag.Name)
             log.Warn("The flag --minerthreads is deprecated and will be removed in the future, please use --miner.threads")
         }
-        if err := ethereum.StartMining(threads); err != nil {
+        if err := ethBackend.StartMining(threads); err != nil {
             utils.Fatalf("Failed to start mining: %v", err)
         }
     }
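With the backend now threaded through `startNode`, the mining path recovers the concrete type with a checked (comma-ok) type assertion rather than the reflective `stack.Service` lookup. A minimal illustration of the idiom, with stand-in types rather than geth's:

```go
package main

import "fmt"

// Backend is the narrow interface handed around by the CLI plumbing.
type Backend interface{ Name() string }

// EthBackend is a concrete full-node backend with extra methods.
type EthBackend struct{}

func (EthBackend) Name() string      { return "eth" }
func (EthBackend) StartMining() bool { return true }

func main() {
	var backend Backend = EthBackend{}

	// Comma-ok assertion: ok reports whether the dynamic type matches,
	// so a light-client backend fails gracefully instead of panicking.
	ethBackend, ok := backend.(EthBackend)
	if !ok {
		fmt.Println("Ethereum service not running")
		return
	}
	fmt.Println("mining:", ethBackend.StartMining())
}
```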
@@ -200,11 +200,11 @@ func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) {
     return e.inner.Author(header)
 }

-func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
     return e.inner.VerifyHeader(chain, header, seal)
 }

-func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
     return e.inner.VerifyHeaders(chain, headers, seals)
 }

@@ -212,11 +212,11 @@ func (e *NoRewardEngine) VerifyUncles(chain consensus.ChainReader, block *types.
     return e.inner.VerifyUncles(chain, block)
 }

-func (e *NoRewardEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (e *NoRewardEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
     return e.inner.VerifySeal(chain, header)
 }

-func (e *NoRewardEngine) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (e *NoRewardEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
     return e.inner.Prepare(chain, header)
 }

@@ -229,7 +229,7 @@ func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *st
     state.AddBalance(header.Coinbase, reward)
 }

-func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
+func (e *NoRewardEngine) Finalize(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
     uncles []*types.Header) {
     if e.rewardsOn {
         e.inner.Finalize(chain, header, statedb, txs, uncles)
@@ -239,7 +239,7 @@ func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Hea
     }
 }

-func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
+func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
     uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
     if e.rewardsOn {
         return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts)
@@ -252,7 +252,7 @@ func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header
     }
 }

-func (e *NoRewardEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (e *NoRewardEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
     return e.inner.Seal(chain, block, results, stop)
 }

@@ -260,11 +260,11 @@ func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash {
     return e.inner.SealHash(header)
 }

-func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
     return e.inner.CalcDifficulty(chain, time, parent)
 }

-func (e *NoRewardEngine) APIs(chain consensus.ChainReader) []rpc.API {
+func (e *NoRewardEngine) APIs(chain consensus.ChainHeaderReader) []rpc.API {
     return e.inner.APIs(chain)
 }
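`NoRewardEngine` is a wrapper that embeds an inner `consensus.Engine` and forwards most calls, which is why the interface rename only touches method signatures. A compact sketch of that delegation shape with a toy one-method interface:

```go
package main

import "fmt"

// Engine is a stand-in for the consensus engine interface.
type Engine interface {
	Finalize(block string)
}

// rewarder is the wrapped inner engine.
type rewarder struct{}

func (rewarder) Finalize(block string) { fmt.Println("finalize with rewards:", block) }

// NoRewardEngine wraps an inner Engine and overrides only the behavior
// it cares about, forwarding everything else unchanged -- the same
// delegation shape the methods above follow.
type NoRewardEngine struct {
	inner     Engine
	rewardsOn bool
}

func (e *NoRewardEngine) Finalize(block string) {
	if e.rewardsOn {
		e.inner.Finalize(block)
		return
	}
	fmt.Println("finalize without rewards:", block)
}

func main() {
	var e Engine = &NoRewardEngine{inner: rewarder{}}
	e.Finalize("block #1")
}
```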
@@ -109,6 +109,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{
     utils.CacheFlag,
     utils.CacheDatabaseFlag,
     utils.CacheTrieFlag,
+    utils.CacheTrieJournalFlag,
+    utils.CacheTrieRejournalFlag,
     utils.CacheGCFlag,
     utils.CacheSnapshotFlag,
     utils.CacheNoPrefetchFlag,
@@ -140,8 +142,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
     utils.WSApiFlag,
     utils.WSAllowedOriginsFlag,
     utils.GraphQLEnabledFlag,
-    utils.GraphQLListenAddrFlag,
-    utils.GraphQLPortFlag,
     utils.GraphQLCORSDomainFlag,
     utils.GraphQLVirtualHostsFlag,
     utils.RPCGlobalGasCap,
@@ -229,6 +229,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{
     utils.LegacyWSApiFlag,
     utils.LegacyGpoBlocksFlag,
     utils.LegacyGpoPercentileFlag,
+    utils.LegacyGraphQLListenAddrFlag,
+    utils.LegacyGraphQLPortFlag,
     }, debug.DeprecatedFlags...),
 },
 {
@@ -289,7 +289,7 @@ func createNode(ctx *cli.Context) error {
     config.PrivateKey = privKey
     }
     if services := ctx.String("services"); services != "" {
-        config.Services = strings.Split(services, ",")
+        config.Lifecycles = strings.Split(services, ",")
     }
     node, err := client.CreateNode(config)
     if err != nil {
@@ -73,7 +73,7 @@ func StartNode(stack *node.Node) {
     defer signal.Stop(sigc)
     <-sigc
     log.Info("Got interrupt, shutting down...")
-    go stack.Stop()
+    go stack.Close()
     for i := 10; i > 0; i-- {
         <-sigc
         if i > 1 {
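The interrupt handler now tears the stack down with `Close()` instead of `Stop()`. For context, a self-contained sketch of the surrounding signal pattern: trap the first interrupt, begin shutdown in a goroutine, and escalate after repeated interrupts (the counts and messages are illustrative, and the `node` type is a stand-in):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

type node struct{}

// Close is a stand-in for the stack teardown kicked off on interrupt.
func (*node) Close() { log.Println("node closed") }

func main() {
	stack := new(node)

	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigc)

	<-sigc
	log.Println("Got interrupt, shutting down...")
	go stack.Close()

	// Give the teardown a chance, but let repeated interrupts force exit.
	for i := 10; i > 0; i-- {
		<-sigc
		if i > 1 {
			log.Printf("Already shutting down, interrupt more to panic. times: %d", i-1)
		}
	}
	panic("forced shutdown")
}
```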
@@ -19,7 +19,6 @@ package utils

 import (
     "crypto/ecdsa"
-    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -40,6 +39,7 @@ import (
     "github.com/ethereum/go-ethereum/consensus/clique"
     "github.com/ethereum/go-ethereum/consensus/ethash"
     "github.com/ethereum/go-ethereum/core"
+    "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/eth"
@@ -48,6 +48,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/ethstats"
     "github.com/ethereum/go-ethereum/graphql"
+    "github.com/ethereum/go-ethereum/internal/ethapi"
     "github.com/ethereum/go-ethereum/internal/flags"
     "github.com/ethereum/go-ethereum/les"
     "github.com/ethereum/go-ethereum/log"
@@ -62,7 +63,6 @@ import (
     "github.com/ethereum/go-ethereum/p2p/nat"
     "github.com/ethereum/go-ethereum/p2p/netutil"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/rpc"
     whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
     pcsclite "github.com/gballet/go-libpcsclite"
     cli "gopkg.in/urfave/cli.v1"
@@ -360,6 +360,16 @@ var (
     Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)",
     Value: 15,
     }
+    CacheTrieJournalFlag = cli.StringFlag{
+        Name:  "cache.trie.journal",
+        Usage: "Disk journal directory for trie cache to survive node restarts",
+        Value: eth.DefaultConfig.TrieCleanCacheJournal,
+    }
+    CacheTrieRejournalFlag = cli.DurationFlag{
+        Name:  "cache.trie.rejournal",
+        Usage: "Time interval to regenerate the trie cache journal",
+        Value: eth.DefaultConfig.TrieCleanCacheRejournal,
+    }
     CacheGCFlag = cli.IntFlag{
         Name:  "cache.gc",
         Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
@@ -506,6 +516,20 @@ var (
     Usage: "API's offered over the HTTP-RPC interface",
     Value: "",
     }
+    GraphQLEnabledFlag = cli.BoolFlag{
+        Name:  "graphql",
+        Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.",
+    }
+    GraphQLCORSDomainFlag = cli.StringFlag{
+        Name:  "graphql.corsdomain",
+        Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
+        Value: "",
+    }
+    GraphQLVirtualHostsFlag = cli.StringFlag{
+        Name:  "graphql.vhosts",
+        Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+        Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
+    }
     WSEnabledFlag = cli.BoolFlag{
         Name:  "ws",
         Usage: "Enable the WS-RPC server",
@@ -530,30 +554,6 @@ var (
     Usage: "Origins from which to accept websockets requests",
     Value: "",
     }
-    GraphQLEnabledFlag = cli.BoolFlag{
-        Name:  "graphql",
-        Usage: "Enable the GraphQL server",
-    }
-    GraphQLListenAddrFlag = cli.StringFlag{
-        Name:  "graphql.addr",
-        Usage: "GraphQL server listening interface",
-        Value: node.DefaultGraphQLHost,
-    }
-    GraphQLPortFlag = cli.IntFlag{
-        Name:  "graphql.port",
-        Usage: "GraphQL server listening port",
-        Value: node.DefaultGraphQLPort,
-    }
-    GraphQLCORSDomainFlag = cli.StringFlag{
-        Name:  "graphql.corsdomain",
-        Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
-        Value: "",
-    }
-    GraphQLVirtualHostsFlag = cli.StringFlag{
-        Name:  "graphql.vhosts",
-        Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
-        Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
-    }
     ExecFlag = cli.StringFlag{
         Name:  "exec",
         Usage: "Execute JavaScript statement",
@@ -940,13 +940,6 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
 // setGraphQL creates the GraphQL listener interface string from the set
 // command line flags, returning empty if the GraphQL endpoint is disabled.
 func setGraphQL(ctx *cli.Context, cfg *node.Config) {
-    if ctx.GlobalBool(GraphQLEnabledFlag.Name) && cfg.GraphQLHost == "" {
-        cfg.GraphQLHost = "127.0.0.1"
-        if ctx.GlobalIsSet(GraphQLListenAddrFlag.Name) {
-            cfg.GraphQLHost = ctx.GlobalString(GraphQLListenAddrFlag.Name)
-        }
-    }
-    cfg.GraphQLPort = ctx.GlobalInt(GraphQLPortFlag.Name)
     if ctx.GlobalIsSet(GraphQLCORSDomainFlag.Name) {
         cfg.GraphQLCors = splitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name))
     }
@@ -1536,6 +1529,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
     if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
         cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
     }
+    if ctx.GlobalIsSet(CacheTrieJournalFlag.Name) {
+        cfg.TrieCleanCacheJournal = ctx.GlobalString(CacheTrieJournalFlag.Name)
+    }
+    if ctx.GlobalIsSet(CacheTrieRejournalFlag.Name) {
+        cfg.TrieCleanCacheRejournal = ctx.GlobalDuration(CacheTrieRejournalFlag.Name)
+    }
     if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
         cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
     }
@@ -1543,6 +1542,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
     cfg.SnapshotCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheSnapshotFlag.Name) / 100
     }
     if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+        cfg.TrieCleanCache += cfg.SnapshotCache
         cfg.SnapshotCache = 0 // Disabled
     }
     if ctx.GlobalIsSet(DocRootFlag.Name) {
@@ -1611,23 +1611,43 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
     }
     // Create new developer account or reuse existing one
     var (
-        developer accounts.Account
-        err       error
+        developer  accounts.Account
+        passphrase string
+        err        error
     )
-    if accs := ks.Accounts(); len(accs) > 0 {
+    if list := MakePasswordList(ctx); len(list) > 0 {
+        // Just take the first value. Although the function returns a possible multiple values and
+        // some usages iterate through them as attempts, that doesn't make sense in this setting,
+        // when we're definitely concerned with only one account.
+        passphrase = list[0]
+    }
+    // setEtherbase has been called above, configuring the miner address from command line flags.
+    if cfg.Miner.Etherbase != (common.Address{}) {
+        developer = accounts.Account{Address: cfg.Miner.Etherbase}
+    } else if accs := ks.Accounts(); len(accs) > 0 {
         developer = ks.Accounts()[0]
     } else {
-        developer, err = ks.NewAccount("")
+        developer, err = ks.NewAccount(passphrase)
         if err != nil {
             Fatalf("Failed to create developer account: %v", err)
         }
     }
-    if err := ks.Unlock(developer, ""); err != nil {
+    if err := ks.Unlock(developer, passphrase); err != nil {
         Fatalf("Failed to unlock developer account: %v", err)
     }
     log.Info("Using developer account", "address", developer.Address)

     // Create a new developer genesis block or reuse existing one
     cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
+    if ctx.GlobalIsSet(DataDirFlag.Name) {
+        // Check if we have an already initialized chain and fall back to
+        // that if so. Otherwise we need to generate a new genesis spec.
+        chaindb := MakeChainDatabase(ctx, stack)
+        if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
+            cfg.Genesis = nil // fallback to db content
+        }
+        chaindb.Close()
+    }
     if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(LegacyMinerGasPriceFlag.Name) {
         cfg.Miner.GasPrice = big.NewInt(1)
     }
@@ -1655,70 +1675,46 @@ func setDNSDiscoveryDefaults(cfg *eth.Config, genesis common.Hash) {
 }

 // RegisterEthService adds an Ethereum client to the stack.
-func RegisterEthService(stack *node.Node, cfg *eth.Config) {
-    var err error
+func RegisterEthService(stack *node.Node, cfg *eth.Config) ethapi.Backend {
     if cfg.SyncMode == downloader.LightSync {
-        err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-            return les.New(ctx, cfg)
-        })
+        backend, err := les.New(stack, cfg)
+        if err != nil {
+            Fatalf("Failed to register the Ethereum service: %v", err)
+        }
+        return backend.ApiBackend
     } else {
-        err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-            fullNode, err := eth.New(ctx, cfg)
-            if fullNode != nil && cfg.LightServ > 0 {
-                ls, _ := les.NewLesServer(fullNode, cfg)
-                fullNode.AddLesServer(ls)
-            }
-            return fullNode, err
-        })
-    }
-    if err != nil {
-        Fatalf("Failed to register the Ethereum service: %v", err)
+        backend, err := eth.New(stack, cfg)
+        if err != nil {
+            Fatalf("Failed to register the Ethereum service: %v", err)
+        }
+        if cfg.LightServ > 0 {
+            _, err := les.NewLesServer(stack, backend, cfg)
+            if err != nil {
+                Fatalf("Failed to create the LES server: %v", err)
+            }
+        }
+        return backend.APIBackend
     }
 }

 // RegisterShhService configures Whisper and adds it to the given node.
 func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
-    if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
-        return whisper.New(cfg), nil
-    }); err != nil {
+    if _, err := whisper.New(stack, cfg); err != nil {
         Fatalf("Failed to register the Whisper service: %v", err)
     }
 }

 // RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
 // the given node.
-func RegisterEthStatsService(stack *node.Node, url string) {
-    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-        // Retrieve both eth and les services
-        var ethServ *eth.Ethereum
-        ctx.Service(&ethServ)
-
-        var lesServ *les.LightEthereum
-        ctx.Service(&lesServ)
-
-        // Let ethstats use whichever is not nil
-        return ethstats.New(url, ethServ, lesServ)
-    }); err != nil {
+func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) {
+    if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil {
         Fatalf("Failed to register the Ethereum Stats service: %v", err)
     }
 }

 // RegisterGraphQLService is a utility function to construct a new service and register it against a node.
-func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) {
-    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-        // Try to construct the GraphQL service backed by a full node
-        var ethServ *eth.Ethereum
-        if err := ctx.Service(&ethServ); err == nil {
-            return graphql.New(ethServ.APIBackend, endpoint, cors, vhosts, timeouts)
-        }
-        // Try to construct the GraphQL service backed by a light node
-        var lesServ *les.LightEthereum
-        if err := ctx.Service(&lesServ); err == nil {
-            return graphql.New(lesServ.ApiBackend, endpoint, cors, vhosts, timeouts)
-        }
-        // Well, this should not have happened, bail out
-        return nil, errors.New("no Ethereum service")
-    }); err != nil {
+func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.Config) {
+    if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil {
         Fatalf("Failed to register the GraphQL service: %v", err)
     }
 }
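The new `--cache.trie.journal` and `--cache.trie.rejournal` flags follow the standard `urfave/cli` v1 shape used throughout this file: declare typed flags with defaults, then read them back behind a `GlobalIsSet` guard so that config-struct defaults survive when the user says nothing. A minimal standalone sketch (the flag names reuse the diff's; the app itself is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "cache.trie.journal",
			Usage: "Disk journal directory for trie cache to survive node restarts",
			Value: "triecache",
		},
		cli.DurationFlag{
			Name:  "cache.trie.rejournal",
			Usage: "Time interval to regenerate the trie cache journal",
			Value: time.Hour,
		},
	}
	app.Action = func(ctx *cli.Context) error {
		// Only override the config default when the user actually set the flag.
		if ctx.GlobalIsSet("cache.trie.rejournal") {
			fmt.Println("rejournal:", ctx.GlobalDuration("cache.trie.rejournal"))
		}
		fmt.Println("journal dir:", ctx.GlobalString("cache.trie.journal"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```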
|
@@ -89,6 +89,8 @@ var (
|
||||
Name: "testnet",
|
||||
Usage: "Pre-configured test network (Deprecated: Please choose one of --goerli, --rinkeby, or --ropsten.)",
|
||||
}
|
||||
|
||||
// (Deprecated May 2020, shown in aliased flags section)
|
||||
LegacyRPCEnabledFlag = cli.BoolFlag{
|
||||
Name: "rpc",
|
||||
Usage: "Enable the HTTP-RPC server (deprecated, use --http)",
|
||||
@@ -158,6 +160,17 @@ var (
|
||||
Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes) (deprecated, use --bootnodes)",
|
||||
Value: "",
|
||||
}
|
||||
|
||||
// (Deprecated July 2020, shown in aliased flags section)
|
||||
LegacyGraphQLListenAddrFlag = cli.StringFlag{
|
||||
Name: "graphql.addr",
|
||||
Usage: "GraphQL server listening interface (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
|
||||
}
|
||||
LegacyGraphQLPortFlag = cli.IntFlag{
|
||||
Name: "graphql.port",
|
||||
Usage: "GraphQL server listening port (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
|
||||
Value: node.DefaultHTTPPort,
|
||||
}
|
||||
)
|
||||
|
||||
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
|
||||
|
@@ -221,8 +221,7 @@ func initialize() {
     MaxMessageSize:     uint32(*argMaxSize),
     MinimumAcceptedPOW: *argPoW,
     }
-
-    shh = whisper.New(cfg)
+    shh = whisper.StandaloneWhisperService(cfg)

     if *argPoW != whisper.DefaultMinimumPoW {
         err := shh.SetMinimumPoW(*argPoW)
@@ -433,7 +432,7 @@ func run() {
     return
     }
     defer server.Stop()
-    shh.Start(nil)
+    shh.Start()
     defer shh.Stop()

     if !*forwarderMode {
@@ -28,7 +28,7 @@ import (
 // API is a user facing RPC API to allow controlling the signer and voting
 // mechanisms of the proof-of-authority scheme.
 type API struct {
-    chain  consensus.ChainReader
+    chain  consensus.ChainHeaderReader
     clique *Clique
 }
@@ -137,9 +137,8 @@ var (
     errRecentlySigned = errors.New("recently signed")
 )

-// SignerFn is a signer callback function to request a header to be signed by a
-// backing account.
-type SignerFn func(accounts.Account, string, []byte) ([]byte, error)
+// SignerFn hashes and signs the data to be signed by a backing account.
+type SignerFn func(signer accounts.Account, mimeType string, message []byte) ([]byte, error)

 // ecrecover extracts the Ethereum account address from a signed header.
 func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
@@ -213,14 +212,14 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) {
 }

 // VerifyHeader checks whether a header conforms to the consensus rules.
-func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
     return c.verifyHeader(chain, header, nil)
 }

 // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
 // method returns a quit channel to abort the operations and a results channel to
 // retrieve the async verifications (the order is that of the input slice).
-func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
     abort := make(chan struct{})
     results := make(chan error, len(headers))

@@ -242,7 +241,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Hea
 // caller may optionally pass in a batch of parents (ascending order) to avoid
 // looking those up from the database. This is useful for concurrently verifying
 // a batch of new headers.
-func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
     if header.Number == nil {
         return errUnknownBlock
     }
@@ -305,7 +304,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
 // rather depend on a batch of previous headers. The caller may optionally pass
 // in a batch of parents (ascending order) to avoid looking those up from the
 // database. This is useful for concurrently verifying a batch of new headers.
-func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
     // The genesis block is the always valid dead-end
     number := header.Number.Uint64()
     if number == 0 {
@@ -345,7 +344,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
 }

 // snapshot retrieves the authorization snapshot at a given point in time.
-func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
+func (c *Clique) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
     // Search for a snapshot in memory or on disk for checkpoints
     var (
         headers []*types.Header
@@ -436,7 +435,7 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e

 // VerifySeal implements consensus.Engine, checking whether the signature contained
 // in the header satisfies the consensus protocol requirements.
-func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (c *Clique) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
     return c.verifySeal(chain, header, nil)
 }

@@ -444,7 +443,7 @@ func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) e
 // consensus protocol requirements. The method accepts an optional list of parent
 // headers that aren't yet part of the local blockchain to generate the snapshots
 // from.
-func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
     // Verifying the genesis block is not supported
     number := header.Number.Uint64()
     if number == 0 {
@@ -487,7 +486,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p

 // Prepare implements consensus.Engine, preparing all the consensus fields of the
 // header for running the transactions on top.
-func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
     // If the block isn't a checkpoint, cast a random vote (good enough for now)
     header.Coinbase = common.Address{}
     header.Nonce = types.BlockNonce{}
@@ -552,7 +551,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro

 // Finalize implements consensus.Engine, ensuring no uncles are set, nor block
 // rewards given.
-func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
+func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
     // No block rewards in PoA, so the state remains as is and uncles are dropped
     header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
     header.UncleHash = types.CalcUncleHash(nil)
@@ -560,7 +559,7 @@ func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, sta

 // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
 // nor block rewards given, and returns the final block.
-func (c *Clique) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
     // No block rewards in PoA, so the state remains as is and uncles are dropped
     header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
     header.UncleHash = types.CalcUncleHash(nil)
@@ -581,7 +580,7 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) {

 // Seal implements consensus.Engine, attempting to create a sealed block using
 // the local signing credentials.
-func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
     header := block.Header()

     // Sealing the genesis block is not supported
@@ -654,7 +653,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
 // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
 // that a new block should have based on the previous blocks in the chain and the
 // current signer.
-func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
     snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
     if err != nil {
         return nil
@@ -684,7 +683,7 @@ func (c *Clique) Close() error {

 // APIs implements consensus.Engine, returning the user facing RPC API to allow
 // controlling the signer voting.
-func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
+func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API {
     return []rpc.API{{
         Namespace: "clique",
         Version:   "1.0",
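The widened `SignerFn` now carries a mime type, which lets an `accounts.Wallet` back a Clique signer directly: `wallet.SignData` already has the (account, mimeType, message) shape. A hedged wiring sketch, assuming an unlocked wallet that holds the signer account (`clique.Clique.Authorize` and `accounts.Wallet.SignData` are real go-ethereum APIs at this revision; the helper itself is hypothetical):

```go
package main

import (
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/consensus/clique"
)

// authorize wires a wallet into a Clique engine. wallet.SignData has the
// shape (account, mimeType, message) -> (signature, error), which matches
// the new SignerFn exactly, so it can be passed through unchanged.
func authorize(engine *clique.Clique, wallet accounts.Wallet, signer accounts.Account) {
	engine.Authorize(signer.Address, wallet.SignData)
}

func main() {} // wiring only; a real caller supplies a live engine and wallet
```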
@@ -27,9 +27,9 @@ import (
     "github.com/ethereum/go-ethereum/rpc"
 )

-// ChainReader defines a small collection of methods needed to access the local
-// blockchain during header and/or uncle verification.
-type ChainReader interface {
+// ChainHeaderReader defines a small collection of methods needed to access the local
+// blockchain during header verification.
+type ChainHeaderReader interface {
     // Config retrieves the blockchain's chain configuration.
     Config() *params.ChainConfig

@@ -44,6 +44,12 @@ type ChainReader interface {

     // GetHeaderByHash retrieves a block header from the database by its hash.
     GetHeaderByHash(hash common.Hash) *types.Header
+}
+
+// ChainReader defines a small collection of methods needed to access the local
+// blockchain during header and/or uncle verification.
+type ChainReader interface {
+    ChainHeaderReader

     // GetBlock retrieves a block from the database by hash and number.
     GetBlock(hash common.Hash, number uint64) *types.Block
@@ -59,13 +65,13 @@ type Engine interface {
     // VerifyHeader checks whether a header conforms to the consensus rules of a
     // given engine. Verifying the seal may be done optionally here, or explicitly
     // via the VerifySeal method.
-    VerifyHeader(chain ChainReader, header *types.Header, seal bool) error
+    VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error

     // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
     // concurrently. The method returns a quit channel to abort the operations and
     // a results channel to retrieve the async verifications (the order is that of
     // the input slice).
-    VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
+    VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)

     // VerifyUncles verifies that the given block's uncles conform to the consensus
     // rules of a given engine.
@@ -73,18 +79,18 @@ type Engine interface {

     // VerifySeal checks whether the crypto seal on a header is valid according to
     // the consensus rules of the given engine.
-    VerifySeal(chain ChainReader, header *types.Header) error
+    VerifySeal(chain ChainHeaderReader, header *types.Header) error

     // Prepare initializes the consensus fields of a block header according to the
     // rules of a particular engine. The changes are executed inline.
-    Prepare(chain ChainReader, header *types.Header) error
+    Prepare(chain ChainHeaderReader, header *types.Header) error

     // Finalize runs any post-transaction state modifications (e.g. block rewards)
     // but does not assemble the block.
     //
     // Note: The block header and state database might be updated to reflect any
     // consensus rules that happen at finalization (e.g. block rewards).
-    Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+    Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
         uncles []*types.Header)

     // FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
@@ -92,7 +98,7 @@ type Engine interface {
     //
     // Note: The block header and state database might be updated to reflect any
     // consensus rules that happen at finalization (e.g. block rewards).
-    FinalizeAndAssemble(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+    FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
         uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)

     // Seal generates a new sealing request for the given input block and pushes
@@ -100,17 +106,17 @@ type Engine interface {
     //
     // Note, the method returns immediately and will send the result async. More
     // than one result may also be returned depending on the consensus algorithm.
-    Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
+    Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error

     // SealHash returns the hash of a block prior to it being sealed.
     SealHash(header *types.Header) common.Hash

     // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
     // that a new block should have.
-    CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int
+    CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int

     // APIs returns the RPC APIs this consensus engine provides.
-    APIs(chain ChainReader) []rpc.API
+    APIs(chain ChainHeaderReader) []rpc.API

     // Close terminates any background threads maintained by the consensus engine.
     Close() error
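The split redefines `ChainReader` as `ChainHeaderReader` plus block access through Go interface embedding, so every `ChainReader` still satisfies the narrow interface and header-only code can ask for less. A toy demonstration of the mechanism (names echo the diff; bodies are illustrative):

```go
package main

import "fmt"

// HeaderReader is the narrow capability: header-only access.
type HeaderReader interface {
	GetHeader(number uint64) string
}

// BlockReader embeds HeaderReader, so every BlockReader is usable
// wherever only header access is required.
type BlockReader interface {
	HeaderReader
	GetBlock(number uint64) string
}

type chain struct{}

func (chain) GetHeader(n uint64) string { return fmt.Sprintf("header #%d", n) }
func (chain) GetBlock(n uint64) string  { return fmt.Sprintf("block #%d", n) }

// verifyHeader needs only headers, so it asks for the narrow interface;
// uncle verification would take the wider BlockReader instead.
func verifyHeader(c HeaderReader, n uint64) { fmt.Println("verifying", c.GetHeader(n)) }

func main() {
	var c BlockReader = chain{}
	verifyHeader(c, 1) // widening to the embedded interface is implicit
	fmt.Println(c.GetBlock(1))
}
```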
@@ -86,7 +86,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {

 // VerifyHeader checks whether a header conforms to the consensus rules of the
 // stock Ethereum ethash engine.
-func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
 	// If we're running a full engine faking, accept any input as valid
 	if ethash.config.PowMode == ModeFullFake {
 		return nil
@@ -107,7 +107,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
 // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
 // concurrently. The method returns a quit channel to abort the operations and
 // a results channel to retrieve the async verifications.
-func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
 	// If we're running a full engine faking, accept any input as valid
 	if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
 		abort, results := make(chan struct{}), make(chan error, len(headers))
@@ -169,7 +169,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*type
 	return abort, errorsOut
 }

-func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error {
+func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error {
 	var parent *types.Header
 	if index == 0 {
 		parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
@@ -243,7 +243,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
 // verifyHeader checks whether a header conforms to the consensus rules of the
 // stock Ethereum ethash engine.
 // See YP section 4.3.4. "Block Header Validity"
-func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error {
 	// Ensure that the header's extra-data section is of a reasonable size
 	if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
 		return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
@@ -306,7 +306,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
 // CalcDifficulty is the difficulty adjustment algorithm. It returns
 // the difficulty that a new block should have when created at time
 // given the parent block's time and difficulty.
-func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
 	return CalcDifficulty(chain.Config(), time, parent)
 }

@@ -486,14 +486,14 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {

 // VerifySeal implements consensus.Engine, checking whether the given block satisfies
 // the PoW difficulty requirements.
-func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (ethash *Ethash) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
 	return ethash.verifySeal(chain, header, false)
 }

 // verifySeal checks whether a block satisfies the PoW difficulty requirements,
 // either using the usual ethash cache for it, or alternatively using a full DAG
 // to make remote mining fast.
-func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error {
+func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error {
 	// If we're running a fake PoW, accept any seal as valid
 	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
 		time.Sleep(ethash.fakeDelay)
@@ -558,7 +558,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head

 // Prepare implements consensus.Engine, initializing the difficulty field of a
 // header to conform to the ethash protocol. The changes are done inline.
-func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
 	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
 	if parent == nil {
 		return consensus.ErrUnknownAncestor
@@ -569,7 +569,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)

 // Finalize implements consensus.Engine, accumulating the block and uncle rewards,
 // setting the final state on the header
-func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
+func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
 	// Accumulate any block and uncle rewards and commit the final state root
 	accumulateRewards(chain.Config(), state, header, uncles)
 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
@@ -577,7 +577,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header

 // FinalizeAndAssemble implements consensus.Engine, accumulating the block and
 // uncle rewards, setting the final state and assembling the block.
-func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
 	// Accumulate any block and uncle rewards and commit the final state root
 	accumulateRewards(chain.Config(), state, header, uncles)
 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))

@@ -656,7 +656,7 @@ func (ethash *Ethash) Hashrate() float64 {
 }

 // APIs implements consensus.Engine, returning the user facing RPC APIs.
-func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
+func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
 	// In order to ensure backward compatibility, we exposes ethash RPC APIs
 	// to both eth and ethash namespaces.
 	return []rpc.API{
@@ -48,7 +48,7 @@ var (

 // Seal implements consensus.Engine, attempting to find a nonce that satisfies
 // the block's difficulty requirements.
-func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
 	// If we're running a fake PoW, simply return a 0 nonce immediately
 	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
 		header := block.Header()
@@ -109,7 +109,8 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
 	if confOverride != nil {
 		confOverride(ethConf)
 	}
-	if err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return eth.New(ctx, ethConf) }); err != nil {
+	ethBackend, err := eth.New(stack, ethConf)
+	if err != nil {
 		t.Fatalf("failed to register Ethereum protocol: %v", err)
 	}
 	// Start the node and assemble the JavaScript console around it
@@ -135,13 +136,10 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
 		t.Fatalf("failed to create JavaScript console: %v", err)
 	}
 	// Create the final tester and return
-	var ethereum *eth.Ethereum
-	stack.Service(&ethereum)
-
 	return &tester{
 		workspace: workspace,
 		stack:     stack,
-		ethereum:  ethereum,
+		ethereum:  ethBackend,
 		console:   console,
 		input:     prompter,
 		output:    printer,
@@ -116,6 +116,8 @@ const (
 // that's resident in a blockchain.
 type CacheConfig struct {
 	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
+	TrieCleanJournal    string        // Disk journal for saving clean cache entries.
+	TrieCleanRejournal  time.Duration // Time interval to dump clean cache to disk periodically
 	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
 	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
 	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
@@ -220,7 +222,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		cacheConfig:    cacheConfig,
 		db:             db,
 		triegc:         prque.New(nil),
-		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
+		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit, cacheConfig.TrieCleanJournal),
 		quit:           make(chan struct{}),
 		shouldPreserve: shouldPreserve,
 		bodyCache:      bodyCache,
@@ -328,6 +330,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		bc.txLookupLimit = *txLookupLimit
 		go bc.maintainTxIndex(txIndexBlock)
 	}
+	// If periodic cache journal is required, spin it up.
+	if bc.cacheConfig.TrieCleanRejournal > 0 {
+		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
+			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
+			bc.cacheConfig.TrieCleanRejournal = time.Minute
+		}
+		triedb := bc.stateCache.TrieDB()
+		bc.wg.Add(1)
+		go func() {
+			defer bc.wg.Done()
+			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
+		}()
+	}
 	return bc, nil
 }
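The rejournal goroutine registered above follows a standard ticker-plus-quit pattern. A minimal, self-contained sketch of that loop; the `cleanCache` type and `saveCache` method are invented for illustration, and the real work happens in `trie.Database.SaveCachePeriodically`:

```go
package main

import (
	"fmt"
	"time"
)

type cleanCache struct{} // stands in for the fastcache-backed clean cache

// saveCache is a stand-in for persisting the cache contents to 'journal'.
func (c *cleanCache) saveCache(journal string) {
	fmt.Println("journaled clean cache to", journal)
}

// saveCachePeriodically mirrors the pattern from the hunk: one goroutine,
// one ticker, and a quit channel owned by the blockchain object.
func (c *cleanCache) saveCachePeriodically(journal string, interval time.Duration, quit <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			c.saveCache(journal) // periodic snapshot; a crash loses at most one interval
		case <-quit:
			return // the final flush is done separately in Stop(), see the next hunk
		}
	}
}

func main() {
	quit := make(chan struct{})
	c := &cleanCache{}
	go c.saveCachePeriodically("/tmp/clean.journal", 10*time.Millisecond, quit)
	time.Sleep(25 * time.Millisecond) // let a couple of snapshots happen
	close(quit)
}
```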
@@ -919,6 +934,12 @@ func (bc *BlockChain) Stop() {
 			log.Error("Dangling trie nodes after full cleanup")
 		}
 	}
+	// Ensure all live cached entries be saved into disk, so that we can skip
+	// cache warmup when node restarts.
+	if bc.cacheConfig.TrieCleanJournal != "" {
+		triedb := bc.stateCache.TrieDB()
+		triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
+	}
 	log.Info("Blockchain stopped")
 }

@@ -1685,13 +1706,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 	}
 	switch {
 	// First block is pruned, insert as sidechain and reorg only if TD grows enough
-	case err == consensus.ErrPrunedAncestor:
+	case errors.Is(err, consensus.ErrPrunedAncestor):
 		log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
 		return bc.insertSideChain(block, it)

 	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
-	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
-		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
+	case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
+		for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
 			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
 			if err := bc.addFutureBlock(block); err != nil {
 				return it.index, err
@@ -1874,13 +1895,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 		stats.report(chain, it.index, dirty)
 	}
 	// Any blocks remaining here? The only ones we care about are the future ones
-	if block != nil && err == consensus.ErrFutureBlock {
+	if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
 		if err := bc.addFutureBlock(block); err != nil {
 			return it.index, err
 		}
 		block, err = it.next()

-		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
+		for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
 			if err := bc.addFutureBlock(block); err != nil {
 				return it.index, err
 			}
@@ -1908,7 +1929,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
 	// ones. Any other errors means that the block is invalid, and should not be written
 	// to disk.
 	err := consensus.ErrPrunedAncestor
-	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
+	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
 		// Check the canonical state root for that number
 		if number := block.NumberU64(); current.NumberU64() >= number {
 			canonical := bc.GetBlockByNumber(number)
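The `errors.Is` migration in these hunks matters as soon as any layer wraps the consensus sentinels: a plain `==` comparison fails against a wrapped error, while `errors.Is` unwraps the chain. A minimal demonstration:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrUnknownAncestor = errors.New("unknown ancestor") // sentinel, as in package consensus

func verify() error {
	// Some intermediate layer adds context while preserving the cause via %w.
	return fmt.Errorf("block 42: %w", ErrUnknownAncestor)
}

func main() {
	err := verify()
	fmt.Println(err == ErrUnknownAncestor)          // false: the wrapper is a new value
	fmt.Println(errors.Is(err, ErrUnknownAncestor)) // true: Is walks the unwrap chain
}
```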
@@ -155,7 +155,6 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
|
||||
session := &MatcherSession{
|
||||
matcher: m,
|
||||
quit: make(chan struct{}),
|
||||
kill: make(chan struct{}),
|
||||
ctx: ctx,
|
||||
}
|
||||
for _, scheduler := range m.schedulers {
|
||||
@@ -386,10 +385,8 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
|
||||
requests = make(map[uint][]uint64) // Per-bit list of section requests, ordered by section number
|
||||
unallocs = make(map[uint]struct{}) // Bits with pending requests but not allocated to any retriever
|
||||
retrievers chan chan uint // Waiting retrievers (toggled to nil if unallocs is empty)
|
||||
)
|
||||
var (
|
||||
allocs int // Number of active allocations to handle graceful shutdown requests
|
||||
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
|
||||
allocs int // Number of active allocations to handle graceful shutdown requests
|
||||
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
|
||||
)
|
||||
|
||||
// assign is a helper method fo try to assign a pending bit an actively
|
||||
@@ -409,15 +406,12 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
|
||||
for {
|
||||
select {
|
||||
case <-shutdown:
|
||||
// Graceful shutdown requested, wait until all pending requests are honoured
|
||||
// Shutdown requested. No more retrievers can be allocated,
|
||||
// but we still need to wait until all pending requests have returned.
|
||||
shutdown = nil
|
||||
if allocs == 0 {
|
||||
return
|
||||
}
|
||||
shutdown = nil
|
||||
|
||||
case <-session.kill:
|
||||
// Pending requests not honoured in time, hard terminate
|
||||
return
|
||||
|
||||
case req := <-dist:
|
||||
// New retrieval request arrived to be distributed to some fetcher process
|
||||
@@ -499,8 +493,9 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
|
||||
assign(result.Bit)
|
||||
}
|
||||
}
|
||||
// If we're in the process of shutting down, terminate
|
||||
if allocs == 0 && shutdown == nil {
|
||||
|
||||
// End the session when all pending deliveries have arrived.
|
||||
if shutdown == nil && allocs == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -514,7 +509,6 @@ type MatcherSession struct {
|
||||
|
||||
closer sync.Once // Sync object to ensure we only ever close once
|
||||
quit chan struct{} // Quit channel to request pipeline termination
|
||||
kill chan struct{} // Term channel to signal non-graceful forced shutdown
|
||||
|
||||
ctx context.Context // Context used by the light client to abort filtering
|
||||
err atomic.Value // Global error to track retrieval failures deep in the chain
|
||||
@@ -529,7 +523,6 @@ func (s *MatcherSession) Close() {
|
||||
s.closer.Do(func() {
|
||||
// Signal termination and wait for all goroutines to tear down
|
||||
close(s.quit)
|
||||
time.AfterFunc(time.Second, func() { close(s.kill) })
|
||||
s.pend.Wait()
|
||||
})
|
||||
}
|
||||
@@ -542,10 +535,10 @@ func (s *MatcherSession) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AllocateRetrieval assigns a bloom bit index to a client process that can either
|
||||
// allocateRetrieval assigns a bloom bit index to a client process that can either
|
||||
// immediately request and fetch the section contents assigned to this bit or wait
|
||||
// a little while for more sections to be requested.
|
||||
func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
|
||||
func (s *MatcherSession) allocateRetrieval() (uint, bool) {
|
||||
fetcher := make(chan uint)
|
||||
|
||||
select {
|
||||
@@ -557,9 +550,9 @@ func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// PendingSections returns the number of pending section retrievals belonging to
|
||||
// pendingSections returns the number of pending section retrievals belonging to
|
||||
// the given bloom bit index.
|
||||
func (s *MatcherSession) PendingSections(bit uint) int {
|
||||
func (s *MatcherSession) pendingSections(bit uint) int {
|
||||
fetcher := make(chan uint)
|
||||
|
||||
select {
|
||||
@@ -571,9 +564,9 @@ func (s *MatcherSession) PendingSections(bit uint) int {
|
||||
}
|
||||
}
|
||||
|
||||
// AllocateSections assigns all or part of an already allocated bit-task queue
|
||||
// allocateSections assigns all or part of an already allocated bit-task queue
|
||||
// to the requesting process.
|
||||
func (s *MatcherSession) AllocateSections(bit uint, count int) []uint64 {
|
||||
func (s *MatcherSession) allocateSections(bit uint, count int) []uint64 {
|
||||
fetcher := make(chan *Retrieval)
|
||||
|
||||
select {
|
||||
@@ -589,14 +582,10 @@ func (s *MatcherSession) AllocateSections(bit uint, count int) []uint64 {
|
||||
}
|
||||
}
|
||||
|
||||
// DeliverSections delivers a batch of section bit-vectors for a specific bloom
|
||||
// deliverSections delivers a batch of section bit-vectors for a specific bloom
|
||||
// bit index to be injected into the processing pipeline.
|
||||
func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets [][]byte) {
|
||||
select {
|
||||
case <-s.kill:
|
||||
return
|
||||
case s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}:
|
||||
}
|
||||
func (s *MatcherSession) deliverSections(bit uint, sections []uint64, bitsets [][]byte) {
|
||||
s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}
|
||||
}
|
||||
|
||||
// Multiplex polls the matcher session for retrieval tasks and multiplexes it into
|
||||
@@ -608,17 +597,17 @@ func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets []
|
||||
func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
|
||||
for {
|
||||
// Allocate a new bloom bit index to retrieve data for, stopping when done
|
||||
bit, ok := s.AllocateRetrieval()
|
||||
bit, ok := s.allocateRetrieval()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Bit allocated, throttle a bit if we're below our batch limit
|
||||
if s.PendingSections(bit) < batch {
|
||||
if s.pendingSections(bit) < batch {
|
||||
select {
|
||||
case <-s.quit:
|
||||
// Session terminating, we can't meaningfully service, abort
|
||||
s.AllocateSections(bit, 0)
|
||||
s.DeliverSections(bit, []uint64{}, [][]byte{})
|
||||
s.allocateSections(bit, 0)
|
||||
s.deliverSections(bit, []uint64{}, [][]byte{})
|
||||
return
|
||||
|
||||
case <-time.After(wait):
|
||||
@@ -626,13 +615,13 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
|
||||
}
|
||||
}
|
||||
// Allocate as much as we can handle and request servicing
|
||||
sections := s.AllocateSections(bit, batch)
|
||||
sections := s.allocateSections(bit, batch)
|
||||
request := make(chan *Retrieval)
|
||||
|
||||
select {
|
||||
case <-s.quit:
|
||||
// Session terminating, we can't meaningfully service, abort
|
||||
s.DeliverSections(bit, sections, make([][]byte, len(sections)))
|
||||
s.deliverSections(bit, sections, make([][]byte, len(sections)))
|
||||
return
|
||||
|
||||
case mux <- request:
|
||||
@@ -644,7 +633,7 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
|
||||
s.err.Store(result.Error)
|
||||
s.Close()
|
||||
}
|
||||
s.DeliverSections(result.Bit, result.Sections, result.Bitsets)
|
||||
s.deliverSections(result.Bit, result.Sections, result.Bitsets)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
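With the `kill` channel gone, shutdown relies entirely on two idioms visible in the distributor hunk: disabling a select case by setting its channel to nil, and counting outstanding allocations. A toy distributor showing just that mechanism (types and channel protocol invented for illustration):

```go
package main

import "fmt"

func distributor(work chan int, quit chan struct{}) {
	var (
		shutdown = quit // set to nil once a shutdown request is seen
		allocs   int    // outstanding allocations still to be returned
	)
	for {
		select {
		case <-shutdown:
			shutdown = nil // a nil channel never fires: case is now disabled
			if allocs == 0 {
				return // nothing outstanding, exit immediately
			}
		case n := <-work:
			if n > 0 {
				allocs++ // a new allocation went out
			} else {
				allocs-- // a delivery came back
			}
		}
		// End the session when all pending deliveries have arrived.
		if shutdown == nil && allocs == 0 {
			return
		}
	}
}

func main() {
	work, quit := make(chan int), make(chan struct{})
	done := make(chan struct{})
	go func() { distributor(work, quit); close(done) }()
	work <- 1   // one allocation outstanding
	close(quit) // request shutdown; the distributor must keep waiting
	work <- -1  // the delivery arrives, now it may exit
	<-done
	fmt.Println("drained and stopped")
}
```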
@@ -174,7 +174,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
|
||||
// We have the genesis block in database(perhaps in ancient database)
|
||||
// but the corresponding state is missing.
|
||||
header := rawdb.ReadHeader(db, stored, 0)
|
||||
if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0), nil); err != nil {
|
||||
if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0, ""), nil); err != nil {
|
||||
if genesis == nil {
|
||||
genesis = DefaultGenesisBlock()
|
||||
}
|
||||
|
@@ -22,6 +22,7 @@ import (
 	"math"
 	"os"
 	"path/filepath"
+	"sync"
 	"sync/atomic"
 	"time"

@@ -74,6 +75,7 @@ type freezer struct {
 	tables       map[string]*freezerTable // Data tables for storing everything
 	instanceLock fileutil.Releaser        // File-system lock to prevent double opens
 	quit         chan struct{}
+	closeOnce    sync.Once
 }

 // newFreezer creates a chain freezer that moves ancient chain data into
@@ -128,16 +130,18 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {

 // Close terminates the chain freezer, unmapping all the data files.
 func (f *freezer) Close() error {
-	f.quit <- struct{}{}
 	var errs []error
-	for _, table := range f.tables {
-		if err := table.Close(); err != nil {
-			errs = append(errs, err)
+	f.closeOnce.Do(func() {
+		f.quit <- struct{}{}
+		for _, table := range f.tables {
+			if err := table.Close(); err != nil {
+				errs = append(errs, err)
+			}
 		}
-	}
-	if err := f.instanceLock.Release(); err != nil {
-		errs = append(errs, err)
-	}
+		if err := f.instanceLock.Release(); err != nil {
+			errs = append(errs, err)
+		}
+	})
 	if errs != nil {
 		return fmt.Errorf("%v", errs)
 	}
@@ -221,7 +225,7 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td
 	return nil
 }

-// Truncate discards any recent data above the provided threshold number.
+// TruncateAncients discards any recent data above the provided threshold number.
 func (f *freezer) TruncateAncients(items uint64) error {
 	if atomic.LoadUint64(&f.frozen) <= items {
 		return nil
@@ -235,7 +239,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
 	return nil
 }

-// sync flushes all data tables to disk.
+// Sync flushes all data tables to disk.
 func (f *freezer) Sync() error {
 	var errs []error
 	for _, table := range f.tables {
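The freezer's `Close` is now guarded by `sync.Once`, so a second `Close()` is a harmless no-op instead of a double release or a second send on `f.quit` that nobody is listening for. The pattern in isolation (the `resource` type is invented for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

type resource struct {
	quit      chan struct{}
	closeOnce sync.Once
}

func newResource() *resource {
	// Buffered so the signal send never blocks in this toy example.
	return &resource{quit: make(chan struct{}, 1)}
}

func (r *resource) Close() error {
	r.closeOnce.Do(func() {
		r.quit <- struct{}{} // signal the background loop exactly once
		fmt.Println("released underlying handles")
	})
	return nil
}

func main() {
	r := newResource()
	r.Close()
	r.Close() // safe: the release body ran exactly once
}
```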
@@ -100,16 +100,16 @@ type Trie interface {
|
||||
// concurrent use, but does not retain any recent trie nodes in memory. To keep some
|
||||
// historical state in memory, use the NewDatabaseWithCache constructor.
|
||||
func NewDatabase(db ethdb.Database) Database {
|
||||
return NewDatabaseWithCache(db, 0)
|
||||
return NewDatabaseWithCache(db, 0, "")
|
||||
}
|
||||
|
||||
// NewDatabaseWithCache creates a backing store for state. The returned database
|
||||
// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
|
||||
// large memory cache.
|
||||
func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
|
||||
func NewDatabaseWithCache(db ethdb.Database, cache int, journal string) Database {
|
||||
csc, _ := lru.New(codeSizeCacheSize)
|
||||
return &cachingDB{
|
||||
db: trie.NewDatabaseWithCache(db, cache),
|
||||
db: trie.NewDatabaseWithCache(db, cache, journal),
|
||||
codeSizeCache: csc,
|
||||
}
|
||||
}
|
||||
|
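Call sites pick up the extra journal argument as seen in the genesis and tracer hunks. A sketch of the two modes, using the API as it stands at this revision (the path and cache size are illustrative):

```go
package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// No journal: the clean cache starts cold on every restart (pre-change behaviour).
	_ = state.NewDatabaseWithCache(db, 16, "")

	// With a journal path: the clean cache is persisted on shutdown and
	// reloaded on start, skipping warmup.
	_ = state.NewDatabaseWithCache(db, 16, "/tmp/triecache.journal")
}
```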
@@ -98,6 +98,7 @@ var (
 	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
 	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
 	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
+	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

 	// General tx metrics
 	knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
@@ -362,9 +363,11 @@ func (pool *TxPool) loop() {
 				}
 				// Any non-locals old enough should be removed
 				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
-					for _, tx := range pool.queue[addr].Flatten() {
+					list := pool.queue[addr].Flatten()
+					for _, tx := range list {
 						pool.removeTx(tx.Hash(), true)
 					}
+					queuedEvictionMeter.Mark(int64(len(list)))
 				}
 			}
 			pool.mu.Unlock()
@@ -614,6 +617,9 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 		pool.journalTx(from, tx)
 		pool.queueTxEvent(tx)
 		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+
+		// Successful promotion, bump the heartbeat
+		pool.beats[from] = time.Now()
 		return old != nil, nil
 	}
 	// New transaction isn't replacing a pending one, push into queue
@@ -665,6 +671,10 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
 		pool.all.Add(tx)
 		pool.priced.Put(tx)
 	}
+	// If we never record the heartbeat, do it right now.
+	if _, exist := pool.beats[from]; !exist {
+		pool.beats[from] = time.Now()
+	}
 	return old != nil, nil
 }

@@ -696,7 +706,6 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 		// An older transaction was better, discard this
 		pool.all.Remove(hash)
 		pool.priced.Removed(1)
-
 		pendingDiscardMeter.Mark(1)
 		return false
 	}
@@ -704,7 +713,6 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 	if old != nil {
 		pool.all.Remove(old.Hash())
 		pool.priced.Removed(1)
-
 		pendingReplaceMeter.Mark(1)
 	} else {
 		// Nothing was replaced, bump the pending counter
@@ -716,9 +724,10 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 		pool.priced.Put(tx)
 	}
 	// Set the potentially new pending nonce and notify any subsystems of the new tx
-	pool.beats[addr] = time.Now()
 	pool.pendingNonces.set(addr, tx.Nonce()+1)

+	// Successful promotion, bump the heartbeat
+	pool.beats[addr] = time.Now()
 	return true
 }

@@ -891,7 +900,6 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 		// If no more pending transactions are left, remove the list
 		if pending.Empty() {
 			delete(pool.pending, addr)
-			delete(pool.beats, addr)
 		}
 		// Postpone any invalidated transactions
 		for _, tx := range invalids {
@@ -912,6 +920,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 		}
 		if future.Empty() {
 			delete(pool.queue, addr)
+			delete(pool.beats, addr)
 		}
 	}
 }
@@ -1023,7 +1032,10 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
 	defer close(done)

 	var promoteAddrs []common.Address
-	if dirtyAccounts != nil {
+	if dirtyAccounts != nil && reset == nil {
+		// Only dirty accounts need to be promoted, unless we're resetting.
+		// For resets, all addresses in the tx queue will be promoted and
+		// the flatten operation can be avoided.
 		promoteAddrs = dirtyAccounts.flatten()
 	}
 	pool.mu.Lock()
@@ -1039,7 +1051,7 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
 			}
 		}
 		// Reset needs promote for all addresses
-		promoteAddrs = promoteAddrs[:0]
+		promoteAddrs = make([]common.Address, 0, len(pool.queue))
 		for addr := range pool.queue {
 			promoteAddrs = append(promoteAddrs, addr)
 		}
@@ -1229,6 +1241,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {
 			delete(pool.queue, addr)
+			delete(pool.beats, addr)
 		}
 	}
 	return promoted
@@ -1410,10 +1423,9 @@ func (pool *TxPool) demoteUnexecutables() {
 			}
 			pendingGauge.Dec(int64(len(gapped)))
 		}
-		// Delete the entire queue entry if it became empty.
+		// Delete the entire pending entry if it became empty.
 		if list.Empty() {
 			delete(pool.pending, addr)
-			delete(pool.beats, addr)
 		}
 	}
 }
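Taken together, these tx_pool.go hunks turn `pool.beats` into a per-account heartbeat: it is stamped whenever an account gets a transaction queued or promoted, and the eviction loop drops queued non-local transactions once the whole account has been quiet for longer than `Lifetime`. A toy model of that bookkeeping (the types are simplified stand-ins, not the real pool):

```go
package main

import (
	"fmt"
	"time"
)

type pool struct {
	lifetime time.Duration
	beats    map[string]time.Time // last activity per account
	queue    map[string][]string  // queued tx hashes per account
}

// enqueue records a tx and refreshes the account's heartbeat,
// mirroring the enqueueTx/add hunks.
func (p *pool) enqueue(addr, hash string) {
	p.queue[addr] = append(p.queue[addr], hash)
	p.beats[addr] = time.Now()
}

// evict drops every queued tx of accounts idle longer than lifetime,
// mirroring the loop() hunk (which also marks queuedEvictionMeter).
func (p *pool) evict() (dropped int) {
	for addr, txs := range p.queue {
		if time.Since(p.beats[addr]) > p.lifetime {
			dropped += len(txs)
			delete(p.queue, addr)
			delete(p.beats, addr) // beats die with the queue entry, as above
		}
	}
	return dropped
}

func main() {
	p := &pool{lifetime: 50 * time.Millisecond, beats: map[string]time.Time{}, queue: map[string][]string{}}
	p.enqueue("0xabc", "tx1")
	time.Sleep(100 * time.Millisecond)
	fmt.Println("evicted:", p.evict()) // evicted: 1
}
```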
@@ -109,6 +109,7 @@ func validateTxPoolInternals(pool *TxPool) error {
 	if priced := pool.priced.items.Len() - pool.priced.stales; priced != pending+queued {
 		return fmt.Errorf("total priced transaction count %d != %d pending + %d queued", priced, pending, queued)
 	}
+
 	// Ensure the next nonce to assign is the correct one
 	for addr, txs := range pool.pending {
 		// Find the last transaction
@@ -868,7 +869,7 @@ func TestTransactionQueueTimeLimitingNoLocals(t *testing.T) {
 func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
 	// Reduce the eviction interval to a testable amount
 	defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
-	evictionInterval = time.Second
+	evictionInterval = time.Millisecond * 100

 	// Create the pool to test the non-expiration enforcement
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
@@ -905,6 +906,22 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
+	// Allow the eviction interval to run
+	time.Sleep(2 * evictionInterval)
+
+	// Transactions should not be evicted from the queue yet since lifetime duration has not passed
+	pending, queued = pool.Stats()
+	if pending != 0 {
+		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+	}
+	if queued != 2 {
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+	}
+	if err := validateTxPoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}

 	// Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains
 	time.Sleep(2 * config.Lifetime)

@@ -924,6 +941,72 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
 	if err := validateTxPoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
+
+	// remove current transactions and increase nonce to prepare for a reset and cleanup
+	statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2)
+	statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
+	<-pool.requestReset(nil, nil)
+
+	// make sure queue, pending are cleared
+	pending, queued = pool.Stats()
+	if pending != 0 {
+		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+	}
+	if queued != 0 {
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+	}
+	if err := validateTxPoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}
+
+	// Queue gapped transactions
+	if err := pool.AddLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
+		t.Fatalf("failed to add remote transaction: %v", err)
+	}
+	if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil {
+		t.Fatalf("failed to add remote transaction: %v", err)
+	}
+	time.Sleep(5 * evictionInterval) // A half lifetime pass
+
+	// Queue executable transactions, the life cycle should be restarted.
+	if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+		t.Fatalf("failed to add remote transaction: %v", err)
+	}
+	if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
+		t.Fatalf("failed to add remote transaction: %v", err)
+	}
+	time.Sleep(6 * evictionInterval)
+
+	// All gapped transactions shouldn't be kicked out
+	pending, queued = pool.Stats()
+	if pending != 2 {
+		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+	}
+	if queued != 2 {
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+	}
+	if err := validateTxPoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}
+
+	// The whole life time pass after last promotion, kick out stale transactions
+	time.Sleep(2 * config.Lifetime)
+	pending, queued = pool.Stats()
+	if pending != 2 {
+		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+	}
+	if nolocals {
+		if queued != 0 {
+			t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+		}
+	} else {
+		if queued != 1 {
+			t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+		}
+	}
+	if err := validateTxPoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}
 }

 // Tests that even if the transaction count belonging to a single account goes
@@ -147,6 +147,17 @@ func rlpHash(x interface{}) (h common.Hash) {
 	return h
 }

+// EmptyBody returns true if there is no additional 'body' to complete the header
+// that is: no transactions and no uncles.
+func (h *Header) EmptyBody() bool {
+	return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash
+}
+
+// EmptyReceipts returns true if there are no receipts for this header/block.
+func (h *Header) EmptyReceipts() bool {
+	return h.ReceiptHash == EmptyRootHash
+}
+
 // Body is a simple (mutable, non-safe) data container for storing and moving
 // a block's data contents (transactions and uncles) together.
 type Body struct {
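A sketch of why these helpers exist: sync code can skip a body fetch entirely when the header already commits to empty contents. The `needsBody` wrapper below is invented for illustration; `EmptyRootHash` and `EmptyUncleHash` are assumed to be the package's exported empty-trie constants:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// needsBody reports whether a body fetch is required for the given header: a
// header whose TxHash and UncleHash are the well-known empty roots fully
// determines its (empty) body.
func needsBody(h *types.Header) bool {
	return !h.EmptyBody()
}

func main() {
	h := &types.Header{
		TxHash:    types.EmptyRootHash,
		UncleHash: types.EmptyUncleHash,
	}
	fmt.Println("fetch body?", needsBody(h)) // false: nothing to download
}
```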
@@ -20,9 +20,9 @@ func (l Log) MarshalJSON() ([]byte, error) {
 		Data        hexutil.Bytes  `json:"data" gencodec:"required"`
 		BlockNumber hexutil.Uint64 `json:"blockNumber"`
 		TxHash      common.Hash    `json:"transactionHash" gencodec:"required"`
-		TxIndex     hexutil.Uint   `json:"transactionIndex" gencodec:"required"`
+		TxIndex     hexutil.Uint   `json:"transactionIndex"`
 		BlockHash   common.Hash    `json:"blockHash"`
-		Index       hexutil.Uint   `json:"logIndex" gencodec:"required"`
+		Index       hexutil.Uint   `json:"logIndex"`
 		Removed     bool           `json:"removed"`
 	}
 	var enc Log
@@ -46,9 +46,9 @@ func (l *Log) UnmarshalJSON(input []byte) error {
 		Data        *hexutil.Bytes  `json:"data" gencodec:"required"`
 		BlockNumber *hexutil.Uint64 `json:"blockNumber"`
 		TxHash      *common.Hash    `json:"transactionHash" gencodec:"required"`
-		TxIndex     *hexutil.Uint   `json:"transactionIndex" gencodec:"required"`
+		TxIndex     *hexutil.Uint   `json:"transactionIndex"`
 		BlockHash   *common.Hash    `json:"blockHash"`
-		Index       *hexutil.Uint   `json:"logIndex" gencodec:"required"`
+		Index       *hexutil.Uint   `json:"logIndex"`
 		Removed     *bool           `json:"removed"`
 	}
 	var dec Log
@@ -74,17 +74,15 @@ func (l *Log) UnmarshalJSON(input []byte) error {
 		return errors.New("missing required field 'transactionHash' for Log")
 	}
 	l.TxHash = *dec.TxHash
-	if dec.TxIndex == nil {
-		return errors.New("missing required field 'transactionIndex' for Log")
+	if dec.TxIndex != nil {
+		l.TxIndex = uint(*dec.TxIndex)
 	}
-	l.TxIndex = uint(*dec.TxIndex)
 	if dec.BlockHash != nil {
 		l.BlockHash = *dec.BlockHash
 	}
-	if dec.Index == nil {
-		return errors.New("missing required field 'logIndex' for Log")
+	if dec.Index != nil {
+		l.Index = uint(*dec.Index)
 	}
-	l.Index = uint(*dec.Index)
 	if dec.Removed != nil {
 		l.Removed = *dec.Removed
 	}
@@ -44,11 +44,11 @@ type Log struct {
 	// hash of the transaction
 	TxHash common.Hash `json:"transactionHash" gencodec:"required"`
 	// index of the transaction in the block
-	TxIndex uint `json:"transactionIndex" gencodec:"required"`
+	TxIndex uint `json:"transactionIndex"`
 	// hash of the block in which the transaction was included
 	BlockHash common.Hash `json:"blockHash"`
 	// index of the log in the block
-	Index uint `json:"logIndex" gencodec:"required"`
+	Index uint `json:"logIndex"`

 	// The Removed field is true if this log was reverted due to a chain reorganisation.
 	// You must pay attention to this field if you receive logs through a filter query.
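Dropping `gencodec:"required"` changes decode behaviour for payloads that omit the index fields, such as pending logs. The pointer-field trick from the generated code, in miniature:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type logJSON struct {
	TxIndex *uint `json:"transactionIndex"` // pointer: nil means "absent"
	Index   *uint `json:"logIndex"`
}

func main() {
	var dec logJSON
	// Neither optional field is present: previously this was a hard error.
	if err := json.Unmarshal([]byte(`{}`), &dec); err != nil {
		panic(err)
	}
	var txIndex uint
	if dec.TxIndex != nil { // only copy when the field was actually supplied
		txIndex = *dec.TxIndex
	}
	fmt.Println("transactionIndex:", txIndex) // the zero value stands in for absent
}
```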
@@ -22,6 +22,7 @@ import (
 	"io"
 	"math/big"
 	"sync/atomic"
+	"time"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
@@ -36,7 +37,9 @@ var (
 )

 type Transaction struct {
-	data txdata
+	data txdata    // Consensus contents of a transaction
+	time time.Time // Time first seen locally (spam avoidance)
+
 	// caches
 	hash atomic.Value
 	size atomic.Value
@@ -100,8 +103,10 @@ func newTransaction(nonce uint64, to *common.Address, amount *big.Int, gasLimit
 	if gasPrice != nil {
 		d.Price.Set(gasPrice)
 	}
-
-	return &Transaction{data: d}
+	return &Transaction{
+		data: d,
+		time: time.Now(),
+	}
 }

 // ChainId returns which chain id this transaction was signed for (if at all)
@@ -134,8 +139,8 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
 	err := s.Decode(&tx.data)
 	if err == nil {
 		tx.size.Store(common.StorageSize(rlp.ListSize(size)))
+		tx.time = time.Now()
 	}
-
 	return err
 }

@@ -153,7 +158,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
 	if err := dec.UnmarshalJSON(input); err != nil {
 		return err
 	}
-
 	withSignature := dec.V.Sign() != 0 || dec.R.Sign() != 0 || dec.S.Sign() != 0
 	if withSignature {
 		var V byte
@@ -167,8 +171,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
 			return ErrInvalidSig
 		}
 	}
-
-	*tx = Transaction{data: dec}
+	*tx = Transaction{
+		data: dec,
+		time: time.Now(),
+	}
 	return nil
 }

@@ -246,7 +252,10 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e
 	if err != nil {
 		return nil, err
 	}
-	cpy := &Transaction{data: tx.data}
+	cpy := &Transaction{
+		data: tx.data,
+		time: tx.time,
+	}
 	cpy.data.R, cpy.data.S, cpy.data.V = r, s, v
 	return cpy, nil
 }
@@ -306,19 +315,27 @@ func (s TxByNonce) Len() int           { return len(s) }
 func (s TxByNonce) Less(i, j int) bool { return s[i].data.AccountNonce < s[j].data.AccountNonce }
 func (s TxByNonce) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

-// TxByPrice implements both the sort and the heap interface, making it useful
+// TxByPriceAndTime implements both the sort and the heap interface, making it useful
 // for all at once sorting as well as individually adding and removing elements.
-type TxByPrice Transactions
+type TxByPriceAndTime Transactions

-func (s TxByPrice) Len() int           { return len(s) }
-func (s TxByPrice) Less(i, j int) bool { return s[i].data.Price.Cmp(s[j].data.Price) > 0 }
-func (s TxByPrice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s TxByPriceAndTime) Len() int { return len(s) }
+func (s TxByPriceAndTime) Less(i, j int) bool {
+	// If the prices are equal, use the time the transaction was first seen for
+	// deterministic sorting
+	cmp := s[i].data.Price.Cmp(s[j].data.Price)
+	if cmp == 0 {
+		return s[i].time.Before(s[j].time)
+	}
+	return cmp > 0
+}
+func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

-func (s *TxByPrice) Push(x interface{}) {
+func (s *TxByPriceAndTime) Push(x interface{}) {
 	*s = append(*s, x.(*Transaction))
 }

-func (s *TxByPrice) Pop() interface{} {
+func (s *TxByPriceAndTime) Pop() interface{} {
 	old := *s
 	n := len(old)
 	x := old[n-1]
@@ -331,7 +348,7 @@ func (s *TxByPrice) Pop() interface{} {
 // entire batches of transactions for non-executable accounts.
 type TransactionsByPriceAndNonce struct {
 	txs    map[common.Address]Transactions // Per account nonce-sorted list of transactions
-	heads  TxByPrice                       // Next transaction for each unique account (price heap)
+	heads  TxByPriceAndTime                // Next transaction for each unique account (price heap)
 	signer Signer                          // Signer for the set of transactions
 }

@@ -341,8 +358,8 @@ type TransactionsByPriceAndNonce struct {
 // Note, the input map is reowned so the caller should not interact any more with
 // if after providing it to the constructor.
 func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions) *TransactionsByPriceAndNonce {
-	// Initialize a price based heap with the head transactions
-	heads := make(TxByPrice, 0, len(txs))
+	// Initialize a price and received time based heap with the head transactions
+	heads := make(TxByPriceAndTime, 0, len(txs))
 	for from, accTxs := range txs {
 		heads = append(heads, accTxs[0])
 		// Ensure the sender address is from the signer
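The new tie-break in a self-contained form: with equal gas prices, `container/heap` now pops the earliest-seen transaction first, so a spammer cannot reorder equally-priced transactions by re-broadcasting them. The toy `tx` type stands in for `*Transaction`, which compares `data.Price` and `time`:

```go
package main

import (
	"container/heap"
	"fmt"
	"math/big"
	"time"
)

type tx struct {
	price *big.Int
	seen  time.Time
	id    string
}

type byPriceAndTime []*tx

func (s byPriceAndTime) Len() int { return len(s) }
func (s byPriceAndTime) Less(i, j int) bool {
	// Primary key: higher price wins. Secondary key: earlier first-seen time.
	if c := s[i].price.Cmp(s[j].price); c != 0 {
		return c > 0
	}
	return s[i].seen.Before(s[j].seen)
}
func (s byPriceAndTime) Swap(i, j int)       { s[i], s[j] = s[j], s[i] }
func (s *byPriceAndTime) Push(x interface{}) { *s = append(*s, x.(*tx)) }
func (s *byPriceAndTime) Pop() interface{} {
	old := *s
	n := len(old)
	x := old[n-1]
	*s = old[:n-1]
	return x
}

func main() {
	now := time.Now()
	h := &byPriceAndTime{
		{price: big.NewInt(1), seen: now.Add(2 * time.Second), id: "late"},
		{price: big.NewInt(1), seen: now, id: "early"},
		{price: big.NewInt(2), seen: now.Add(time.Hour), id: "rich"},
	}
	heap.Init(h)
	for h.Len() > 0 {
		fmt.Println(heap.Pop(h).(*tx).id) // rich, early, late
	}
}
```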
@@ -22,6 +22,7 @@ import (
 	"encoding/json"
 	"math/big"
 	"testing"
+	"time"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -127,8 +128,8 @@ func TestTransactionPriceNonceSort(t *testing.T) {
 	for i := 0; i < len(keys); i++ {
 		keys[i], _ = crypto.GenerateKey()
 	}
-
 	signer := HomesteadSigner{}
+
 	// Generate a batch of transactions with overlapping values, but shifted nonces
 	groups := map[common.Address]Transactions{}
 	for start, key := range keys {
@@ -155,12 +156,10 @@ func TestTransactionPriceNonceSort(t *testing.T) {
 		// Make sure the nonce order is valid
 		for j, txj := range txs[i+1:] {
 			fromj, _ := Sender(signer, txj)
-
 			if fromi == fromj && txi.Nonce() > txj.Nonce() {
 				t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
 			}
 		}
-
 		// If the next tx has different from account, the price must be lower than the current one
 		if i+1 < len(txs) {
 			next := txs[i+1]
@@ -172,6 +171,54 @@ func TestTransactionPriceNonceSort(t *testing.T) {
 	}
 }

+// Tests that if multiple transactions have the same price, the ones seen earlier
+// are prioritized to avoid network spam attacks aiming for a specific ordering.
+func TestTransactionTimeSort(t *testing.T) {
+	// Generate a batch of accounts to start with
+	keys := make([]*ecdsa.PrivateKey, 5)
+	for i := 0; i < len(keys); i++ {
+		keys[i], _ = crypto.GenerateKey()
+	}
+	signer := HomesteadSigner{}
+
+	// Generate a batch of transactions with overlapping prices, but different creation times
+	groups := map[common.Address]Transactions{}
+	for start, key := range keys {
+		addr := crypto.PubkeyToAddress(key.PublicKey)
+
+		tx, _ := SignTx(NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key)
+		tx.time = time.Unix(0, int64(len(keys)-start))
+
+		groups[addr] = append(groups[addr], tx)
+	}
+	// Sort the transactions and cross check the nonce ordering
+	txset := NewTransactionsByPriceAndNonce(signer, groups)
+
+	txs := Transactions{}
+	for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+		txs = append(txs, tx)
+		txset.Shift()
+	}
+	if len(txs) != len(keys) {
+		t.Errorf("expected %d transactions, found %d", len(keys), len(txs))
+	}
+	for i, txi := range txs {
+		fromi, _ := Sender(signer, txi)
+		if i+1 < len(txs) {
+			next := txs[i+1]
+			fromNext, _ := Sender(signer, next)
+
+			if txi.GasPrice().Cmp(next.GasPrice()) < 0 {
+				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
+			}
+			// Make sure time order is ascending if the txs have the same gas price
+			if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.time.After(next.time) {
+				t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time)
+			}
+		}
+	}
+}
+
 // TestTransactionJSON tests serializing/de-serializing to/from JSON.
 func TestTransactionJSON(t *testing.T) {
 	key, err := crypto.GenerateKey()
@@ -112,7 +112,13 @@ func (c *Contract) validJumpSubdest(udest uint64) bool {
 // isCode returns true if the provided PC location is an actual opcode, as
 // opposed to a data-segment following a PUSHN operation.
 func (c *Contract) isCode(udest uint64) bool {
+	// Do we already have an analysis laying around?
+	if c.analysis != nil {
+		return c.analysis.codeSegment(udest)
+	}
 	// Do we have a contract hash already?
+	// If we do have a hash, that means it's a 'regular' contract. For regular
+	// contracts ( not temporary initcode), we store the analysis in a map
 	if c.CodeHash != (common.Hash{}) {
 		// Does parent context have the analysis?
 		analysis, exist := c.jumpdests[c.CodeHash]
@@ -68,12 +68,11 @@ func enable1884(jt *JumpTable) {
 	jt[EXTCODEHASH].constantGas = params.ExtcodeHashGasEIP1884

 	// New opcode
-	jt[SELFBALANCE] = operation{
+	jt[SELFBALANCE] = &operation{
 		execute:     opSelfBalance,
 		constantGas: GasFastStep,
 		minStack:    minStack(0, 1),
 		maxStack:    maxStack(0, 1),
-		valid:       true,
 	}
 }

@@ -87,12 +86,11 @@ func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx
 // - Adds an opcode that returns the current chain’s EIP-155 unique identifier
 func enable1344(jt *JumpTable) {
 	// New opcode
-	jt[CHAINID] = operation{
+	jt[CHAINID] = &operation{
 		execute:     opChainID,
 		constantGas: GasQuickStep,
 		minStack:    minStack(0, 1),
 		maxStack:    maxStack(0, 1),
-		valid:       true,
 	}
 }

@@ -113,29 +111,26 @@ func enable2200(jt *JumpTable) {
 // - Adds opcodes that jump to and return from subroutines
 func enable2315(jt *JumpTable) {
 	// New opcode
-	jt[BEGINSUB] = operation{
+	jt[BEGINSUB] = &operation{
 		execute:     opBeginSub,
 		constantGas: GasQuickStep,
 		minStack:    minStack(0, 0),
 		maxStack:    maxStack(0, 0),
-		valid:       true,
 	}
 	// New opcode
-	jt[JUMPSUB] = operation{
+	jt[JUMPSUB] = &operation{
 		execute:     opJumpSub,
 		constantGas: GasSlowStep,
 		minStack:    minStack(1, 0),
 		maxStack:    maxStack(1, 0),
 		jumps:       true,
-		valid:       true,
 	}
 	// New opcode
-	jt[RETURNSUB] = operation{
+	jt[RETURNSUB] = &operation{
 		execute:     opReturnSub,
 		constantGas: GasFastStep,
 		minStack:    minStack(0, 0),
 		maxStack:    maxStack(0, 0),
-		valid:       true,
 		jumps:       true,
 	}
 }
@@ -32,7 +32,7 @@ type Config struct {
 	NoRecursion             bool // Disables call, callcode, delegate call and create
 	EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages

-	JumpTable [256]operation // EVM instruction table, automatically populated if unset
+	JumpTable [256]*operation // EVM instruction table, automatically populated if unset

 	EWASMInterpreter string // External EWASM interpreter options
 	EVMInterpreter   string // External EVM interpreter options
@@ -96,7 +96,7 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
 		// We use the STOP instruction whether to see
 		// the jump table was initialised. If it was not
 		// we'll set the default jump table.
-		if !cfg.JumpTable[STOP].valid {
+		if cfg.JumpTable[STOP] == nil {
 			var jt JumpTable
 			switch {
 			case evm.chainRules.IsYoloV1:
@@ -221,7 +221,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 		// enough stack items available to perform the operation.
 		op = contract.GetOp(pc)
 		operation := in.cfg.JumpTable[op]
-		if !operation.valid {
+		if operation == nil {
 			return nil, &ErrInvalidOpCode{opcode: op}
 		}
 		// Validate stack
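Switching the table to `*operation` makes nil the natural "undefined opcode" marker, which is why the `valid` flag disappears across eips.go and the interpreter. A toy dispatcher under the same scheme; gas and stack metadata are elided:

```go
package main

import "fmt"

type operation struct {
	name    string
	execute func()
}

func main() {
	var jumpTable [256]*operation // nil entries are invalid opcodes by construction

	const STOP = 0x00
	jumpTable[STOP] = &operation{name: "STOP", execute: func() {}}

	for _, op := range []byte{STOP, 0xfe} {
		if entry := jumpTable[op]; entry == nil {
			fmt.Printf("opcode %#x: invalid\n", op) // replaces the old !entry.valid check
		} else {
			fmt.Printf("opcode %#x: %s\n", op, entry.name)
		}
	}
}
```

A side benefit of pointer entries: assigning and copying the 256-slot table moves words instead of whole structs, and EIP-enable patches can install a new entry with a single pointer write.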
File diff suppressed because it is too large.
@@ -23,6 +23,7 @@ import (

 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/bloombits"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -33,6 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/gasprice"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/miner"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
 )
@@ -257,6 +259,10 @@ func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions,
 	return b.eth.TxPool().Content()
 }

+func (b *EthAPIBackend) TxPool() *core.TxPool {
+	return b.eth.TxPool()
+}
+
 func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
 	return b.eth.TxPool().SubscribeNewTxsEvent(ch)
 }
@@ -307,3 +313,19 @@ func (b *EthAPIBackend) ServiceFilter(ctx context.Context, session *bloombits.Ma
 		go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)
 	}
 }
+
+func (b *EthAPIBackend) Engine() consensus.Engine {
+	return b.eth.engine
+}
+
+func (b *EthAPIBackend) CurrentHeader() *types.Header {
+	return b.eth.blockchain.CurrentHeader()
+}
+
+func (b *EthAPIBackend) Miner() *miner.Miner {
+	return b.eth.Miner()
+}
+
+func (b *EthAPIBackend) StartMining(threads int) error {
+	return b.eth.StartMining(threads)
+}
@@ -147,7 +147,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl

 	// Ensure we have a valid starting state before doing any work
 	origin := start.NumberU64()
-	database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16) // Chain tracing will probably start at genesis
+	database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16, "") // Chain tracing will probably start at genesis

 	if number := start.NumberU64(); number > 0 {
 		start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1)
@@ -641,7 +641,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
 	}
 	// Otherwise try to reexec blocks until we find a state or reach our limit
 	origin := block.NumberU64()
-	database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16)
+	database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16, "")

 	for i := uint64(0); i < reexec; i++ {
 		block = api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
@@ -26,7 +26,6 @@ import (
 	"sync/atomic"

 	"github.com/ethereum/go-ethereum/accounts"
-	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/consensus"
@@ -54,15 +53,6 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 )

-type LesServer interface {
-	Start(srvr *p2p.Server)
-	Stop()
-	APIs() []rpc.API
-	Protocols() []p2p.Protocol
-	SetBloomBitsIndexer(bbIndexer *core.ChainIndexer)
-	SetContractBackend(bind.ContractBackend)
-}
-
 // Ethereum implements the Ethereum full node service.
 type Ethereum struct {
 	config *Config
@@ -71,7 +61,6 @@ type Ethereum struct {
 	txPool          *core.TxPool
 	blockchain      *core.BlockChain
 	protocolManager *ProtocolManager
-	lesServer       LesServer
 	dialCandidates  enode.Iterator

 	// DB interfaces
@@ -94,25 +83,14 @@ type Ethereum struct {
 	networkID     uint64
 	netRPCService *ethapi.PublicNetAPI

+	p2pServer *p2p.Server
+
 	lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
 }

-func (s *Ethereum) AddLesServer(ls LesServer) {
-	s.lesServer = ls
-	ls.SetBloomBitsIndexer(s.bloomIndexer)
-}
-
-// SetClient sets a rpc client which connecting to our local node.
-func (s *Ethereum) SetContractBackend(backend bind.ContractBackend) {
-	// Pass the rpc client to les server if it is enabled.
-	if s.lesServer != nil {
-		s.lesServer.SetContractBackend(backend)
-	}
-}
-
 // New creates a new Ethereum object (including the
 // initialisation of the common Ethereum object)
-func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
+func New(stack *node.Node, config *Config) (*Ethereum, error) {
 	// Ensure configuration values are compatible and sane
 	if config.SyncMode == downloader.LightSync {
 		return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
@@ -136,7 +114,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)

 	// Assemble the Ethereum object
-	chainDb, err := ctx.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/")
+	chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/")
 	if err != nil {
 		return nil, err
 	}
@@ -149,15 +127,16 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	eth := &Ethereum{
 		config:            config,
 		chainDb:           chainDb,
-		eventMux:          ctx.EventMux,
-		accountManager:    ctx.AccountManager,
-		engine:            CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.Miner.Notify, config.Miner.Noverify, chainDb),
+		eventMux:          stack.EventMux(),
+		accountManager:    stack.AccountManager(),
+		engine:            CreateConsensusEngine(stack, chainConfig, &config.Ethash, config.Miner.Notify, config.Miner.Noverify, chainDb),
 		closeBloomHandler: make(chan struct{}),
 		networkID:         config.NetworkId,
 		gasPrice:          config.Miner.GasPrice,
 		etherbase:         config.Miner.Etherbase,
 		bloomRequests:     make(chan chan *bloombits.Retrieval),
 		bloomIndexer:      NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
+		p2pServer:         stack.Server(),
 	}

 	bcVersion := rawdb.ReadDatabaseVersion(chainDb)
@@ -183,6 +162,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		}
 		cacheConfig = &core.CacheConfig{
 			TrieCleanLimit:      config.TrieCleanCache,
+			TrieCleanJournal:    stack.ResolvePath(config.TrieCleanCacheJournal),
+			TrieCleanRejournal:  config.TrieCleanCacheRejournal,
 			TrieCleanNoPrefetch: config.NoPrefetch,
 			TrieDirtyLimit:      config.TrieDirtyCache,
 			TrieDirtyDisabled:   config.NoPruning,
@@ -203,7 +184,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	eth.bloomIndexer.Start(eth.blockchain)

 	if config.TxPool.Journal != "" {
-		config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)
+		config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
 	}
 	eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain)

@@ -219,18 +200,25 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)
 	eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))

-	eth.APIBackend = &EthAPIBackend{ctx.ExtRPCEnabled(), eth, nil}
+	eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), eth, nil}
 	gpoParams := config.GPO
 	if gpoParams.Default == nil {
 		gpoParams.Default = config.Miner.GasPrice
 	}
 	eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)

-	eth.dialCandidates, err = eth.setupDiscovery(&ctx.Config.P2P)
+	eth.dialCandidates, err = eth.setupDiscovery(&stack.Config().P2P)
 	if err != nil {
 		return nil, err
 	}

+	// Start the RPC service
+	eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, eth.NetVersion())
+
+	// Register the backend on the node
+	stack.RegisterAPIs(eth.APIs())
+	stack.RegisterProtocols(eth.Protocols())
+	stack.RegisterLifecycle(eth)
 	return eth, nil
 }
@@ -252,7 +240,7 @@ func makeExtraData(extra []byte) []byte {
|
||||
}
|
||||
|
||||
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
|
||||
func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
|
||||
func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
|
||||
// If proof-of-authority is requested, set it up
|
||||
if chainConfig.Clique != nil {
|
||||
return clique.New(chainConfig.Clique, db)
|
||||
@@ -270,7 +258,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainCo
|
||||
return ethash.NewShared()
|
||||
default:
|
||||
engine := ethash.New(ethash.Config{
|
||||
CacheDir: ctx.ResolvePath(config.CacheDir),
|
||||
CacheDir: stack.ResolvePath(config.CacheDir),
|
||||
CachesInMem: config.CachesInMem,
|
||||
CachesOnDisk: config.CachesOnDisk,
|
||||
CachesLockMmap: config.CachesLockMmap,
|
||||
@@ -289,18 +277,9 @@ func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainCo
|
||||
func (s *Ethereum) APIs() []rpc.API {
|
||||
apis := ethapi.GetAPIs(s.APIBackend)
|
||||
|
||||
// Append any APIs exposed explicitly by the les server
|
||||
if s.lesServer != nil {
|
||||
apis = append(apis, s.lesServer.APIs()...)
|
||||
}
|
||||
// Append any APIs exposed explicitly by the consensus engine
|
||||
apis = append(apis, s.engine.APIs(s.BlockChain())...)
|
||||
|
||||
// Append any APIs exposed explicitly by the les server
|
||||
if s.lesServer != nil {
|
||||
apis = append(apis, s.lesServer.APIs()...)
|
||||
}
|
||||
|
||||
// Append all the local APIs and return
|
||||
return append(apis, []rpc.API{
|
||||
{
|
||||
@@ -515,8 +494,9 @@ func (s *Ethereum) NetVersion() uint64 { return s.networkID }
|
||||
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
|
||||
func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.protocolManager.acceptTxs) == 1 }
|
||||
func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning }
|
||||
func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer }
|
||||
|
||||
// Protocols implements node.Service, returning all the currently configured
|
||||
// Protocols returns all the currently configured
|
||||
// network protocols to start.
|
||||
func (s *Ethereum) Protocols() []p2p.Protocol {
|
||||
protos := make([]p2p.Protocol, len(ProtocolVersions))
|
||||
@@ -525,47 +505,35 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
|
||||
protos[i].Attributes = []enr.Entry{s.currentEthEntry()}
|
||||
protos[i].DialCandidates = s.dialCandidates
|
||||
}
|
||||
if s.lesServer != nil {
|
||||
protos = append(protos, s.lesServer.Protocols()...)
|
||||
}
|
||||
return protos
|
||||
}
|
||||
|
||||
// Start implements node.Service, starting all internal goroutines needed by the
|
||||
// Start implements node.Lifecycle, starting all internal goroutines needed by the
|
||||
// Ethereum protocol implementation.
|
||||
func (s *Ethereum) Start(srvr *p2p.Server) error {
|
||||
s.startEthEntryUpdate(srvr.LocalNode())
|
||||
func (s *Ethereum) Start() error {
|
||||
s.startEthEntryUpdate(s.p2pServer.LocalNode())
|
||||
|
||||
// Start the bloom bits servicing goroutines
|
||||
s.startBloomHandlers(params.BloomBitsBlocks)
|
||||
|
||||
// Start the RPC service
|
||||
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.NetVersion())
|
||||
|
||||
// Figure out a max peers count based on the server limits
|
||||
maxPeers := srvr.MaxPeers
|
||||
maxPeers := s.p2pServer.MaxPeers
|
||||
if s.config.LightServ > 0 {
|
||||
if s.config.LightPeers >= srvr.MaxPeers {
|
||||
return fmt.Errorf("invalid peer config: light peer count (%d) >= total peer count (%d)", s.config.LightPeers, srvr.MaxPeers)
|
||||
if s.config.LightPeers >= s.p2pServer.MaxPeers {
|
||||
return fmt.Errorf("invalid peer config: light peer count (%d) >= total peer count (%d)", s.config.LightPeers, s.p2pServer.MaxPeers)
|
||||
}
|
||||
maxPeers -= s.config.LightPeers
|
||||
}
|
||||
// Start the networking layer and the light server if requested
|
||||
s.protocolManager.Start(maxPeers)
|
||||
if s.lesServer != nil {
|
||||
s.lesServer.Start(srvr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop implements node.Service, terminating all internal goroutines used by the
|
||||
// Stop implements node.Lifecycle, terminating all internal goroutines used by the
|
||||
// Ethereum protocol.
|
||||
func (s *Ethereum) Stop() error {
|
||||
// Stop all the peer-related stuff first.
|
||||
s.protocolManager.Stop()
|
||||
if s.lesServer != nil {
|
||||
s.lesServer.Stop()
|
||||
}
|
||||
|
||||
// Then stop everything else.
|
||||
s.bloomIndexer.Close()
|
||||
|
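Editor's note: every change in the eth/backend.go hunk above is one refactor seen from different angles — `eth.Ethereum` is now built against the `*node.Node` stack instead of a `node.ServiceContext`, and it wires itself in through `RegisterAPIs`, `RegisterProtocols` and `RegisterLifecycle` rather than being constructed by the node. A minimal sketch of that registration pattern, assuming only the `Start`/`Stop` lifecycle contract visible in this diff (the `demoService` type is invented for illustration):

```go
package example

import "github.com/ethereum/go-ethereum/node"

// demoService is a hypothetical backend showing the shape eth.New now
// follows: construct against the stack, then register explicitly.
type demoService struct{}

func (s *demoService) Start() error { return nil } // spin up background work
func (s *demoService) Stop() error  { return nil } // tear it down again

func registerDemo(stack *node.Node) *demoService {
	svc := &demoService{}
	stack.RegisterLifecycle(svc) // the node no longer discovers services itself
	return svc
}
```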
eth/config.go
@@ -57,14 +57,16 @@ var DefaultConfig = Config{
DatasetsOnDisk: 2,
DatasetsLockMmap: false,
},
NetworkId: 1,
LightPeers: 100,
UltraLightFraction: 75,
DatabaseCache: 512,
TrieCleanCache: 256,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
SnapshotCache: 256,
NetworkId: 1,
LightPeers: 100,
UltraLightFraction: 75,
DatabaseCache: 512,
TrieCleanCache: 154,
TrieCleanCacheJournal: "triecache",
TrieCleanCacheRejournal: 60 * time.Minute,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
SnapshotCache: 102,
Miner: miner.Config{
GasFloor: 8000000,
GasCeil: 8000000,
@@ -139,10 +141,12 @@ type Config struct {
DatabaseCache int
DatabaseFreezer string

TrieCleanCache int
TrieDirtyCache int
TrieTimeout time.Duration
SnapshotCache int
TrieCleanCache int
TrieCleanCacheJournal string `toml:",omitempty"` // Disk journal directory for trie cache to survive node restarts
TrieCleanCacheRejournal time.Duration `toml:",omitempty"` // Time interval to regenerate the journal for clean cache
TrieDirtyCache int
TrieTimeout time.Duration
SnapshotCache int

// Mining options
Miner miner.Config
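The defaults above also shift memory around: the former 256 MB clean-trie allowance is split into 154 MB of clean cache plus a 102 MB snapshot cache, and the new journal fields let the clean cache survive restarts instead of warming up cold. A sketch of tuning the new knobs, assuming `eth.DefaultConfig` is copied by value as in this file (the numbers are illustrative, not recommendations):

```go
package example

import (
	"time"

	"github.com/ethereum/go-ethereum/eth"
)

// tunedConfig adjusts the clean-cache journal settings introduced above.
func tunedConfig() eth.Config {
	cfg := eth.DefaultConfig
	cfg.TrieCleanCache = 154                       // MB kept for clean trie nodes
	cfg.TrieCleanCacheJournal = "triecache"        // resolved under the node's data dir
	cfg.TrieCleanCacheRejournal = 30 * time.Minute // rewrite the journal more often
	return cfg
}
```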
eth/downloader/downloader.go
@@ -89,7 +89,7 @@ var (
errCancelContentProcessing = errors.New("content processing canceled (requested)")
errCanceled = errors.New("syncing canceled (requested)")
errNoSyncActive = errors.New("no sync active")
errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 63)")
)

type Downloader struct {
@@ -219,7 +219,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
stateBloom: stateBloom,
mux: mux,
checkpoint: checkpoint,
queue: newQueue(),
queue: newQueue(blockCacheItems),
peers: newPeerSet(),
rttEstimate: uint64(rttMaxEstimate),
rttConfidence: uint64(1000000),
@@ -370,7 +370,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
d.stateBloom.Close()
}
// Reset the queue, peer set and wake channels to clean any internal leftover state
d.queue.Reset()
d.queue.Reset(blockCacheItems)
d.peers.Reset()

for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
@@ -431,7 +431,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
d.mux.Post(DoneEvent{latest})
}
}()
if p.version < 62 {
if p.version < 63 {
return errTooOld
}
mode := d.getMode()
@@ -597,6 +597,9 @@ func (d *Downloader) Terminate() {
default:
close(d.quitCh)
}
if d.stateBloom != nil {
d.stateBloom.Close()
}
d.quitLock.Unlock()

// Cancel any pending download requests
@@ -629,7 +632,7 @@ func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
// Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers
if len(headers) != 1 {
p.log.Debug("Multiple headers for single request", "headers", len(headers))
p.log.Warn("Multiple headers for single request", "headers", len(headers))
return nil, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
}
head := headers[0]
@@ -866,7 +869,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
// Make sure the peer actually gave something valid
headers := packer.(*headerPack).headers
if len(headers) != 1 {
p.log.Debug("Multiple headers for single request", "headers", len(headers))
p.log.Warn("Multiple headers for single request", "headers", len(headers))
return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
}
arrived = true
@@ -890,7 +893,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
}
header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
if header.Number.Uint64() != check {
p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number)
}
start = check
@@ -1106,17 +1109,18 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
pack := packet.(*headerPack)
return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
}
expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
throttle = func() bool { return false }
reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
return d.queue.ReserveHeaders(p, count), false, nil
expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) {
return d.queue.ReserveHeaders(p, count), false, false
}
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
p.SetHeadersIdle(accepted, deliveryTime)
}
)
err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,
d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,
nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")

log.Debug("Skeleton fill terminated", "err", err)
@@ -1139,10 +1143,10 @@ func (d *Downloader) fetchBodies(from uint64) error {
expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) }
)
err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies,
d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")

log.Debug("Block body download terminated", "err", err)
@@ -1163,10 +1167,12 @@ func (d *Downloader) fetchReceipts(from uint64) error {
expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
p.SetReceiptsIdle(accepted, deliveryTime)
}
)
err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts,
d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")

log.Debug("Transaction receipt download terminated", "err", err)
@@ -1199,9 +1205,9 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
// - kind: textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {
idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error {

// Create a ticker to detect expired retrieval tasks
ticker := time.NewTicker(100 * time.Millisecond)
@@ -1217,6 +1223,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
return errCanceled

case packet := <-deliveryCh:
deliveryTime := time.Now()
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
@@ -1229,7 +1236,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// caused by a timed out request which came through in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
if !errors.Is(err, errStaleDelivery) {
setIdle(peer, accepted)
setIdle(peer, accepted, deliveryTime)
}
// Issue a log to the user to see what's going on
switch {
@@ -1282,7 +1289,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// how response times react, so it always requests one more than the minimum (i.e. min 2).
if fails > 2 {
peer.log.Trace("Data delivery timed out", "type", kind)
setIdle(peer, 0)
setIdle(peer, 0, time.Now())
} else {
peer.log.Debug("Stalling delivery, dropping", "type", kind)

@@ -1317,27 +1324,27 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// Send a download request to all idle peers, until throttled
progressed, throttled, running := false, false, inFlight()
idles, total := idle()

pendCount := pending()
for _, peer := range idles {
// Short circuit if throttling activated
if throttle() {
throttled = true
if throttled {
break
}
// Short circuit if there is no more available task.
if pending() == 0 {
if pendCount = pending(); pendCount == 0 {
break
}
// Reserve a chunk of fetches for a peer. A nil can mean either that
// no more headers are available, or that the peer is known not to
// have them.
request, progress, err := reserve(peer, capacity(peer))
if err != nil {
return err
}
request, progress, throttle := reserve(peer, capacity(peer))
if progress {
progressed = true
}
if throttle {
throttled = true
throttleCounter.Inc(1)
}
if request == nil {
continue
}
@@ -1362,7 +1369,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
}
// Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error
if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 {
return errPeersUnavailable
}
}
@@ -1374,8 +1381,11 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
// Keep a count of uncertain headers to roll back
var rollback []*types.Header
mode := d.getMode()
var (
rollback []*types.Header
rollbackErr error
mode = d.getMode()
)
defer func() {
if len(rollback) > 0 {
// Flatten the headers and roll them back
@@ -1397,7 +1407,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
log.Warn("Rolled back headers", "count", len(hashes),
"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
}
}()

@@ -1407,6 +1417,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
for {
select {
case <-d.cancelCh:
rollbackErr = errCanceled
return errCanceled

case headers := <-d.headerProcCh:
@@ -1460,6 +1471,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
// Terminate if something failed in between processing chunks
select {
case <-d.cancelCh:
rollbackErr = errCanceled
return errCanceled
default:
}
@@ -1484,11 +1496,12 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
frequency = 1
}
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
rollbackErr = err
// If some headers were inserted, add them too to the rollback list
if n > 0 {
rollback = append(rollback, chunk[:n]...)
}
log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
return fmt.Errorf("%w: %v", errInvalidChain, err)
}
// All verifications passed, store newly found uncertain headers
@@ -1503,6 +1516,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
select {
case <-d.cancelCh:
rollbackErr = errCanceled
return errCanceled
case <-time.After(time.Second):
}
@@ -1510,7 +1524,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
// Otherwise insert the headers for content retrieval
inserts := d.queue.Schedule(chunk, origin)
if len(inserts) != len(chunk) {
log.Debug("Stale headers")
rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
return fmt.Errorf("%w: stale headers", errBadPeer)
}
}
@@ -1680,6 +1694,14 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
}

func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
if len(results) == 0 {
return nil, nil, nil
}
if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
// the pivot is somewhere in the future
return nil, results, nil
}
// This can also be optimized, but only happens very seldom
for _, result := range results {
num := result.Header.Number.Uint64()
switch {
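Two mechanical themes run through the downloader hunk above: `reserve` now returns an explicit throttle boolean (the old `throttle()` callback and its error path are gone), and every delivery stamps `deliveryTime := time.Now()` on arrival, so lock contention inside `fetchParts` no longer counts against a peer's measured latency. A compressed, hypothetical sketch of how a fetch loop consumes the new `reserve` triple:

```go
package example

// fetchRequest stands in for the downloader's per-peer request batch.
type fetchRequest struct{}

// reserveFn mirrors the new reserve signature: request, progressed, throttled.
type reserveFn func(capacity int) (*fetchRequest, bool, bool)

// fill hands out work until the reservation reports throttling or runs dry,
// mirroring how fetchParts now consumes a boolean instead of an error.
func fill(reserve reserveFn, capacity int) (sent int, progressed, throttled bool) {
	for {
		request, progress, throttle := reserve(capacity)
		if progress {
			progressed = true // completed items freed queue space
		}
		if throttle {
			throttled = true // result cache is full: back off, don't fail
			return
		}
		if request == nil {
			return // nothing more this peer can be asked for
		}
		sent++ // a real loop would dispatch the request to the peer here
	}
}
```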
eth/downloader/downloader_test.go
@@ -297,14 +297,13 @@ func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
}
if _, ok := dl.ownHeaders[block.Hash()]; !ok {
if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
dl.ownHashes = append(dl.ownHashes, block.Hash())
dl.ownHeaders[block.Hash()] = block.Header()
}
dl.ownBlocks[block.Hash()] = block
dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})

td := dl.getTd(block.ParentHash())
dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
}
@@ -501,13 +500,14 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) {
testCanonicalSynchronisation(t, 64, LightSync)
func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonicalSynchronisation(t, 65, FullSync) }
func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonicalSynchronisation(t, 65, FastSync) }
func TestCanonicalSynchronisation65Light(t *testing.T) {
testCanonicalSynchronisation(t, 65, LightSync)
}

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
@@ -529,16 +529,16 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {

// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }
func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
defer tester.terminate()

// Create a long block chain to download and the tester
targetBlocks := testChainBase.len() - 1
@@ -570,31 +570,32 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
time.Sleep(25 * time.Millisecond)

tester.lock.Lock()
tester.downloader.queue.lock.Lock()
cached = len(tester.downloader.queue.blockDonePool)
if mode == FastSync {
if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
cached = receipts
}
{
tester.downloader.queue.resultCache.lock.Lock()
cached = tester.downloader.queue.resultCache.countCompleted()
tester.downloader.queue.resultCache.lock.Unlock()
frozen = int(atomic.LoadUint32(&blocked))
retrieved = len(tester.ownBlocks)

}
frozen = int(atomic.LoadUint32(&blocked))
retrieved = len(tester.ownBlocks)
tester.downloader.queue.lock.Unlock()
tester.lock.Unlock()

if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
if cached == blockCacheItems ||
cached == blockCacheItems-reorgProtHeaderDelay ||
retrieved+cached+frozen == targetBlocks+1 ||
retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
break
}
}
// Make sure we filled up the cache, then exhaust it
time.Sleep(25 * time.Millisecond) // give it a chance to screw up

tester.lock.RLock()
retrieved = len(tester.ownBlocks)
tester.lock.RUnlock()
if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
}

// Permit the blocked blocks to import
if atomic.LoadUint32(&blocked) > 0 {
atomic.StoreUint32(&blocked, uint32(0))
@@ -606,17 +607,20 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
if err := <-errc; err != nil {
t.Fatalf("block synchronization failed: %v", err)
}
tester.terminate()

}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
func TestForkedSync65Full(t *testing.T) { testForkedSync(t, 65, FullSync) }
func TestForkedSync65Fast(t *testing.T) { testForkedSync(t, 65, FastSync) }
func TestForkedSync65Light(t *testing.T) { testForkedSync(t, 65, LightSync) }

func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -628,7 +632,6 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
tester.newPeer("fork A", protocol, chainA)
tester.newPeer("fork B", protocol, chainB)

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("fork A", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
@@ -644,12 +647,13 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) {

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
func TestHeavyForkedSync65Full(t *testing.T) { testHeavyForkedSync(t, 65, FullSync) }
func TestHeavyForkedSync65Fast(t *testing.T) { testHeavyForkedSync(t, 65, FastSync) }
func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, 65, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -678,12 +682,13 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
func TestBoundedForkedSync65Full(t *testing.T) { testBoundedForkedSync(t, 65, FullSync) }
func TestBoundedForkedSync65Fast(t *testing.T) { testBoundedForkedSync(t, 65, FastSync) }
func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, 65, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -711,24 +716,22 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
func TestBoundedHeavyForkedSync65Full(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FullSync) }
func TestBoundedHeavyForkedSync65Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FastSync) }
func TestBoundedHeavyForkedSync65Light(t *testing.T) { testBoundedHeavyForkedSync(t, 65, LightSync) }

func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

tester := newTester()
defer tester.terminate()

// Create a long enough forked chain
chainA := testChainForkLightA
chainB := testChainForkHeavy
tester.newPeer("original", protocol, chainA)
tester.newPeer("heavy-rewriter", protocol, chainB)

// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
@@ -736,27 +739,12 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
}
assertOwnChain(t, tester, chainA.len())

tester.newPeer("heavy-rewriter", protocol, chainB)
// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
t.Parallel()

tester := newTester()
defer tester.terminate()

// Check that neither block headers nor bodies are accepted
if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
}
if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
}
tester.terminate()
}

// Tests that an inactive downloader will not accept incoming block headers,
@@ -780,12 +768,13 @@ func TestInactiveDownloader63(t *testing.T) {
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
func TestCancel65Full(t *testing.T) { testCancel(t, 65, FullSync) }
func TestCancel65Fast(t *testing.T) { testCancel(t, 65, FastSync) }
func TestCancel65Light(t *testing.T) { testCancel(t, 65, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -812,12 +801,13 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
func TestMultiSynchronisation65Full(t *testing.T) { testMultiSynchronisation(t, 65, FullSync) }
func TestMultiSynchronisation65Fast(t *testing.T) { testMultiSynchronisation(t, 65, FastSync) }
func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, 65, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -841,12 +831,13 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
func TestMultiProtoSynchronisation65Full(t *testing.T) { testMultiProtoSync(t, 65, FullSync) }
func TestMultiProtoSynchronisation65Fast(t *testing.T) { testMultiProtoSync(t, 65, FastSync) }
func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, 65, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -858,9 +849,9 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
chain := testChainBase.shorten(blockCacheItems - 15)

// Create peers of every type
tester.newPeer("peer 62", 62, chain)
tester.newPeer("peer 63", 63, chain)
tester.newPeer("peer 64", 64, chain)
tester.newPeer("peer 65", 65, chain)

// Synchronise with the requested peer and make sure all blocks were retrieved
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -869,7 +860,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
assertOwnChain(t, tester, chain.len())

// Check that no peers have been dropped off
for _, version := range []int{62, 63, 64} {
for _, version := range []int{63, 64, 65} {
peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peers[peer]; !ok {
t.Errorf("%s dropped", peer)
@@ -879,12 +870,13 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
func TestEmptyShortCircuit65Full(t *testing.T) { testEmptyShortCircuit(t, 65, FullSync) }
func TestEmptyShortCircuit65Fast(t *testing.T) { testEmptyShortCircuit(t, 65, FastSync) }
func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, 65, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -932,12 +924,13 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
func TestMissingHeaderAttack65Full(t *testing.T) { testMissingHeaderAttack(t, 65, FullSync) }
func TestMissingHeaderAttack65Fast(t *testing.T) { testMissingHeaderAttack(t, 65, FastSync) }
func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, 65, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -963,12 +956,13 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
func TestShiftedHeaderAttack65Full(t *testing.T) { testShiftedHeaderAttack(t, 65, FullSync) }
func TestShiftedHeaderAttack65Fast(t *testing.T) { testShiftedHeaderAttack(t, 65, FastSync) }
func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, 65, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1001,13 +995,13 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// sure no state was corrupted.
func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) }
func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }
func TestInvalidHeaderRollback65Light(t *testing.T) { testInvalidHeaderRollback(t, 65, LightSync) }

func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

tester := newTester()
defer tester.terminate()

// Create a small enough block chain to download
targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
@@ -1087,34 +1081,36 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
}
}
tester.terminate()
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
func TestHighTDStarvationAttack65Full(t *testing.T) { testHighTDStarvationAttack(t, 65, FullSync) }
func TestHighTDStarvationAttack65Fast(t *testing.T) { testHighTDStarvationAttack(t, 65, FastSync) }
func TestHighTDStarvationAttack65Light(t *testing.T) { testHighTDStarvationAttack(t, 65, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()

tester := newTester()
defer tester.terminate()

chain := testChainBase.shorten(1)
tester.newPeer("attack", protocol, chain)
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
}
tester.terminate()
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
t.Parallel()
@@ -1166,12 +1162,13 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
func TestSyncProgress65Full(t *testing.T) { testSyncProgress(t, 65, FullSync) }
func TestSyncProgress65Fast(t *testing.T) { testSyncProgress(t, 65, FastSync) }
func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, 65, LightSync) }

func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1249,12 +1246,13 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversion).
func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) }
func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }
func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
func TestForkedSyncProgress65Full(t *testing.T) { testForkedSyncProgress(t, 65, FullSync) }
func TestForkedSyncProgress65Fast(t *testing.T) { testForkedSyncProgress(t, 65, FastSync) }
func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, 65, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1324,12 +1322,13 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
func TestFailedSyncProgress65Full(t *testing.T) { testFailedSyncProgress(t, 65, FullSync) }
func TestFailedSyncProgress65Fast(t *testing.T) { testFailedSyncProgress(t, 65, FastSync) }
func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, 65, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1396,12 +1395,13 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }
func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, 65, FullSync) }
func TestFakedSyncProgress65Fast(t *testing.T) { testFakedSyncProgress(t, 65, FastSync) }
func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, 65, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1479,12 +1479,14 @@ func TestDeliverHeadersHang(t *testing.T) {
protocol int
syncMode SyncMode
}{
{62, FullSync},
{63, FullSync},
{63, FastSync},
{64, FullSync},
{64, FastSync},
{64, LightSync},
{65, FullSync},
{65, FastSync},
{65, LightSync},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
@@ -1645,12 +1647,13 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement62(t *testing.T) { testCheckpointEnforcement(t, 62, FullSync) }
func TestCheckpointEnforcement63Full(t *testing.T) { testCheckpointEnforcement(t, 63, FullSync) }
func TestCheckpointEnforcement63Fast(t *testing.T) { testCheckpointEnforcement(t, 63, FastSync) }
func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) }
func TestCheckpointEnforcement64Fast(t *testing.T) { testCheckpointEnforcement(t, 64, FastSync) }
func TestCheckpointEnforcement64Light(t *testing.T) { testCheckpointEnforcement(t, 64, LightSync) }
func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, 65, FullSync) }
func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, 65, FastSync) }
func TestCheckpointEnforcement65Light(t *testing.T) { testCheckpointEnforcement(t, 65, LightSync) }

func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
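The test changes above are mechanical: eth/62 is retired and eth/65 added, so every scenario now runs across protocols 63-65. Where most of the suite still spells the matrix out function by function, `TestDeliverHeadersHang` already uses a table; a hedged sketch of that same table-driven shape (the `syncMode` type and helper names here are invented for illustration):

```go
package example

import (
	"fmt"
	"testing"
)

// syncMode stands in for the downloader's SyncMode in this sketch.
type syncMode int

const (
	fullSync syncMode = iota
	fastSync
	lightSync
)

// TestAllProtocols fans one scenario out over the protocol/mode matrix that
// the renamed tests above still encode by hand.
func TestAllProtocols(t *testing.T) {
	for _, protocol := range []int{63, 64, 65} {
		for _, mode := range []syncMode{fullSync, fastSync, lightSync} {
			protocol, mode := protocol, mode // capture range variables
			t.Run(fmt.Sprintf("protocol %d mode %v", protocol, mode), func(t *testing.T) {
				_ = protocol // a real test would sync a tester peer here
				_ = mode
			})
		}
	}
}
```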
eth/downloader/metrics.go
@@ -40,4 +40,6 @@ var (

stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)

throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
)
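The new `throttleCounter` gives operators a visible signal each time the fetch loop backs off because the result cache is full. Registered counters in go-ethereum's metrics package are incremented in place; a minimal sketch (the metric name is taken from the diff, the `onThrottled` helper is hypothetical):

```go
package example

import "github.com/ethereum/go-ethereum/metrics"

// throttleCounter mirrors the registration added above; the fetch loop then
// bumps it once per reservation that reports throttling.
var throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)

func onThrottled() {
	throttleCounter.Inc(1) // exported through the usual metrics endpoints
}
```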
@@ -21,7 +21,6 @@ package downloader
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
@@ -117,9 +116,7 @@ func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *pe
|
||||
return &peerConnection{
|
||||
id: id,
|
||||
lacking: make(map[common.Hash]struct{}),
|
||||
|
||||
peer: peer,
|
||||
|
||||
peer: peer,
|
||||
version: version,
|
||||
log: logger,
|
||||
}
|
||||
@@ -145,10 +142,6 @@ func (p *peerConnection) Reset() {
|
||||
|
||||
// FetchHeaders sends a header retrieval request to the remote peer.
|
||||
func (p *peerConnection) FetchHeaders(from uint64, count int) error {
|
||||
// Sanity check the protocol version
|
||||
if p.version < 62 {
|
||||
panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
|
||||
}
|
||||
// Short circuit if the peer is already fetching
|
||||
if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
|
||||
return errAlreadyFetching
|
||||
@@ -163,54 +156,46 @@ func (p *peerConnection) FetchHeaders(from uint64, count int) error {
// FetchBodies sends a block body retrieval request to the remote peer.
func (p *peerConnection) FetchBodies(request *fetchRequest) error {
// Sanity check the protocol version
if p.version < 62 {
panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version))
}
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
return errAlreadyFetching
}
p.blockStarted = time.Now()

// Convert the header set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Headers))
for _, header := range request.Headers {
hashes = append(hashes, header.Hash())
}
go p.peer.RequestBodies(hashes)
go func() {
// Convert the header set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Headers))
for _, header := range request.Headers {
hashes = append(hashes, header.Hash())
}
p.peer.RequestBodies(hashes)
}()

return nil
}

// FetchReceipts sends a receipt retrieval request to the remote peer.
func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
// Sanity check the protocol version
if p.version < 63 {
panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version))
}
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
return errAlreadyFetching
}
p.receiptStarted = time.Now()

// Convert the header set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Headers))
for _, header := range request.Headers {
hashes = append(hashes, header.Hash())
}
go p.peer.RequestReceipts(hashes)
go func() {
// Convert the header set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Headers))
for _, header := range request.Headers {
hashes = append(hashes, header.Hash())
}
p.peer.RequestReceipts(hashes)
}()

return nil
}

// FetchNodeData sends a node state data retrieval request to the remote peer.
func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
// Sanity check the protocol version
if p.version < 63 {
panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version))
}
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
return errAlreadyFetching

@@ -225,34 +210,34 @@ func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetHeadersIdle(delivered int) {
p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle)
func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
p.setIdle(deliveryTime.Sub(p.headerStarted), delivered, &p.headerThroughput, &p.headerIdle)
}

// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetBodiesIdle(delivered int) {
p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
p.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle)
}

// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetReceiptsIdle(delivered int) {
p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
p.setIdle(deliveryTime.Sub(p.receiptStarted), delivered, &p.receiptThroughput, &p.receiptIdle)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetNodeDataIdle(delivered int) {
p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
p.setIdle(deliveryTime.Sub(p.stateStarted), delivered, &p.stateThroughput, &p.stateIdle)
}

// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its estimated retrieval throughput is updated with that measured just now.
func (p *peerConnection) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
func (p *peerConnection) setIdle(elapsed time.Duration, delivered int, throughput *float64, idle *int32) {
// Irrelevant of the scaling, make sure the peer ends up idle
defer atomic.StoreInt32(idle, 0)

@@ -265,7 +250,9 @@ func (p *peerConnection) setIdle(started time.Time, delivered int, throughput *f
return
}
// Otherwise update the throughput with a new measurement
elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
if elapsed <= 0 {
elapsed = 1 // +1 (ns) to ensure non-zero divisor
}
measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured

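The update above is an exponential moving average: each measurement nudges the estimate by a fixed fraction rather than replacing it. A minimal self-contained sketch, assuming a weight of 0.1 for measurementImpact (an illustrative assumption, not quoted from this diff):

package main

import (
	"fmt"
	"time"
)

const measurementImpact = 0.1 // assumed weight of a single new measurement

// updateThroughput folds one delivery measurement into a running estimate,
// mirroring the shape of setIdle above.
func updateThroughput(throughput *float64, delivered int, elapsed time.Duration) {
	if elapsed <= 0 {
		elapsed = 1 // ensure a non-zero divisor, as in the patched code
	}
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))
	*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
}

func main() {
	tp := 100.0                                     // prior estimate: 100 items/s
	updateThroughput(&tp, 50, 250*time.Millisecond) // one burst at 200 items/s
	fmt.Printf("%.1f\n", tp)                        // 0.9*100 + 0.1*200 = 110.0
}
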
@@ -470,7 +457,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.headerThroughput
}
return ps.idlePeers(62, 65, idle, throughput)
return ps.idlePeers(63, 65, idle, throughput)
}

// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within

@@ -484,7 +471,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.blockThroughput
}
return ps.idlePeers(62, 65, idle, throughput)
return ps.idlePeers(63, 65, idle, throughput)
}

// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers

@@ -523,22 +510,20 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
defer ps.lock.RUnlock()

idle, total := make([]*peerConnection, 0, len(ps.peers)), 0
tps := make([]float64, 0, len(ps.peers))
for _, p := range ps.peers {
if p.version >= minProtocol && p.version <= maxProtocol {
if idleCheck(p) {
idle = append(idle, p)
tps = append(tps, throughput(p))
}
total++
}
}
for i := 0; i < len(idle); i++ {
for j := i + 1; j < len(idle); j++ {
if throughput(idle[i]) < throughput(idle[j]) {
idle[i], idle[j] = idle[j], idle[i]
}
}
}
return idle, total
// And sort them
sortPeers := &peerThroughputSort{idle, tps}
sort.Sort(sortPeers)
return sortPeers.p, total
}

// medianRTT returns the median RTT of the peerset, considering only the tuning

@@ -571,3 +556,24 @@ func (ps *peerSet) medianRTT() time.Duration {
}
return median
}

// peerThroughputSort implements the Sort interface, and allows for
// sorting a set of peers by their throughput.
// The sorted data is ordered with the _highest_ throughput first.
type peerThroughputSort struct {
p []*peerConnection
tp []float64
}

func (ps *peerThroughputSort) Len() int {
return len(ps.p)
}

func (ps *peerThroughputSort) Less(i, j int) bool {
return ps.tp[i] > ps.tp[j]
}

func (ps *peerThroughputSort) Swap(i, j int) {
ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
}

53
eth/downloader/peer_test.go
Normal file
@@ -0,0 +1,53 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
"sort"
"testing"
)

func TestPeerThroughputSorting(t *testing.T) {
a := &peerConnection{
id: "a",
headerThroughput: 1.25,
}
b := &peerConnection{
id: "b",
headerThroughput: 1.21,
}
c := &peerConnection{
id: "c",
headerThroughput: 1.23,
}

peers := []*peerConnection{a, b, c}
tps := []float64{a.headerThroughput, b.headerThroughput, c.headerThroughput}
sortPeers := &peerThroughputSort{peers, tps}
sort.Sort(sortPeers)
if got, exp := sortPeers.p[0].id, "a"; got != exp {
t.Errorf("sort fail, got %v exp %v", got, exp)
}
if got, exp := sortPeers.p[1].id, "c"; got != exp {
t.Errorf("sort fail, got %v exp %v", got, exp)
}
if got, exp := sortPeers.p[2].id, "b"; got != exp {
t.Errorf("sort fail, got %v exp %v", got, exp)
}
}

@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/common"

@@ -32,6 +33,11 @@ import (
"github.com/ethereum/go-ethereum/metrics"
)

const (
bodyType = uint(0)
receiptType = uint(1)
)

var (
blockCacheItems = 8192 // Maximum number of blocks to cache before throttling the download
blockCacheMemory = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching

@@ -54,8 +60,7 @@ type fetchRequest struct {
// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
Pending int // Number of data fetches still pending
Hash common.Hash // Hash of the header to prevent recalculating
pending int32 // Flag telling what deliveries are outstanding

Header *types.Header
Uncles []*types.Header

@@ -63,6 +68,44 @@ type fetchResult struct {
Receipts types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
item := &fetchResult{
Header: header,
}
if !header.EmptyBody() {
item.pending |= (1 << bodyType)
}
if fastSync && !header.EmptyReceipts() {
item.pending |= (1 << receiptType)
}
return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
atomic.AddInt32(&f.pending, -1)
}
}

// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
return atomic.LoadInt32(&f.pending) == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
atomic.AddInt32(&f.pending, -2)
}
}

// Done checks if the given type is done already
func (f *fetchResult) Done(kind uint) bool {
v := atomic.LoadInt32(&f.pending)
return v&(1<<kind) == 0
}

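The pending field is a tiny bit set: the body occupies bit 0 (bodyType) and the receipts bit 1 (receiptType), which is why SetBodyDone subtracts 1 and SetReceiptsDone subtracts 2. A standalone sketch of that lifecycle:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

func main() {
	// Both parts outstanding: 0b11, as newFetchResult sets up in fast sync.
	var pending int32 = (1 << bodyType) | (1 << receiptType)

	atomic.AddInt32(&pending, -1)              // SetBodyDone: clear bit 0
	fmt.Println(pending&(1<<receiptType) != 0) // true: receipts still pending

	atomic.AddInt32(&pending, -2)                // SetReceiptsDone: clear bit 1
	fmt.Println(atomic.LoadInt32(&pending) == 0) // true: AllDone
}
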
// queue represents hashes that either need fetching or are being fetched
type queue struct {
mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

@@ -82,44 +125,37 @@ type queue struct {
blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations
blockDonePool map[common.Hash]struct{} // [eth/62] Set of the completed block (body) fetches

receiptTaskPool map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
receiptTaskQueue *prque.Prque // [eth/63] Priority queue of the headers to fetch the receipts for
receiptPendPool map[string]*fetchRequest // [eth/63] Currently pending receipt retrieval operations
receiptDonePool map[common.Hash]struct{} // [eth/63] Set of the completed receipt fetches

resultCache []*fetchResult // Downloaded but not yet delivered fetch results
resultOffset uint64 // Offset of the first cached fetch result in the block chain
resultSize common.StorageSize // Approximate size of a block (exponential moving average)
resultCache *resultStore // Downloaded but not yet delivered fetch results
resultSize common.StorageSize // Approximate size of a block (exponential moving average)

lock *sync.Mutex
lock *sync.RWMutex
active *sync.Cond
closed bool

lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
lock := new(sync.Mutex)
return &queue{
headerPendPool: make(map[string]*fetchRequest),
func newQueue(blockCacheLimit int) *queue {
lock := new(sync.RWMutex)
q := &queue{
headerContCh: make(chan bool),
blockTaskPool: make(map[common.Hash]*types.Header),
blockTaskQueue: prque.New(nil),
blockPendPool: make(map[string]*fetchRequest),
blockDonePool: make(map[common.Hash]struct{}),
receiptTaskPool: make(map[common.Hash]*types.Header),
receiptTaskQueue: prque.New(nil),
receiptPendPool: make(map[string]*fetchRequest),
receiptDonePool: make(map[common.Hash]struct{}),
resultCache: make([]*fetchResult, blockCacheItems),
active: sync.NewCond(lock),
lock: lock,
}
q.Reset(blockCacheLimit)
return q
}

// Reset clears out the queue contents.
func (q *queue) Reset() {
func (q *queue) Reset(blockCacheLimit int) {
q.lock.Lock()
defer q.lock.Unlock()

@@ -132,15 +168,12 @@ func (q *queue) Reset() {
q.blockTaskPool = make(map[common.Hash]*types.Header)
q.blockTaskQueue.Reset()
q.blockPendPool = make(map[string]*fetchRequest)
q.blockDonePool = make(map[common.Hash]struct{})

q.receiptTaskPool = make(map[common.Hash]*types.Header)
q.receiptTaskQueue.Reset()
q.receiptPendPool = make(map[string]*fetchRequest)
q.receiptDonePool = make(map[common.Hash]struct{})

q.resultCache = make([]*fetchResult, blockCacheItems)
q.resultOffset = 0
q.resultCache = newResultStore(blockCacheLimit)
}

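A minimal usage sketch of the new constructor and Reset signatures, mirroring the tests added further down; the helper name is hypothetical, and only code inside the downloader package can reach these unexported types:

package downloader

import "github.com/ethereum/go-ethereum/core/types"

// exampleQueueUsage is a hypothetical helper showing the call order the
// tests below rely on: construct, prepare, schedule, then drain results.
func exampleQueueUsage(headers []*types.Header) []*fetchResult {
	q := newQueue(10)       // result store sized to 10 slots
	q.Prepare(1, FastSync)  // first expected block is #1, fast-sync mode
	q.Schedule(headers, 1)  // enqueue body (and receipt) retrieval tasks
	return q.Results(false) // non-blocking: returns only completed results
}
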
@@ -148,8 +181,8 @@ func (q *queue) Reset() {
// Close marks the end of the sync, unblocking Results.
func (q *queue) Close() {
q.lock.Lock()
q.closed = true
q.active.Signal()
q.lock.Unlock()
q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.

@@ -210,58 +243,8 @@ func (q *queue) Idle() bool {
queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
pending := len(q.blockPendPool) + len(q.receiptPendPool)
cached := len(q.blockDonePool) + len(q.receiptDonePool)

return (queued + pending + cached) == 0
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
q.lock.Lock()
defer q.lock.Unlock()

return q.resultSlots(q.blockPendPool, q.blockDonePool) <= 0
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
q.lock.Lock()
defer q.lock.Unlock()

return q.resultSlots(q.receiptPendPool, q.receiptDonePool) <= 0
}

// resultSlots calculates the number of results slots available for requests
// whilst adhering to both the item and the memory limits of the result cache.
func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int {
// Calculate the maximum length capped by the memory limit
limit := len(q.resultCache)
if common.StorageSize(len(q.resultCache))*q.resultSize > common.StorageSize(blockCacheMemory) {
limit = int((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
}
// Calculate the number of slots already finished
finished := 0
for _, result := range q.resultCache[:limit] {
if result == nil {
break
}
if _, ok := donePool[result.Hash]; ok {
finished++
}
}
// Calculate the number of slots currently downloading
pending := 0
for _, request := range pendPool {
for _, header := range request.Headers {
if header.Number.Uint64() < q.resultOffset+uint64(limit) {
pending++
}
}
}
// Return the free slots to distribute
return limit - finished - pending
return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill

@@ -323,21 +306,22 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
break
}
// Make sure no duplicate requests are executed
// We cannot skip this, even if the block is empty, since this is
// what triggers the fetchResult creation.
if _, ok := q.blockTaskPool[hash]; ok {
log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
continue
} else {
q.blockTaskPool[hash] = header
q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
}
if _, ok := q.receiptTaskPool[hash]; ok {
log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
continue
}
// Queue the header for content retrieval
q.blockTaskPool[hash] = header
q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))

if q.mode == FastSync {
q.receiptTaskPool[hash] = header
q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
// Queue for receipt retrieval
if q.mode == FastSync && !header.EmptyReceipts() {
if _, ok := q.receiptTaskPool[hash]; ok {
log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
} else {
q.receiptTaskPool[hash] = header
q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
}
}
inserts = append(inserts, header)
q.headerHead = hash

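The EmptyBody and EmptyReceipts checks relied on above reduce to the same hash comparisons that the removed isNoop closures performed (visible in the ReserveBodies/ReserveReceipts hunks below); a small demonstration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// A header whose body and receipts are canonically empty.
	h := &types.Header{
		TxHash:      types.EmptyRootHash,
		UncleHash:   types.EmptyUncleHash,
		ReceiptHash: types.EmptyRootHash,
	}
	// Equivalent to h.EmptyBody(): no transactions and no uncles.
	fmt.Println(h.TxHash == types.EmptyRootHash && h.UncleHash == types.EmptyUncleHash)
	// Equivalent to h.EmptyReceipts(): nothing to fetch in fast sync.
	fmt.Println(h.ReceiptHash == types.EmptyRootHash)
}
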
@@ -347,67 +331,78 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// the cache. the result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results
func (q *queue) Results(block bool) []*fetchResult {
q.lock.Lock()
defer q.lock.Unlock()

// Count the number of items available for processing
nproc := q.countProcessableItems()
for nproc == 0 && !q.closed {
if !block {
return nil
// Abort early if there are no items and non-blocking requested
if !block && !q.resultCache.HasCompletedItems() {
return nil
}
closed := false
for !closed && !q.resultCache.HasCompletedItems() {
// In order to wait on 'active', we need to obtain the lock.
// That may take a while, if someone is delivering at the same
// time, so after obtaining the lock, we check again if there
// are any results to fetch.
// Also, between asking for the lock and obtaining it, someone
// can have closed the queue. In that case, we should return the
// available results and stop blocking
q.lock.Lock()
if q.resultCache.HasCompletedItems() || q.closed {
q.lock.Unlock()
break
}
// No items available, and not closed
q.active.Wait()
nproc = q.countProcessableItems()
closed = q.closed
q.lock.Unlock()
}

// Since we have a batch limit, don't pull more into "dangling" memory
if nproc > maxResultsProcess {
nproc = maxResultsProcess
}
results := make([]*fetchResult, nproc)
copy(results, q.resultCache[:nproc])
if len(results) > 0 {
// Mark results as done before dropping them from the cache.
for _, result := range results {
hash := result.Header.Hash()
delete(q.blockDonePool, hash)
delete(q.receiptDonePool, hash)
}
// Delete the results from the cache and clear the tail.
copy(q.resultCache, q.resultCache[nproc:])
for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
q.resultCache[i] = nil
}
// Advance the expected block number of the first cache entry.
q.resultOffset += uint64(nproc)

// Regardless if closed or not, we can still deliver whatever we have
results := q.resultCache.GetCompleted(maxResultsProcess)
for _, result := range results {
// Recalculate the result item weights to prevent memory exhaustion
for _, result := range results {
size := result.Header.Size()
for _, uncle := range result.Uncles {
size += uncle.Size()
}
for _, receipt := range result.Receipts {
size += receipt.Size()
}
for _, tx := range result.Transactions {
size += tx.Size()
}
q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
size := result.Header.Size()
for _, uncle := range result.Uncles {
size += uncle.Size()
}
for _, receipt := range result.Receipts {
size += receipt.Size()
}
for _, tx := range result.Transactions {
size += tx.Size()
}
q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
}
// Using the newly calibrated resultsize, figure out the new throttle limit
// on the result cache
throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

// Log some info at certain times
if time.Since(q.lastStatLog) > 10*time.Second {
q.lastStatLog = time.Now()
info := q.Stats()
info = append(info, "throttle", throttleThreshold)
log.Info("Downloader queue stats", info...)
}
return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
for i, result := range q.resultCache {
if result == nil || result.Pending > 0 {
return i
}
func (q *queue) Stats() []interface{} {
q.lock.RLock()
defer q.lock.RUnlock()

return q.stats()
}

func (q *queue) stats() []interface{} {
return []interface{}{
"receiptTasks", q.receiptTaskQueue.Size(),
"blockTasks", q.blockTaskQueue.Size(),
"itemSize", q.resultSize,
}
return len(q.resultCache)
}

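The wait loop in Results above is the standard condition-variable shape: re-check the predicate after every wakeup, since the state can change between asking for the lock and obtaining it, and Close now uses Broadcast so that every blocked caller wakes at shutdown. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
)

type box struct {
	lock   sync.Mutex
	cond   *sync.Cond
	items  []int
	closed bool
}

// wait blocks until an item is ready or the box is closed, re-checking the
// predicate on every wakeup, exactly like queue.Results does.
func (b *box) wait() []int {
	b.lock.Lock()
	defer b.lock.Unlock()
	for len(b.items) == 0 && !b.closed {
		b.cond.Wait() // atomically unlocks, sleeps, and relocks on wakeup
	}
	out := b.items
	b.items = nil
	return out
}

func main() {
	b := &box{}
	b.cond = sync.NewCond(&b.lock)
	go func() {
		b.lock.Lock()
		b.items = append(b.items, 42)
		b.lock.Unlock()
		b.cond.Broadcast() // wake all waiters, as Close does
	}()
	fmt.Println(b.wait()) // [42]
}
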
@@ -453,27 +448,21 @@ func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
// ReserveHeaders reserves a set of headers for the given peer, skipping any

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Beside the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
isNoop := func(header *types.Header) bool {
return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
}
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
q.lock.Lock()
defer q.lock.Unlock()

return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Beside the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
isNoop := func(header *types.Header) bool {
return header.ReceiptHash == types.EmptyRootHash
}
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
q.lock.Lock()
defer q.lock.Unlock()

return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,

@@ -483,57 +472,71 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
// item     - the fetchRequest
// progress - whether any progress was made
// throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
// Short circuit if the pool has been depleted, or if the peer's already
// downloading something (sanity check not to corrupt state)
if taskQueue.Empty() {
return nil, false, nil
return nil, false, true
}
if _, ok := pendPool[p.id]; ok {
return nil, false, nil
return nil, false, false
}
// Calculate an upper limit on the items we might fetch (i.e. throttling)
space := q.resultSlots(pendPool, donePool)

// Retrieve a batch of tasks, skipping previously failed ones
send := make([]*types.Header, 0, count)
skip := make([]*types.Header, 0)

progress := false
for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
header := taskQueue.PopItem().(*types.Header)
hash := header.Hash()
throttled := false
for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
// the task queue will pop items in order, so the highest prio block
// is also the lowest block number.
h, _ := taskQueue.Peek()
header := h.(*types.Header)
// we can ask the resultcache if this header is within the
// "prioritized" segment of blocks. If it is not, we need to throttle

// If we're the first to request this task, initialise the result container
index := int(header.Number.Int64() - int64(q.resultOffset))
if index >= len(q.resultCache) || index < 0 {
common.Report("index allocation went beyond available resultCache space")
return nil, false, fmt.Errorf("%w: index allocation went beyond available resultCache space", errInvalidChain)
stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
if stale {
// Don't put back in the task queue, this item has already been
// delivered upstream
taskQueue.PopItem()
progress = true
delete(taskPool, header.Hash())
proc = proc - 1
log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
continue
}
if q.resultCache[index] == nil {
components := 1
if q.mode == FastSync {
components = 2
}
q.resultCache[index] = &fetchResult{
Pending: components,
Hash: hash,
Header: header,
}
if throttle {
// There are no resultslots available. Leave it in the task queue
// However, if there are any left as 'skipped', we should not tell
// the caller to throttle, since we still want some other
// peer to fetch those for us
throttled = len(skip) == 0
break
}
// If this fetch task is a noop, skip this fetch operation
if isNoop(header) {
donePool[hash] = struct{}{}
delete(taskPool, hash)

space, proc = space-1, proc-1
q.resultCache[index].Pending--
if err != nil {
// this most definitely should _not_ happen
log.Warn("Failed to reserve headers", "err", err)
// There are no resultslots available. Leave it in the task queue
break
}
if item.Done(kind) {
// If it's a noop, we can skip this task
delete(taskPool, header.Hash())
taskQueue.PopItem()
proc = proc - 1
progress = true
continue
}
// Remove it from the task queue
taskQueue.PopItem()
// Otherwise unless the peer is known not to have the data, add to the retrieve list
if p.Lacks(hash) {
if p.Lacks(header.Hash()) {
skip = append(skip, header)
} else {
send = append(send, header)

@@ -543,13 +546,13 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
for _, header := range skip {
taskQueue.Push(header, -int64(header.Number.Uint64()))
}
if progress {
if q.resultCache.HasCompletedItems() {
// Wake Results, resultCache was modified
q.active.Signal()
}
// Assemble and return the block download request
if len(send) == 0 {
return nil, progress, nil
return nil, progress, throttled
}
request := &fetchRequest{
Peer: p,

@@ -557,8 +560,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
Time: time.Now(),
}
pendPool[p.id] = request

return request, progress, nil
return request, progress, throttled
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.

@@ -768,16 +770,23 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()

reconstruct := func(header *types.Header, index int, result *fetchResult) error {
if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
validate := func(index int, header *types.Header) error {
if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash {
return errInvalidBody
}
if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
return errInvalidBody
}
result.Transactions = txLists[index]
result.Uncles = uncleLists[index]
return nil
}
return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)

reconstruct := func(index int, result *fetchResult) {
result.Transactions = txLists[index]
result.Uncles = uncleLists[index]
result.SetBodyDone()
}
return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
bodyReqTimer, len(txLists), validate, reconstruct)
}

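The TxHash comparison in validate leans on DeriveSha: an empty transaction list derives the canonical empty root, which is what lets empty bodies be recognised without any network fetch. A one-line check:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// An empty transaction set derives the canonical empty root, so a header
	// with TxHash == EmptyRootHash needs no body download at all.
	fmt.Println(types.DeriveSha(types.Transactions{}) == types.EmptyRootHash) // true
}
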
@@ -786,25 +795,29 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
// DeliverReceipts injects a receipt retrieval response into the results queue.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()

reconstruct := func(header *types.Header, index int, result *fetchResult) error {
validate := func(index int, header *types.Header) error {
if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
return errInvalidReceipt
}
result.Receipts = receiptList[index]
return nil
}
return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)

reconstruct := func(index int, result *fetchResult) {
result.Receipts = receiptList[index]
result.SetReceiptsDone()
}
return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
receiptReqTimer, len(receiptList), validate, reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
results int, validate func(index int, header *types.Header) error,
reconstruct func(index int, result *fetchResult)) (int, error) {

// Short circuit if the data was never requested
request := pendPool[id]

@@ -824,52 +837,53 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ
var (
accepted int
failure error
useful bool
i int
hashes []common.Hash
)
for i, header := range request.Headers {
for _, header := range request.Headers {
// Short circuit assembly if no more fetch results are found
if i >= results {
break
}
// Reconstruct the next result if contents match up
index := int(header.Number.Int64() - int64(q.resultOffset))
if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
failure = errInvalidChain
break
}
if err := reconstruct(header, i, q.resultCache[index]); err != nil {
// Validate the fields
if err := validate(i, header); err != nil {
failure = err
break
}
hash := header.Hash()

donePool[hash] = struct{}{}
q.resultCache[index].Pending--
useful = true
accepted++
hashes = append(hashes, header.Hash())
i++
}

for _, header := range request.Headers[:i] {
if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
reconstruct(accepted, res)
} else {
// else: between here and above, some other peer filled this result,
// or it was indeed a no-op. This should not happen, but if it does it's
// not something to panic about
log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
failure = errStaleDelivery
}
// Clean up a successful fetch
request.Headers[i] = nil
delete(taskPool, hash)
delete(taskPool, hashes[accepted])
accepted++
}
// Return all failed or missing fetches to the queue
for _, header := range request.Headers {
if header != nil {
taskQueue.Push(header, -int64(header.Number.Uint64()))
}
for _, header := range request.Headers[accepted:] {
taskQueue.Push(header, -int64(header.Number.Uint64()))
}
// Wake up Results
if accepted > 0 {
q.active.Signal()
}
// If none of the data was good, it's a stale delivery
if failure == nil {
return accepted, nil
}
// If none of the data was good, it's a stale delivery
if errors.Is(failure, errInvalidChain) {
return accepted, failure
}
if useful {
if accepted > 0 {
return accepted, fmt.Errorf("partial failure: %v", failure)
}
return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)

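The failure handling above depends on Go error wrapping: a sentinel wrapped via fmt.Errorf's %w verb still matches errors.Is, so errInvalidChain and errStaleDelivery survive the added context. A minimal demonstration:

package main

import (
	"errors"
	"fmt"
)

var errStaleDelivery = errors.New("stale delivery")

func main() {
	// Wrap the sentinel with extra context, as deliver does above.
	err := fmt.Errorf("%w: no header found", errStaleDelivery)
	fmt.Println(errors.Is(err, errStaleDelivery)) // true: the cause is preserved
}
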
@@ -882,8 +896,6 @@ func (q *queue) Prepare(offset uint64, mode SyncMode) {
defer q.lock.Unlock()

// Prepare the queue for sync results
if q.resultOffset < offset {
q.resultOffset = offset
}
q.resultCache.Prepare(offset)
q.mode = mode
}

426
eth/downloader/queue_test.go
Normal file
@@ -0,0 +1,426 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
"fmt"
"math/big"
"math/rand"
"sync"
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)

var (
testdb = rawdb.NewMemoryDatabase()
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every second
// block contains a transaction to allow testing correct block reassembly.
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// Add one tx to every second block
if !empty && i%2 == 0 {
signer := types.MakeSigner(params.TestChainConfig, block.Number())
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
if err != nil {
panic(err)
}
block.AddTx(tx)
}
})
return blocks, receipts
}

type chainData struct {
blocks []*types.Block
offset int
}

var chain *chainData
var emptyChain *chainData

func init() {
// Create a chain of blocks to import
targetBlocks := 128
blocks, _ := makeChain(targetBlocks, 0, genesis, false)
chain = &chainData{blocks, 0}

blocks, _ = makeChain(targetBlocks, 0, genesis, true)
emptyChain = &chainData{blocks, 0}
}

func (chain *chainData) headers() []*types.Header {
hdrs := make([]*types.Header, len(chain.blocks))
for i, b := range chain.blocks {
hdrs[i] = b.Header()
}
return hdrs
}

func (chain *chainData) Len() int {
return len(chain.blocks)
}

func dummyPeer(id string) *peerConnection {
p := &peerConnection{
id: id,
lacking: make(map[common.Hash]struct{}),
}
return p
}

func TestBasics(t *testing.T) {
q := newQueue(10)
if !q.Idle() {
t.Errorf("new queue should be idle")
}
q.Prepare(1, FastSync)
if res := q.Results(false); len(res) != 0 {
t.Fatal("new queue should have 0 results")
}

// Schedule a batch of headers
q.Schedule(chain.headers(), 1)
if q.Idle() {
t.Errorf("queue should not be idle")
}
if got, exp := q.PendingBlocks(), chain.Len(); got != exp {
t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
}
// Only non-empty receipts get added to the task queue
if got, exp := q.PendingReceipts(), 64; got != exp {
t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
}
// Items are now queued for downloading, next step is that we tell the
// queue that a certain peer will deliver them for us
{
peer := dummyPeer("peer-1")
fetchReq, _, throttle := q.ReserveBodies(peer, 50)
if !throttle {
// queue size is only 10, so throttling should occur
t.Fatal("should throttle")
}
// But we should still get the first things to fetch
if got, exp := len(fetchReq.Headers), 5; got != exp {
t.Fatalf("expected %d requests, got %d", exp, got)
}
if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
t.Fatalf("expected header %d, got %d", exp, got)
}
}
{
peer := dummyPeer("peer-2")
fetchReq, _, throttle := q.ReserveBodies(peer, 50)

// The second peer should hit throttling
if !throttle {
t.Fatalf("should throttle")
}
// And not get any fetches at all, since it was throttled to begin with
if fetchReq != nil {
t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers))
}
}
//fmt.Printf("blockTaskQueue len: %d\n", q.blockTaskQueue.Size())
//fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size())
{
// The receipt delivering peer should not be affected
// by the throttling of body deliveries
peer := dummyPeer("peer-3")
fetchReq, _, throttle := q.ReserveReceipts(peer, 50)
if !throttle {
// queue size is only 10, so throttling should occur
t.Fatal("should throttle")
}
// But we should still get the first things to fetch
if got, exp := len(fetchReq.Headers), 5; got != exp {
t.Fatalf("expected %d requests, got %d", exp, got)
}
if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
t.Fatalf("expected header %d, got %d", exp, got)
}
}
//fmt.Printf("blockTaskQueue len: %d\n", q.blockTaskQueue.Size())
//fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size())
//fmt.Printf("processable: %d\n", q.resultCache.countCompleted())
}

func TestEmptyBlocks(t *testing.T) {
q := newQueue(10)

q.Prepare(1, FastSync)
// Schedule a batch of headers
q.Schedule(emptyChain.headers(), 1)
if q.Idle() {
t.Errorf("queue should not be idle")
}
if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp {
t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
}
if got, exp := q.PendingReceipts(), 0; got != exp {
t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
}
// They won't be processable, because the fetchresults haven't been
// created yet
if got, exp := q.resultCache.countCompleted(), 0; got != exp {
t.Errorf("wrong processable count, got %d, exp %d", got, exp)
}

// Items are now queued for downloading, next step is that we tell the
// queue that a certain peer will deliver them for us
// That should trigger all of them to suddenly become 'done'
{
// Reserve blocks
peer := dummyPeer("peer-1")
fetchReq, _, _ := q.ReserveBodies(peer, 50)

// there should be nothing to fetch, blocks are empty
if fetchReq != nil {
t.Fatal("there should be no body fetch tasks remaining")
}
}
if q.blockTaskQueue.Size() != len(emptyChain.blocks)-10 {
t.Errorf("expected block task queue size %d, got %d", len(emptyChain.blocks)-10, q.blockTaskQueue.Size())
}
if q.receiptTaskQueue.Size() != 0 {
t.Errorf("expected receipt task queue to be 0, got %d", q.receiptTaskQueue.Size())
}
//fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size())
{
peer := dummyPeer("peer-3")
fetchReq, _, _ := q.ReserveReceipts(peer, 50)

// there should be nothing to fetch, blocks are empty
if fetchReq != nil {
t.Fatal("there should be no receipt fetch tasks remaining")
}
}
if got, exp := q.resultCache.countCompleted(), 10; got != exp {
t.Errorf("wrong processable count, got %d, exp %d", got, exp)
}
}

// XTestDelivery does some more extensive testing of events that happen,
// blocks that become known and peers that make reservations and deliveries.
// Disabled since it's not really a unit test, but can be executed to test
// some more advanced scenarios
func XTestDelivery(t *testing.T) {
// the outside network, holding blocks
blo, rec := makeChain(128, 0, genesis, false)
world := newNetwork()
world.receipts = rec
world.chain = blo
world.progress(10)
if false {
log.Root().SetHandler(log.StdoutHandler)
}
q := newQueue(10)
var wg sync.WaitGroup
q.Prepare(1, FastSync)
wg.Add(1)
go func() {
// deliver headers
defer wg.Done()
c := 1
for {
//fmt.Printf("getting headers from %d\n", c)
hdrs := world.headers(c)
l := len(hdrs)
//fmt.Printf("scheduling %d headers, first %d last %d\n",
//	l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
q.Schedule(hdrs, uint64(c))
c += l
}
}()
wg.Add(1)
go func() {
// collect results
defer wg.Done()
tot := 0
for {
res := q.Results(true)
tot += len(res)
fmt.Printf("got %d results, %d tot\n", len(res), tot)
// Now we can forget about these
world.forget(res[len(res)-1].Header.Number.Uint64())
}
}()
wg.Add(1)
go func() {
defer wg.Done()
// reserve body fetch
i := 4
for {
peer := dummyPeer(fmt.Sprintf("peer-%d", i))
f, _, _ := q.ReserveBodies(peer, rand.Intn(30))
if f != nil {
var emptyList []*types.Header
var txs [][]*types.Transaction
var uncles [][]*types.Header
numToSkip := rand.Intn(len(f.Headers))
for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {
txs = append(txs, world.getTransactions(hdr.Number.Uint64()))
uncles = append(uncles, emptyList)
}
time.Sleep(100 * time.Millisecond)
_, err := q.DeliverBodies(peer.id, txs, uncles)
if err != nil {
fmt.Printf("delivered %d bodies %v\n", len(txs), err)
}
} else {
i++
time.Sleep(200 * time.Millisecond)
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
// reserve receipt fetch
peer := dummyPeer("peer-3")
for {
f, _, _ := q.ReserveReceipts(peer, rand.Intn(50))
if f != nil {
var rcs [][]*types.Receipt
for _, hdr := range f.Headers {
rcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))
}
_, err := q.DeliverReceipts(peer.id, rcs)
if err != nil {
fmt.Printf("delivered %d receipts %v\n", len(rcs), err)
}
time.Sleep(100 * time.Millisecond)
} else {
time.Sleep(200 * time.Millisecond)
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 50; i++ {
time.Sleep(300 * time.Millisecond)
//world.tick()
//fmt.Printf("trying to progress\n")
world.progress(rand.Intn(100))
}
for i := 0; i < 50; i++ {
time.Sleep(2990 * time.Millisecond)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for {
time.Sleep(990 * time.Millisecond)
fmt.Printf("world block tip is %d\n",
world.chain[len(world.chain)-1].Header().Number.Uint64())
fmt.Println(q.Stats())
}
}()
wg.Wait()
}

func newNetwork() *network {
var l sync.RWMutex
return &network{
cond: sync.NewCond(&l),
offset: 1, // block 1 is at blocks[0]
}
}

// network represents the outside world of blocks and receipts
type network struct {
offset int
chain []*types.Block
receipts []types.Receipts
lock sync.RWMutex
cond *sync.Cond
}

func (n *network) getTransactions(blocknum uint64) types.Transactions {
index := blocknum - uint64(n.offset)
return n.chain[index].Transactions()
}

func (n *network) getReceipts(blocknum uint64) types.Receipts {
index := blocknum - uint64(n.offset)
if got := n.chain[index].Header().Number.Uint64(); got != blocknum {
fmt.Printf("Err, got %d exp %d\n", got, blocknum)
panic("sd")
}
return n.receipts[index]
}

func (n *network) forget(blocknum uint64) {
index := blocknum - uint64(n.offset)
n.chain = n.chain[index:]
n.receipts = n.receipts[index:]
n.offset = int(blocknum)
}

func (n *network) progress(numBlocks int) {
n.lock.Lock()
defer n.lock.Unlock()
//fmt.Printf("progressing...\n")
newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)
n.chain = append(n.chain, newBlocks...)
n.receipts = append(n.receipts, newR...)
n.cond.Broadcast()
}

func (n *network) headers(from int) []*types.Header {
numHeaders := 128
var hdrs []*types.Header
index := from - n.offset

for index >= len(n.chain) {
// wait for progress
n.cond.L.Lock()
//fmt.Printf("header going into wait\n")
n.cond.Wait()
index = from - n.offset
n.cond.L.Unlock()
}
n.lock.RLock()
defer n.lock.RUnlock()
for i, b := range n.chain[index:] {
hdrs = append(hdrs, b.Header())
if i >= numHeaders {
break
}
}
return hdrs
}

194
eth/downloader/resultstore.go
Normal file
@@ -0,0 +1,194 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
"fmt"
"sync"
"sync/atomic"

"github.com/ethereum/go-ethereum/core/types"
)

// resultStore implements a structure for maintaining fetchResults, tracking their
// download-progress and delivering (finished) results.
type resultStore struct {
items []*fetchResult // Downloaded but not yet delivered fetch results
resultOffset uint64 // Offset of the first cached fetch result in the block chain

// Internal index of first non-completed entry, updated atomically when needed.
// If all items are complete, this will equal len(items), so
// *important*: it is not safe to use for indexing without checking against length
indexIncomplete int32 // atomic access

// throttleThreshold is the limit up to which we _want_ to fill the
// results. If blocks are large, we want to limit the results to less
// than the number of available slots, and maybe only fill 1024 out of
// 8192 possible places. The queue will, at certain times, recalibrate
// this index.
throttleThreshold uint64

lock sync.RWMutex
}

func newResultStore(size int) *resultStore {
return &resultStore{
resultOffset: 0,
items: make([]*fetchResult, size),
throttleThreshold: uint64(size),
}
}

// SetThrottleThreshold updates the throttling threshold based on the requested
// limit and the total queue capacity. It returns the (possibly capped) threshold.
func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
r.lock.Lock()
defer r.lock.Unlock()

limit := uint64(len(r.items))
if threshold >= limit {
threshold = limit
}
r.throttleThreshold = threshold
return r.throttleThreshold
}

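SetThrottleThreshold is fed by the ceiling division computed in queue.Results: the number of average-sized results that fit the memory budget, capped at the store's slot count. A worked instance with assumed numbers:

package main

import "fmt"

func main() {
	const blockCacheMemory = 64 * 1024 * 1024 // memory budget (bytes)
	resultSize := 128 * 1024                  // assumed moving-average block size

	// Ceiling division: (budget + size - 1) / size, as in queue.Results.
	threshold := (blockCacheMemory + resultSize - 1) / resultSize
	fmt.Println(threshold) // 512 slots wanted

	// The store then caps the request at its capacity, here assumed 8192.
	if limit := 8192; threshold >= limit {
		threshold = limit
	}
	fmt.Println(threshold) // still 512: under the cap
}
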
// AddFetch adds a header for body/receipt fetching. This is used when the queue
// wants to reserve headers for fetching.
//
// It returns the following:
// stale     - if true, this item is already passed, and should not be requested again
// throttled - if true, the store is at capacity, this particular header is not prio now
// item      - the result to store data into
// err       - any error that occurred
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
r.lock.Lock()
defer r.lock.Unlock()

var index int
item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
if err != nil || stale || throttled {
return stale, throttled, item, err
}
if item == nil {
item = newFetchResult(header, fastSync)
r.items[index] = item
}
return stale, throttled, item, err
}

// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
// is true, that means the header has already been delivered 'upstream'. This method
// does not bubble up the 'throttle' flag, since it's moot at the point in time when
// the item is downloaded and ready for delivery.
func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
r.lock.RLock()
defer r.lock.RUnlock()

res, _, stale, _, err := r.getFetchResult(headerNumber)
return res, stale, err
}

// getFetchResult returns the fetchResult corresponding to the given item, and
// the index where the result is stored.
func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
index = int(int64(headerNumber) - int64(r.resultOffset))
throttle = index >= int(r.throttleThreshold)
stale = index < 0

if index >= len(r.items) {
err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d", errInvalidChain,
index, headerNumber, r.resultOffset, len(r.items))
return nil, index, stale, throttle, err
}
if stale {
return nil, index, stale, throttle, nil
}
item = r.items[index]
return item, index, stale, throttle, nil
}

// HasCompletedItems returns true if there are processable items available;
// this method is cheaper than countCompleted.
func (r *resultStore) HasCompletedItems() bool {
    r.lock.RLock()
    defer r.lock.RUnlock()

    if len(r.items) == 0 {
        return false
    }
    if item := r.items[0]; item != nil && item.AllDone() {
        return true
    }
    return false
}

// countCompleted returns the number of items ready for delivery, stopping at
// the first non-complete item.
//
// The method assumes (at least) rlock is held.
func (r *resultStore) countCompleted() int {
    // We iterate from the already known complete point, and see
    // if any more has completed since last count
    index := atomic.LoadInt32(&r.indexIncomplete)
    for ; ; index++ {
        if index >= int32(len(r.items)) {
            break
        }
        result := r.items[index]
        if result == nil || !result.AllDone() {
            break
        }
    }
    atomic.StoreInt32(&r.indexIncomplete, index)
    return int(index)
}

// GetCompleted returns the next batch of completed fetchResults
func (r *resultStore) GetCompleted(limit int) []*fetchResult {
    r.lock.Lock()
    defer r.lock.Unlock()

    completed := r.countCompleted()
    if limit > completed {
        limit = completed
    }
    results := make([]*fetchResult, limit)
    copy(results, r.items[:limit])

    // Delete the results from the cache and clear the tail.
    copy(r.items, r.items[limit:])
    for i := len(r.items) - limit; i < len(r.items); i++ {
        r.items[i] = nil
    }
    // Advance the expected block number of the first cache entry
    r.resultOffset += uint64(limit)
    atomic.AddInt32(&r.indexIncomplete, int32(-limit))

    return results
}

// Prepare initialises the offset with the given block number
func (r *resultStore) Prepare(offset uint64) {
    r.lock.Lock()
    defer r.lock.Unlock()

    if r.resultOffset < offset {
        r.resultOffset = offset
    }
}
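The core of resultStore is the mapping index = headerNumber - resultOffset: anything below the offset is stale (already delivered upstream), anything at or past throttleThreshold is deferred, and anything past the window is an error. The following is a minimal, self-contained sketch of that indexing scheme — an illustration with invented names, not the downloader's actual API:

package main

import "fmt"

// slotStore is a stripped-down analogue of resultStore: a fixed window of
// slots addressed by (number - offset), with a throttle threshold that caps
// how far into the window new work may be scheduled.
type slotStore struct {
    items     []*uint64
    offset    uint64
    threshold int
}

// add mirrors AddFetch's bookkeeping: it reports whether the number is
// stale (already delivered), throttled (past the threshold) or out of range.
func (s *slotStore) add(number uint64) (stale, throttled bool, err error) {
    index := int(int64(number) - int64(s.offset))
    if index < 0 {
        return true, false, nil // below the offset: already delivered
    }
    if index >= len(s.items) {
        return false, false, fmt.Errorf("number %d beyond available window", number)
    }
    if index >= s.threshold {
        return false, true, nil // defer: past the throttle threshold
    }
    if s.items[index] == nil {
        n := number
        s.items[index] = &n // reserve the slot, like newFetchResult
    }
    return false, false, nil
}

func main() {
    s := &slotStore{items: make([]*uint64, 8), offset: 100, threshold: 4}
    for _, n := range []uint64{99, 100, 103, 104, 120} {
        stale, throttled, err := s.add(n)
        fmt.Printf("n=%d stale=%v throttled=%v err=%v\n", n, stale, throttled, err)
    }
}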
@@ -34,13 +34,14 @@ import (
// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
    items    []common.Hash              // Hashes of the state items to download
    tasks    map[common.Hash]*stateTask // Download tasks to track previous attempts
    timeout  time.Duration              // Maximum round trip time for this to complete
    timer    *time.Timer                // Timer to fire when the RTT timeout expires
    peer     *peerConnection            // Peer that we're requesting from
    response [][]byte                   // Response data of the peer (nil for timeouts)
    dropped  bool                       // Flag whether the peer dropped off early
    nItems    uint16                     // Number of items requested for download (max is 384, so uint16 is sufficient)
    tasks     map[common.Hash]*stateTask // Download tasks to track previous attempts
    timeout   time.Duration              // Maximum round trip time for this to complete
    timer     *time.Timer                // Timer to fire when the RTT timeout expires
    peer      *peerConnection            // Peer that we're requesting from
    delivered time.Time                  // Time when the packet was delivered (independent of when we process it)
    response  [][]byte                   // Response data of the peer (nil for timeouts)
    dropped   bool                       // Flag whether the peer dropped off early
}

// timedOut returns if this request timed out.
@@ -99,7 +100,6 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
    finished []*stateReq            // Completed or failed requests
    timeout  = make(chan *stateReq) // Timed out active requests
)

// Run the state sync.
log.Trace("State sync starting", "root", s.root)
go s.run()
@@ -149,6 +149,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
// Finalize the request and queue up for processing
req.timer.Stop()
req.response = pack.(*statePack).states
req.delivered = time.Now()

finished = append(finished, req)
delete(active, pack.PeerId())
@@ -163,6 +164,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
// Finalize the request and queue up for processing
req.timer.Stop()
req.dropped = true
req.delivered = time.Now()

finished = append(finished, req)
delete(active, p.id)
@@ -175,6 +177,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
if active[req.peer.id] != req {
    continue
}
req.delivered = time.Now()
// Move the timed out data back into the download queue
finished = append(finished, req)
delete(active, req.peer.id)
@@ -192,16 +195,12 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
    // Move the previous request to the finished set
    old.timer.Stop()
    old.dropped = true
    old.delivered = time.Now()
    finished = append(finished, old)
}
// Start a timer to notify the sync loop if the peer stalled.
req.timer = time.AfterFunc(req.timeout, func() {
    select {
    case timeout <- req:
    case <-s.done:
        // Prevent leaking of timer goroutines in the unlikely case where a
        // timer is fired just before exiting runStateSync.
    }
    timeout <- req
})
active[req.peer.id] = req
}
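The select added to the timer callback above is the standard fix for a leaked goroutine: a bare `timeout <- req` blocks forever once runStateSync has returned and nobody reads the channel. A stripped-down, runnable reproduction of the pattern (names invented for the example):

package main

import (
    "fmt"
    "time"
)

func main() {
    timeout := make(chan int)
    done := make(chan struct{})

    // Without the select, this callback would block on `timeout <- 1`
    // forever once the consumer has gone away, leaking a goroutine.
    timer := time.AfterFunc(10*time.Millisecond, func() {
        select {
        case timeout <- 1:
        case <-done:
            // Consumer already exited; drop the notification instead of blocking.
        }
    })
    defer timer.Stop()

    // Simulate the sync loop exiting before the timer fires.
    close(done)
    time.Sleep(20 * time.Millisecond)
    fmt.Println("no goroutine left blocked on the timeout channel")
}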
@@ -213,7 +212,6 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
// are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
    log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))

    for len(active) > 0 {
        var (
            req *stateReq
@@ -235,16 +233,16 @@ func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*
        if req == nil {
            continue
        }
        req.peer.log.Trace("State peer marked idle (spindown)", "req.items", len(req.items), "reason", reason)
        req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
        req.timer.Stop()
        delete(active, req.peer.id)
        req.peer.SetNodeDataIdle(len(req.items))
        req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
    }
    // The 'finished' set contains deliveries that we were going to pass to processing.
    // Those are now moot, but we still need to set those peers as idle, which would
    // otherwise have been done after processing
    for _, req := range finished {
        req.peer.SetNodeDataIdle(len(req.items))
        req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
    }
}

@@ -352,7 +350,7 @@ func (s *stateSync) loop() (err error) {
case req := <-s.deliver:
    // Response, disconnect or timeout triggered, drop the peer if stalling
    log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
    if len(req.items) <= 2 && !req.dropped && req.timedOut() {
    if req.nItems <= 2 && !req.dropped && req.timedOut() {
        // 2 items are the minimum requested, if even that times out, we've no use for
        // this peer at the moment.
        log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
@@ -376,7 +374,7 @@ func (s *stateSync) loop() (err error) {
    }
    // Process all the received blobs and check for stale delivery
    delivered, err := s.process(req)
    req.peer.SetNodeDataIdle(delivered)
    req.peer.SetNodeDataIdle(delivered, req.delivered)
    if err != nil {
        log.Warn("Node data write error", "err", err)
        return err
@@ -413,14 +411,14 @@ func (s *stateSync) assignTasks() {
// Assign a batch of fetches proportional to the estimated latency/bandwidth
cap := p.NodeDataCapacity(s.d.requestRTT())
req := &stateReq{peer: p, timeout: s.d.requestTTL()}
s.fillTasks(cap, req)
items := s.fillTasks(cap, req)

// If the peer was assigned tasks to fetch, send the network request
if len(req.items) > 0 {
    req.peer.log.Trace("Requesting new batch of data", "type", "state", "count", len(req.items), "root", s.root)
if len(items) > 0 {
    req.peer.log.Trace("Requesting new batch of data", "type", "state", "count", len(items), "root", s.root)
    select {
    case s.d.trackStateReq <- req:
        req.peer.FetchNodeData(req.items)
        req.peer.FetchNodeData(items)
    case <-s.cancel:
    case <-s.d.cancelCh:
    }
@@ -430,7 +428,7 @@ func (s *stateSync) assignTasks() {

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) {
func (s *stateSync) fillTasks(n int, req *stateReq) []common.Hash {
    // Refill available tasks from the scheduler.
    if len(s.tasks) < n {
        new := s.sched.Missing(n - len(s.tasks))
@@ -439,11 +437,11 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
        }
    }
    // Find tasks that haven't been tried with the request's peer.
    req.items = make([]common.Hash, 0, n)
    items := make([]common.Hash, 0, n)
    req.tasks = make(map[common.Hash]*stateTask, n)
    for hash, t := range s.tasks {
        // Stop when we've gathered enough requests
        if len(req.items) == n {
        if len(items) == n {
            break
        }
        // Skip any requests we've already tried from this peer
@@ -452,10 +450,12 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
        }
        // Assign the request to this peer
        t.attempts[req.peer.id] = struct{}{}
        req.items = append(req.items, hash)
        items = append(items, hash)
        req.tasks[hash] = t
        delete(s.tasks, hash)
    }
    req.nItems = uint16(len(items))
    return items
}

// process iterates over a batch of delivered state data, injecting each item
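The point of replacing the retained items slice with a bare nItems count is what the long-lived request object keeps alive: the hashes are only needed to issue the fetch, so fillTasks now hands them back to the caller and the request stores just their count (at most 384, hence uint16). A toy illustration of the struct-size difference on a 64-bit platform — the types are invented for the example:

package main

import (
    "fmt"
    "unsafe"
)

type hash [32]byte

// Before: the request object pins every requested hash in memory until
// the request is finalized (24-byte slice header plus 32 bytes per hash).
type reqBefore struct {
    items []hash
}

// After: the slice is passed straight to the fetch call; only the count
// lives on in the request.
type reqAfter struct {
    nItems uint16
}

func main() {
    fmt.Println(unsafe.Sizeof(reqBefore{})) // 24
    fmt.Println(unsafe.Sizeof(reqAfter{}))  // 2
}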
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package fetcher contains the announcement based blocks or transaction synchronisation.
// Package fetcher contains the announcement based header, blocks or transaction synchronisation.
package fetcher

import (
@@ -31,6 +31,7 @@ import (
)

const (
    lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
    arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
    gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
    fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
@@ -39,7 +40,7 @@ const (
const (
    maxUncleDist = 7   // Maximum allowed backward distance from the chain head
    maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
    hashLimit    = 256 // Maximum number of unique blocks a peer may have announced
    hashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced
    blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
)

@@ -63,9 +64,10 @@ var (
    bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
)

var (
    errTerminated = errors.New("terminated")
)
var errTerminated = errors.New("terminated")

// HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
type HeaderRetrievalFn func(common.Hash) *types.Header

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block
@@ -85,6 +87,9 @@ type blockBroadcasterFn func(block *types.Block, propagate bool)
// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// headersInsertFn is a callback type to insert a batch of headers into the local chain.
type headersInsertFn func(headers []*types.Header) (int, error)

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

@@ -121,18 +126,38 @@ type bodyFilterTask struct {
    time time.Time // Arrival time of the blocks' contents
}

// blockInject represents a scheduled import operation.
type blockInject struct {
// blockOrHeaderInject represents a scheduled import operation.
type blockOrHeaderInject struct {
    origin string
    block  *types.Block

    header *types.Header // Used for light mode fetcher which only cares about header.
    block  *types.Block  // Used for normal mode fetcher which imports full block.
}

// number returns the block number of the injected object.
func (inject *blockOrHeaderInject) number() uint64 {
    if inject.header != nil {
        return inject.header.Number.Uint64()
    }
    return inject.block.NumberU64()
}

// hash returns the block hash of the injected object.
func (inject *blockOrHeaderInject) hash() common.Hash {
    if inject.header != nil {
        return inject.header.Hash()
    }
    return inject.block.Hash()
}

// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type BlockFetcher struct {
    light bool // The indicator whether it's a light fetcher or normal one.

    // Various event channels
    notify chan *blockAnnounce
    inject chan *blockInject
    inject chan *blockOrHeaderInject

    headerFilter chan chan *headerFilterTask
    bodyFilter   chan chan *bodyFilterTask
@@ -148,31 +173,34 @@ type BlockFetcher struct {
    completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing

    // Block cache
    queue  *prque.Prque                 // Queue containing the import operations (block number sorted)
    queues map[string]int               // Per peer block counts to prevent memory exhaustion
    queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)
    queue  *prque.Prque                         // Queue containing the import operations (block number sorted)
    queues map[string]int                       // Per peer block counts to prevent memory exhaustion
    queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)

    // Callbacks
    getHeader      HeaderRetrievalFn  // Retrieves a header from the local chain
    getBlock       blockRetrievalFn   // Retrieves a block from the local chain
    verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
    broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
    chainHeight    chainHeightFn      // Retrieves the current chain's height
    insertHeaders  headersInsertFn    // Injects a batch of headers into the chain
    insertChain    chainInsertFn      // Injects a batch of blocks into the chain
    dropPeer       peerDropFn         // Drops a peer for misbehaving

    // Testing hooks
    announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
    queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
    fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
    completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
    importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
    announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
    queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
    fetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch
    completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)
    importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
}

// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
    return &BlockFetcher{
        light:        light,
        notify:       make(chan *blockAnnounce),
        inject:       make(chan *blockInject),
        inject:       make(chan *blockOrHeaderInject),
        headerFilter: make(chan chan *headerFilterTask),
        bodyFilter:   make(chan chan *bodyFilterTask),
        done:         make(chan common.Hash),
@@ -184,11 +212,13 @@ func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, b
        completing: make(map[common.Hash]*blockAnnounce),
        queue:      prque.New(nil),
        queues:     make(map[string]int),
        queued:     make(map[common.Hash]*blockInject),
        queued:     make(map[common.Hash]*blockOrHeaderInject),
        getHeader:  getHeader,
        getBlock:   getBlock,
        verifyHeader:   verifyHeader,
        broadcastBlock: broadcastBlock,
        chainHeight:    chainHeight,
        insertHeaders:  insertHeaders,
        insertChain:    insertChain,
        dropPeer:       dropPeer,
    }

@@ -228,7 +258,7 @@ func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time

// Enqueue tries to fill gaps in the fetcher's future import queue.
func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
    op := &blockInject{
    op := &blockOrHeaderInject{
        origin: peer,
        block:  block,
    }
@@ -315,13 +345,13 @@ func (f *BlockFetcher) loop() {
// Import any queued blocks that could potentially fit
height := f.chainHeight()
for !f.queue.Empty() {
    op := f.queue.PopItem().(*blockInject)
    hash := op.block.Hash()
    op := f.queue.PopItem().(*blockOrHeaderInject)
    hash := op.hash()
    if f.queueChangeHook != nil {
        f.queueChangeHook(hash, false)
    }
    // If too high up the chain or phase, continue later
    number := op.block.NumberU64()
    number := op.number()
    if number > height+1 {
        f.queue.Push(op, -int64(number))
        if f.queueChangeHook != nil {
@@ -330,11 +360,15 @@ func (f *BlockFetcher) loop() {
        break
    }
    // Otherwise if fresh and still unknown, try and import
    if number+maxUncleDist < height || f.getBlock(hash) != nil {
    if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
        f.forgetBlock(hash)
        continue
    }
    f.insert(op.origin, op.block)
    if f.light {
        f.importHeaders(op.origin, op.header)
    } else {
        f.importBlocks(op.origin, op.block)
    }
}
// Wait for an outside event to occur
select {
@@ -379,7 +413,13 @@ func (f *BlockFetcher) loop() {
case op := <-f.inject:
    // A direct block insertion was requested, try and fill any pending gaps
    blockBroadcastInMeter.Mark(1)
    f.enqueue(op.origin, op.block)

    // Now only direct block injection is allowed, drop the header injection
    // here silently if we receive one.
    if f.light {
        continue
    }
    f.enqueue(op.origin, nil, op.block)

case hash := <-f.done:
    // A pending import finished, remove all traces of the notification
@@ -391,13 +431,19 @@ func (f *BlockFetcher) loop() {
    request := make(map[string][]common.Hash)

    for hash, announces := range f.announced {
        if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
        // In the current LES protocol (les2/les3), only header announce is
        // available, no need to wait too much time for header broadcast.
        timeout := arriveTimeout - gatherSlack
        if f.light {
            timeout = 0
        }
        if time.Since(announces[0].time) > timeout {
            // Pick a random peer to retrieve from, reset all others
            announce := announces[rand.Intn(len(announces))]
            f.forgetHash(hash)

            // If the block still didn't arrive, queue for fetching
            if f.getBlock(hash) == nil {
            if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {
                request[announce.origin] = append(request[announce.origin], hash)
                f.fetching[hash] = announce
            }
@@ -465,7 +511,7 @@ func (f *BlockFetcher) loop() {

// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
for _, header := range task.headers {
    hash := header.Hash()

@@ -478,6 +524,16 @@ func (f *BlockFetcher) loop() {
    f.forgetHash(hash)
    continue
}
// Collect all headers only if we are running in light
// mode and the headers are not imported by other means.
if f.light {
    if f.getHeader(hash) == nil {
        announce.header = header
        lightHeaders = append(lightHeaders, announce)
    }
    f.forgetHash(hash)
    continue
}
// Only keep if not imported by other means
if f.getBlock(hash) == nil {
    announce.header = header
@@ -522,10 +578,14 @@ func (f *BlockFetcher) loop() {
        f.rescheduleComplete(completeTimer)
    }
}
// Schedule the header for light fetcher import
for _, announce := range lightHeaders {
    f.enqueue(announce.origin, announce.header, nil)
}
// Schedule the header-only blocks for import
for _, block := range complete {
    if announce := f.completing[block.Hash()]; announce != nil {
        f.enqueue(announce.origin, block)
        f.enqueue(announce.origin, nil, block)
    }
}

@@ -538,40 +598,51 @@ func (f *BlockFetcher) loop() {
    return
}
bodyFilterInMeter.Mark(int64(len(task.transactions)))

blocks := []*types.Block{}
for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
    // Match up a body to any possible completion request
    matched := false

    for hash, announce := range f.completing {
        if f.queued[hash] == nil {
            txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
            uncleHash := types.CalcUncleHash(task.uncles[i])

            if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
                // Mark the body matched, reassemble if still unknown
                matched = true

                if f.getBlock(hash) == nil {
                    block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
                    block.ReceivedAt = task.time

                    blocks = append(blocks, block)
                } else {
                    f.forgetHash(hash)
                }
// abort early if there's nothing explicitly requested
if len(f.completing) > 0 {
    for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
        // Match up a body to any possible completion request
        var (
            matched   = false
            uncleHash common.Hash // calculated lazily and reused
            txnHash   common.Hash // calculated lazily and reused
        )
        for hash, announce := range f.completing {
            if f.queued[hash] != nil || announce.origin != task.peer {
                continue
            }
            if uncleHash == (common.Hash{}) {
                uncleHash = types.CalcUncleHash(task.uncles[i])
            }
            if uncleHash != announce.header.UncleHash {
                continue
            }
            if txnHash == (common.Hash{}) {
                txnHash = types.DeriveSha(types.Transactions(task.transactions[i]))
            }
            if txnHash != announce.header.TxHash {
                continue
            }
            // Mark the body matched, reassemble if still unknown
            matched = true
            if f.getBlock(hash) == nil {
                block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
                block.ReceivedAt = task.time
                blocks = append(blocks, block)
            } else {
                f.forgetHash(hash)
            }

        }
        if matched {
            task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
            task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
            i--
            continue
        }
    }
    if matched {
        task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
        task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
        i--
        continue
    }
}

bodyFilterOutMeter.Mark(int64(len(task.transactions)))
select {
case filter <- task:
@@ -581,7 +652,7 @@ func (f *BlockFetcher) loop() {
// Schedule the retrieved blocks for ordered import
for _, block := range blocks {
    if announce := f.completing[block.Hash()]; announce != nil {
        f.enqueue(announce.origin, block)
        f.enqueue(announce.origin, nil, block)
    }
}
}
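The rewritten body-matching loop above computes DeriveSha and CalcUncleHash at most once per delivered body instead of once per pending completion, caching each digest on first use and treating the zero value as "not yet computed" (the same caveat as the common.Hash{} sentinel in the real code). A self-contained sketch of that memoize-on-first-comparison shape, with a stand-in hash function:

package main

import (
    "crypto/sha256"
    "fmt"
)

type digest [32]byte

func expensiveHash(data []byte) digest { return sha256.Sum256(data) }

func main() {
    body := []byte("block body")
    candidates := []digest{expensiveHash([]byte("other")), expensiveHash(body)}

    var (
        bodyHash digest // zero value doubles as "not yet computed"
        computed int
    )
    for i, want := range candidates {
        if bodyHash == (digest{}) { // compute lazily, on first comparison
            bodyHash = expensiveHash(body)
            computed++
        }
        if bodyHash == want {
            fmt.Printf("matched candidate %d (hash computed %d time(s))\n", i, computed)
        }
    }
}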
@@ -594,6 +665,12 @@ func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
if len(f.announced) == 0 {
    return
}
// Schedule announcement retrieval quickly for light mode
// since server won't send any headers to client.
if f.light {
    fetch.Reset(lightTimeout)
    return
}
// Otherwise find the earliest expiring announcement
earliest := time.Now()
for _, announces := range f.announced {
@@ -620,46 +697,88 @@ func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
    complete.Reset(gatherSlack - time.Since(earliest))
}

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
    hash := block.Hash()

// enqueue schedules a new header or block import operation, if the component
// to be imported has not yet been seen.
func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
    var (
        hash   common.Hash
        number uint64
    )
    if header != nil {
        hash, number = header.Hash(), header.Number.Uint64()
    } else {
        hash, number = block.Hash(), block.NumberU64()
    }
    // Ensure the peer isn't DOSing us
    count := f.queues[peer] + 1
    if count > blockLimit {
        log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
        log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
        blockBroadcastDOSMeter.Mark(1)
        f.forgetHash(hash)
        return
    }
    // Discard any past or too distant blocks
    if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
        log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
    if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
        log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
        blockBroadcastDropMeter.Mark(1)
        f.forgetHash(hash)
        return
    }
    // Schedule the block for future importing
    if _, ok := f.queued[hash]; !ok {
        op := &blockInject{
            origin: peer,
            block:  block,
        op := &blockOrHeaderInject{origin: peer}
        if header != nil {
            op.header = header
        } else {
            op.block = block
        }
        f.queues[peer] = count
        f.queued[hash] = op
        f.queue.Push(op, -int64(block.NumberU64()))
        f.queue.Push(op, -int64(number))
        if f.queueChangeHook != nil {
            f.queueChangeHook(op.block.Hash(), true)
            f.queueChangeHook(hash, true)
        }
        log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
        log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
    }
}

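The first guard in enqueue is a plain per-peer allowance: every queued-but-unimported item counts against the announcing peer, and anything beyond blockLimit is discarded before it can consume queue memory. Reduced to its core as a runnable sketch (names invented for the example):

package main

import "fmt"

const blockLimit = 64 // max queued-but-unimported items per peer

func main() {
    queues := make(map[string]int) // per-peer pending counts

    enqueue := func(peer string) bool {
        count := queues[peer] + 1
        if count > blockLimit {
            return false // exceeded allowance: discard instead of queueing
        }
        queues[peer] = count
        return true
    }

    for i := 0; i < blockLimit+3; i++ {
        if !enqueue("peer-1") {
            fmt.Printf("dropped item %d from peer-1\n", i)
        }
    }
}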
// insert spawns a new goroutine to run a block insertion into the chain. If the
// importHeaders spawns a new goroutine to run a header insertion into the chain.
// If the header's number is at the same height as the current import phase, it
// updates the phase states accordingly.
func (f *BlockFetcher) importHeaders(peer string, header *types.Header) {
    hash := header.Hash()
    log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash)

    go func() {
        defer func() { f.done <- hash }()
        // If the parent's unknown, abort insertion
        parent := f.getHeader(header.ParentHash)
        if parent == nil {
            log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
            return
        }
        // Validate the header and if something went wrong, drop the peer
        if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {
            log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
            f.dropPeer(peer)
            return
        }
        // Run the actual import and log any issues
        if _, err := f.insertHeaders([]*types.Header{header}); err != nil {
            log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
            return
        }
        // Invoke the testing hook if needed
        if f.importedHook != nil {
            f.importedHook(header, nil)
        }
    }()
}

// importBlocks spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *BlockFetcher) insert(peer string, block *types.Block) {
func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
    hash := block.Hash()

    // Run the import on a new thread
@@ -700,7 +819,7 @@ func (f *BlockFetcher) insert(peer string, block *types.Block) {

    // Invoke the testing hook if needed
    if f.importedHook != nil {
        f.importedHook(block)
        f.importedHook(nil, block)
    }
    }()
}

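Both import paths share the same completion-signalling shape: spawn a goroutine and use defer to send on the done channel, so the fetcher's loop can clean up its bookkeeping no matter which validation step returns early. A stripped-down version of that pattern:

package main

import "fmt"

func importItem(id int, done chan<- int) {
    go func() {
        // The deferred send fires on every return path below, so the
        // coordinating loop always learns that this item has finished.
        defer func() { done <- id }()

        if id%2 == 0 {
            return // e.g. unknown parent: abort, but still signal completion
        }
        fmt.Printf("imported item %d\n", id)
    }()
}

func main() {
    done := make(chan int)
    for i := 0; i < 4; i++ {
        importItem(i, done)
    }
    for i := 0; i < 4; i++ {
        fmt.Printf("item %d finished\n", <-done)
    }
}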
@@ -78,26 +78,36 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
type fetcherTester struct {
    fetcher *BlockFetcher

    hashes []common.Hash                // Hash chain belonging to the tester
    blocks map[common.Hash]*types.Block // Blocks belonging to the tester
    drops  map[string]bool              // Map of peers dropped by the fetcher
    hashes  []common.Hash                 // Hash chain belonging to the tester
    headers map[common.Hash]*types.Header // Headers belonging to the tester
    blocks  map[common.Hash]*types.Block  // Blocks belonging to the tester
    drops   map[string]bool               // Map of peers dropped by the fetcher

    lock sync.RWMutex
}

// newTester creates a new fetcher test mocker.
func newTester() *fetcherTester {
func newTester(light bool) *fetcherTester {
    tester := &fetcherTester{
        hashes: []common.Hash{genesis.Hash()},
        blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
        drops:  make(map[string]bool),
        hashes:  []common.Hash{genesis.Hash()},
        headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
        blocks:  map[common.Hash]*types.Block{genesis.Hash(): genesis},
        drops:   make(map[string]bool),
    }
    tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
    tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)
    tester.fetcher.Start()

    return tester
}

// getHeader retrieves a header from the tester's block chain.
func (f *fetcherTester) getHeader(hash common.Hash) *types.Header {
    f.lock.RLock()
    defer f.lock.RUnlock()

    return f.headers[hash]
}

// getBlock retrieves a block from the tester's block chain.
func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
    f.lock.RLock()
@@ -120,9 +130,33 @@ func (f *fetcherTester) chainHeight() uint64 {
    f.lock.RLock()
    defer f.lock.RUnlock()

    if f.fetcher.light {
        return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64()
    }
    return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
}

// insertHeaders injects new headers into the simulated chain.
func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {
    f.lock.Lock()
    defer f.lock.Unlock()

    for i, header := range headers {
        // Make sure the parent is known
        if _, ok := f.headers[header.ParentHash]; !ok {
            return i, errors.New("unknown parent")
        }
        // Discard any new headers if the same height already exists
        if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() {
            return i, nil
        }
        // Otherwise build our current chain
        f.hashes = append(f.hashes, header.Hash())
        f.headers[header.Hash()] = header
    }
    return 0, nil
}

// insertChain injects new blocks into the simulated chain.
func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
    f.lock.Lock()
@@ -233,7 +267,7 @@ func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive b
}

// verifyImportEvent verifies that a single event arrives on an import channel.
func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {
func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
    if arrive {
        select {
        case <-imported:
@@ -251,7 +285,7 @@ func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {

// verifyImportCount verifies that exactly count number of events arrive on an
// import hook channel.
func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
func verifyImportCount(t *testing.T, imported chan interface{}, count int) {
    for i := 0; i < count; i++ {
        select {
        case <-imported:
@@ -263,7 +297,7 @@ func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
}

// verifyImportDone verifies that no more events are arriving on an import channel.
func verifyImportDone(t *testing.T, imported chan *types.Block) {
func verifyImportDone(t *testing.T, imported chan interface{}) {
    select {
    case <-imported:
        t.Fatalf("extra block imported")
@@ -271,45 +305,62 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {
    }
}

// Tests that a fetcher accepts block announcements and initiates retrievals for
// them, successfully importing into the local chain.
func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }
// verifyChainHeight verifies the chain height is as expected.
func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {
    if fetcher.chainHeight() != height {
        t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height)
    }
}

func testSequentialAnnouncements(t *testing.T, protocol int) {
// Tests that a fetcher accepts block/header announcements and initiates retrievals
// for them, successfully importing into the local chain.
func TestFullSequentialAnnouncements(t *testing.T)  { testSequentialAnnouncements(t, false) }
func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) }

func testSequentialAnnouncements(t *testing.T, light bool) {
    // Create a chain of blocks to import
    targetBlocks := 4 * hashLimit
    hashes, blocks := makeChain(targetBlocks, 0, genesis)

    tester := newTester()
    tester := newTester(light)
    headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)

    // Iteratively announce blocks until all are imported
    imported := make(chan *types.Block)
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

    imported := make(chan interface{})
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
        if light {
            if header == nil {
                t.Fatalf("Fetcher tried to import an empty header")
            }
            imported <- header
        } else {
            if block == nil {
                t.Fatalf("Fetcher tried to import an empty block")
            }
            imported <- block
        }
    }
    for i := len(hashes) - 2; i >= 0; i-- {
        tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
        verifyImportEvent(t, imported, true)
    }
    verifyImportDone(t, imported)
    verifyChainHeight(t, tester, uint64(len(hashes)-1))
}
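The light/full import hook above reappears verbatim in most of the tests that follow. A hypothetical helper like the sketch below — not part of this change set — would express the same assertion once; it assumes the test package's existing imports of testing and core/types:

// importHook returns an importedHook that asserts the right component was
// delivered for the mode under test and forwards it to the channel.
// Hypothetical helper, not present in the diff above.
func importHook(t *testing.T, light bool, imported chan interface{}) func(*types.Header, *types.Block) {
    return func(header *types.Header, block *types.Block) {
        if light {
            if header == nil {
                t.Fatalf("Fetcher tried to import an empty header")
            }
            imported <- header
            return
        }
        if block == nil {
            t.Fatalf("Fetcher tried to import an empty block")
        }
        imported <- block
    }
}

With it, each test would read: tester.fetcher.importedHook = importHook(t, light, imported).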

// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }
func TestFullConcurrentAnnouncements(t *testing.T)  { testConcurrentAnnouncements(t, false) }
func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) }

func testConcurrentAnnouncements(t *testing.T, protocol int) {
func testConcurrentAnnouncements(t *testing.T, light bool) {
    // Create a chain of blocks to import
    targetBlocks := 4 * hashLimit
    hashes, blocks := makeChain(targetBlocks, 0, genesis)

    // Assemble a tester with a built in counter for the requests
    tester := newTester()
    tester := newTester(light)
    firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack)
    firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0)
    secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack)
@@ -325,9 +376,20 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
        return secondHeaderFetcher(hash)
    }
    // Iteratively announce blocks until all are imported
    imported := make(chan *types.Block)
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

    imported := make(chan interface{})
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
        if light {
            if header == nil {
                t.Fatalf("Fetcher tried to import an empty header")
            }
            imported <- header
        } else {
            if block == nil {
                t.Fatalf("Fetcher tried to import an empty block")
            }
            imported <- block
        }
    }
    for i := len(hashes) - 2; i >= 0; i-- {
        tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher)
        tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher)
@@ -340,30 +402,42 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
    if int(counter) != targetBlocks {
        t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks)
    }
    verifyChainHeight(t, tester, uint64(len(hashes)-1))
}

// Tests that announcements arriving while a previous one is being fetched still
// result in a valid import.
func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }
func TestFullOverlappingAnnouncements(t *testing.T)  { testOverlappingAnnouncements(t, false) }
func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) }

func testOverlappingAnnouncements(t *testing.T, protocol int) {
func testOverlappingAnnouncements(t *testing.T, light bool) {
    // Create a chain of blocks to import
    targetBlocks := 4 * hashLimit
    hashes, blocks := makeChain(targetBlocks, 0, genesis)

    tester := newTester()
    tester := newTester(light)
    headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)

    // Iteratively announce blocks, but overlap them continuously
    overlap := 16
    imported := make(chan *types.Block, len(hashes)-1)
    imported := make(chan interface{}, len(hashes)-1)
    for i := 0; i < overlap; i++ {
        imported <- nil
    }
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
        if light {
            if header == nil {
                t.Fatalf("Fetcher tried to import an empty header")
            }
            imported <- header
        } else {
            if block == nil {
                t.Fatalf("Fetcher tried to import an empty block")
            }
            imported <- block
        }
    }

    for i := len(hashes) - 2; i >= 0; i-- {
        tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -375,19 +449,19 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
    }
    // Wait for all the imports to complete and check count
    verifyImportCount(t, imported, overlap)
    verifyChainHeight(t, tester, uint64(len(hashes)-1))
}

// Tests that announces already being retrieved will not be duplicated.
func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }
func TestFullPendingDeduplication(t *testing.T)  { testPendingDeduplication(t, false) }
func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) }

func testPendingDeduplication(t *testing.T, protocol int) {
func testPendingDeduplication(t *testing.T, light bool) {
    // Create a hash and corresponding block
    hashes, blocks := makeChain(1, 0, genesis)

    // Assemble a tester with a built in counter and delayed fetcher
    tester := newTester()
    tester := newTester(light)
    headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0)

@@ -403,42 +477,58 @@ func testPendingDeduplication(t *testing.T, protocol int) {
        }()
        return nil
    }
    checkNonExist := func() bool {
        return tester.getBlock(hashes[0]) == nil
    }
    if light {
        checkNonExist = func() bool {
            return tester.getHeader(hashes[0]) == nil
        }
    }
    // Announce the same block many times until it's fetched (wait for any pending ops)
    for tester.getBlock(hashes[0]) == nil {
    for checkNonExist() {
        tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
        time.Sleep(time.Millisecond)
    }
    time.Sleep(delay)

    // Check that all blocks were imported and none fetched twice
    if imported := len(tester.blocks); imported != 2 {
        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 2)
    }
    if int(counter) != 1 {
        t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1)
    }
    verifyChainHeight(t, tester, 1)
}

// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }
func TestFullRandomArrivalImport(t *testing.T)  { testRandomArrivalImport(t, false) }
func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) }

func testRandomArrivalImport(t *testing.T, protocol int) {
func testRandomArrivalImport(t *testing.T, light bool) {
    // Create a chain of blocks to import, and choose one to delay
    targetBlocks := maxQueueDist
    hashes, blocks := makeChain(targetBlocks, 0, genesis)
    skip := targetBlocks / 2

    tester := newTester()
    tester := newTester(light)
    headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)

    // Iteratively announce blocks, skipping one entry
    imported := make(chan *types.Block, len(hashes)-1)
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

    imported := make(chan interface{}, len(hashes)-1)
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
        if light {
            if header == nil {
                t.Fatalf("Fetcher tried to import an empty header")
            }
            imported <- header
        } else {
            if block == nil {
                t.Fatalf("Fetcher tried to import an empty block")
            }
            imported <- block
        }
    }
    for i := len(hashes) - 1; i >= 0; i-- {
        if i != skip {
            tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -448,27 +538,24 @@ func testRandomArrivalImport(t *testing.T, protocol int) {
    // Finally announce the skipped entry and check full import
    tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
    verifyImportCount(t, imported, len(hashes)-1)
    verifyChainHeight(t, tester, uint64(len(hashes)-1))
}

// Tests that direct block enqueues (due to block propagation vs. hash announce)
// are correctly scheduled, filling any import queue gaps.
func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }

func testQueueGapFill(t *testing.T, protocol int) {
func TestQueueGapFill(t *testing.T) {
    // Create a chain of blocks to import, and choose one to not announce at all
    targetBlocks := maxQueueDist
    hashes, blocks := makeChain(targetBlocks, 0, genesis)
    skip := targetBlocks / 2

    tester := newTester()
    tester := newTester(false)
    headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)

    // Iteratively announce blocks, skipping one entry
    imported := make(chan *types.Block, len(hashes)-1)
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
    imported := make(chan interface{}, len(hashes)-1)
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }

    for i := len(hashes) - 1; i >= 0; i-- {
        if i != skip {
@@ -479,20 +566,17 @@ func testQueueGapFill(t *testing.T, protocol int) {
    // Fill the missing block directly as if propagated
    tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
    verifyImportCount(t, imported, len(hashes)-1)
    verifyChainHeight(t, tester, uint64(len(hashes)-1))
}

// Tests that blocks arriving from various sources (multiple propagations, hash
// announces, etc) do not get scheduled for import multiple times.
func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }

func testImportDeduplication(t *testing.T, protocol int) {
func TestImportDeduplication(t *testing.T) {
    // Create two blocks to import (one for duplication, the other for stalling)
    hashes, blocks := makeChain(2, 0, genesis)

    // Create the tester and wrap the importer with a counter
    tester := newTester()
    tester := newTester(false)
    headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)

@@ -503,9 +587,9 @@ func testImportDeduplication(t *testing.T, protocol int) {
    }
    // Instrument the fetching and imported events
    fetching := make(chan []common.Hash)
    imported := make(chan *types.Block, len(hashes)-1)
    imported := make(chan interface{}, len(hashes)-1)
    tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }

    // Announce the duplicating block, wait for retrieval, and also propagate directly
    tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -534,7 +618,7 @@ func TestDistantPropagationDiscarding(t *testing.T) {
    low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1

    // Create a tester and simulate a head block being the middle of the above chain
    tester := newTester()
    tester := newTester(false)

    tester.lock.Lock()
    tester.hashes = []common.Hash{head}
@@ -558,11 +642,10 @@ func TestDistantPropagationDiscarding(t *testing.T) {
// Tests that announcements with numbers much lower or higher than our current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }
func TestFullDistantAnnouncementDiscarding(t *testing.T)  { testDistantAnnouncementDiscarding(t, false) }
func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) }

func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
func testDistantAnnouncementDiscarding(t *testing.T, light bool) {
    // Create a long chain to import and define the discard boundaries
    hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
    head := hashes[len(hashes)/2]
@@ -570,10 +653,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
    low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1

    // Create a tester and simulate a head block being the middle of the above chain
    tester := newTester()
    tester := newTester(light)

    tester.lock.Lock()
    tester.hashes = []common.Hash{head}
    tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()}
    tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
    tester.lock.Unlock()

@@ -601,21 +685,31 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {

// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }
func TestFullInvalidNumberAnnouncement(t *testing.T)  { testInvalidNumberAnnouncement(t, false) }
func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) }

func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
func testInvalidNumberAnnouncement(t *testing.T, light bool) {
    // Create a single block to import and check numbers against
    hashes, blocks := makeChain(1, 0, genesis)

    tester := newTester()
    tester := newTester(light)
    badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack)
    badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)

    imported := make(chan *types.Block)
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

    imported := make(chan interface{})
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
        if light {
            if header == nil {
                t.Fatalf("Fetcher tried to import an empty header")
            }
            imported <- header
        } else {
            if block == nil {
                t.Fatalf("Fetcher tried to import an empty block")
            }
            imported <- block
        }
    }
    // Announce a block with a bad number, check for immediate drop
    tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher)
    verifyImportEvent(t, imported, false)
@@ -646,15 +740,11 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {

// Tests that if a block is empty (i.e. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }

func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
func TestEmptyBlockShortCircuit(t *testing.T) {
    // Create a chain of blocks to import
    hashes, blocks := makeChain(32, 0, genesis)

    tester := newTester()
    tester := newTester(false)
    headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
    bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)

@@ -665,9 +755,13 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
    completing := make(chan []common.Hash)
    tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }

    imported := make(chan *types.Block)
    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

    imported := make(chan interface{})
    tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
        if block == nil {
            t.Fatalf("Fetcher tried to import an empty block")
        }
        imported <- block
    }
    // Iteratively announce blocks until all are imported
    for i := len(hashes) - 2; i >= 0; i-- {
        tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -687,16 +781,12 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
// Tests that a peer is unable to use unbounded memory by sending infinite
// block announcements to a node, but that even in the face of such an attack,
// the fetcher remains operational.
func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
|
||||
func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
|
||||
func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
|
||||
|
||||
func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
|
||||
func TestHashMemoryExhaustionAttack(t *testing.T) {
|
||||
// Create a tester with instrumented import hooks
|
||||
tester := newTester()
|
||||
tester := newTester(false)
|
||||
|
||||
imported, announces := make(chan *types.Block), int32(0)
|
||||
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
||||
imported, announces := make(chan interface{}), int32(0)
|
||||
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
|
||||
tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) {
|
||||
if added {
|
||||
atomic.AddInt32(&announces, 1)
|
||||
@@ -740,10 +830,10 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
|
||||
// system memory.
|
||||
func TestBlockMemoryExhaustionAttack(t *testing.T) {
|
||||
// Create a tester with instrumented import hooks
|
||||
tester := newTester()
|
||||
tester := newTester(false)
|
||||
|
||||
imported, enqueued := make(chan *types.Block), int32(0)
|
||||
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
||||
imported, enqueued := make(chan interface{}), int32(0)
|
||||
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
|
||||
tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {
|
||||
if added {
|
||||
atomic.AddInt32(&enqueued, 1)
|
||||
|
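The thread running through the fetcher test hunks above is the widening of importedHook from func(*types.Block) to func(*types.Header, *types.Block): one callback now serves both light (header-only) and full imports. A minimal, self-contained sketch of that dual-argument hook pattern follows; all types and helper names below are stand-ins, not go-ethereum's own:

```go
package main

import "fmt"

// Header and Block stand in for go-ethereum's types.Header and types.Block.
type Header struct{ Number uint64 }
type Block struct{ header Header }

// fetcher mirrors the hook shape used in the tests above: one callback,
// two arguments, exactly one of which is expected to be non-nil.
type fetcher struct {
	light        bool
	importedHook func(*Header, *Block)
}

// importDone is a hypothetical helper showing how a light fetcher reports a
// header import while a full fetcher reports a block import via the same hook.
func (f *fetcher) importDone(h *Header, b *Block) {
	if f.importedHook == nil {
		return
	}
	if f.light {
		f.importedHook(h, nil) // light mode: header only, block stays nil
	} else {
		f.importedHook(nil, b) // full mode: block only, header stays nil
	}
}

func main() {
	f := &fetcher{light: true}
	f.importedHook = func(h *Header, b *Block) {
		switch {
		case h != nil:
			fmt.Println("imported header", h.Number)
		case b != nil:
			fmt.Println("imported block", b.header.Number)
		}
	}
	f.importDone(&Header{Number: 1}, nil)
}
```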
@@ -38,6 +38,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
    DatabaseCache           int
    DatabaseFreezer         string
    TrieCleanCache          int
+   TrieCleanCacheJournal   string        `toml:",omitempty"`
+   TrieCleanCacheRejournal time.Duration `toml:",omitempty"`
    TrieDirtyCache          int
    TrieTimeout             time.Duration
    SnapshotCache           int
@@ -76,6 +78,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
    enc.DatabaseCache = c.DatabaseCache
    enc.DatabaseFreezer = c.DatabaseFreezer
    enc.TrieCleanCache = c.TrieCleanCache
+   enc.TrieCleanCacheJournal = c.TrieCleanCacheJournal
+   enc.TrieCleanCacheRejournal = c.TrieCleanCacheRejournal
    enc.TrieDirtyCache = c.TrieDirtyCache
    enc.TrieTimeout = c.TrieTimeout
    enc.SnapshotCache = c.SnapshotCache
@@ -118,6 +122,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
    DatabaseCache           *int
    DatabaseFreezer         *string
    TrieCleanCache          *int
+   TrieCleanCacheJournal   *string        `toml:",omitempty"`
+   TrieCleanCacheRejournal *time.Duration `toml:",omitempty"`
    TrieDirtyCache          *int
    TrieTimeout             *time.Duration
    SnapshotCache           *int
@@ -201,6 +207,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
    if dec.TrieCleanCache != nil {
        c.TrieCleanCache = *dec.TrieCleanCache
    }
+   if dec.TrieCleanCacheJournal != nil {
+       c.TrieCleanCacheJournal = *dec.TrieCleanCacheJournal
+   }
+   if dec.TrieCleanCacheRejournal != nil {
+       c.TrieCleanCacheRejournal = *dec.TrieCleanCacheRejournal
+   }
    if dec.TrieDirtyCache != nil {
        c.TrieDirtyCache = *dec.TrieDirtyCache
    }
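The gen_config.go hunks follow the generated-config pattern used throughout go-ethereum: marshalling writes plain fields, while unmarshalling decodes into pointer fields so that only keys actually present in the TOML override existing values. A standalone sketch of that pointer-override idea (field names illustrative, the TOML decoding itself elided):

```go
package main

import "fmt"

// Config is the effective configuration with defaults already applied.
type Config struct {
	TrieCleanCache        int
	TrieCleanCacheJournal string
}

// configTOML mirrors Config with pointer fields: a nil pointer means the key
// was absent from the decoded TOML, so the default must be kept.
type configTOML struct {
	TrieCleanCache        *int
	TrieCleanCacheJournal *string
}

// apply copies only the fields that were actually present in the input,
// exactly like the generated UnmarshalTOML in the hunk above.
func (c *Config) apply(dec *configTOML) {
	if dec.TrieCleanCache != nil {
		c.TrieCleanCache = *dec.TrieCleanCache
	}
	if dec.TrieCleanCacheJournal != nil {
		c.TrieCleanCacheJournal = *dec.TrieCleanCacheJournal
	}
}

func main() {
	cfg := Config{TrieCleanCache: 256, TrieCleanCacheJournal: "triecache"}
	journal := "custom-journal"
	cfg.apply(&configTOML{TrieCleanCacheJournal: &journal}) // cache size untouched
	fmt.Printf("%+v\n", cfg)
}
```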
@@ -188,7 +188,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
            }
            return n, err
        }
-   manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+   manager.blockFetcher = fetcher.NewBlockFetcher(false, nil, blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, nil, inserter, manager.removePeer)

    fetchTx := func(peer string, hashes []common.Hash) error {
        p := manager.peers.Peer(peer)
@@ -262,7 +262,7 @@ func (p *peer) announceTransactions(removePeer func(string)) {
            queue = append(queue, hashes...)
            if len(queue) > maxQueuedTxAnns {
                // Fancy copy and resize to ensure buffer doesn't grow indefinitely
-               queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
+               queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])]
            }

        case <-done:
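The one-identifier bug fixed above is in the ring-trim idiom: the announcement queue was trimmed against maxQueuedTxs instead of maxQueuedTxAnns. A self-contained sketch of the copy-and-resize trim, using the corrected constant:

```go
package main

import "fmt"

const maxQueuedTxAnns = 4

// trim keeps only the newest maxQueuedTxAnns entries in place, the
// "fancy copy and resize" idiom from the hunk above; the fix there was
// trimming against the wrong constant.
func trim(queue []string) []string {
	if len(queue) > maxQueuedTxAnns {
		// copy the tail to the front, then re-slice to its length
		queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])]
	}
	return queue
}

func main() {
	q := []string{"a", "b", "c", "d", "e", "f"}
	fmt.Println(trim(q)) // [c d e f]
}
```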
@@ -282,6 +282,10 @@ func toBlockNumArg(number *big.Int) string {
    if number == nil {
        return "latest"
    }
+   pending := big.NewInt(-1)
+   if number.Cmp(pending) == 0 {
+       return "pending"
+   }
    return hexutil.EncodeBig(number)
}
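The hunk above adds a second sentinel to toBlockNumArg: nil already meant "latest", and -1 now maps to "pending". A runnable sketch of the same mapping, with fmt standing in for hexutil.EncodeBig:

```go
package main

import (
	"fmt"
	"math/big"
)

// toBlockNumArg mirrors the logic in the hunk above: nil means "latest",
// -1 means "pending", anything else is hex-encoded.
func toBlockNumArg(number *big.Int) string {
	if number == nil {
		return "latest"
	}
	pending := big.NewInt(-1)
	if number.Cmp(pending) == 0 {
		return "pending"
	}
	return fmt.Sprintf("%#x", number) // stands in for hexutil.EncodeBig
}

func main() {
	fmt.Println(toBlockNumArg(nil))            // latest
	fmt.Println(toBlockNumArg(big.NewInt(-1))) // pending
	fmt.Println(toBlockNumArg(big.NewInt(10))) // 0xa
}
```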
@@ -97,6 +97,22 @@ func TestToFilterArg(t *testing.T) {
        },
        nil,
    },
+   {
+       "with negative fromBlock and negative toBlock",
+       ethereum.FilterQuery{
+           Addresses: addresses,
+           FromBlock: big.NewInt(-1),
+           ToBlock:   big.NewInt(-1),
+           Topics:    [][]common.Hash{},
+       },
+       map[string]interface{}{
+           "address":   addresses,
+           "fromBlock": "pending",
+           "toBlock":   "pending",
+           "topics":    [][]common.Hash{},
+       },
+       nil,
+   },
    {
        "with blockhash",
        ethereum.FilterQuery{
@@ -171,17 +187,18 @@ var (
func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
    // Generate test chain.
    genesis, blocks := generateTestChain()
-
-   // Start Ethereum service.
-   var ethservice *eth.Ethereum
+   // Create node
    n, err := node.New(&node.Config{})
-   n.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-       config := &eth.Config{Genesis: genesis}
-       config.Ethash.PowMode = ethash.ModeFake
-       ethservice, err = eth.New(ctx, config)
-       return ethservice, err
-   })
-
    if err != nil {
        t.Fatalf("can't create new node: %v", err)
    }
+   // Create Ethereum Service
+   config := &eth.Config{Genesis: genesis}
+   config.Ethash.PowMode = ethash.ModeFake
+   ethservice, err := eth.New(n, config)
+   if err != nil {
+       t.Fatalf("can't create new ethereum service: %v", err)
+   }
    // Import the test chain.
    if err := n.Start(); err != nil {
        t.Fatalf("can't start test node: %v", err)
@@ -215,7 +232,7 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
func TestHeader(t *testing.T) {
    backend, chain := newTestBackend(t)
    client, _ := backend.Attach()
-   defer backend.Stop()
+   defer backend.Close()
    defer client.Close()

    tests := map[string]struct {
@@ -259,7 +276,7 @@ func TestHeader(t *testing.T) {
func TestBalanceAt(t *testing.T) {
    backend, _ := newTestBackend(t)
    client, _ := backend.Attach()
-   defer backend.Stop()
+   defer backend.Close()
    defer client.Close()

    tests := map[string]struct {
@@ -305,7 +322,7 @@ func TestBalanceAt(t *testing.T) {
func TestTransactionInBlockInterrupted(t *testing.T) {
    backend, _ := newTestBackend(t)
    client, _ := backend.Attach()
-   defer backend.Stop()
+   defer backend.Close()
    defer client.Close()

    ec := NewClient(client)
@@ -323,7 +340,7 @@ func TestTransactionInBlockInterrupted(t *testing.T) {
func TestChainID(t *testing.T) {
    backend, _ := newTestBackend(t)
    client, _ := backend.Attach()
-   defer backend.Stop()
+   defer backend.Close()
    defer client.Close()
    ec := NewClient(client)
@@ -28,6 +28,7 @@ import (
    "runtime"
    "strconv"
    "strings"
+   "sync"
    "time"

    "github.com/ethereum/go-ethereum/common"
@@ -36,9 +37,12 @@ import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
-   "github.com/ethereum/go-ethereum/eth"
+   "github.com/ethereum/go-ethereum/eth/downloader"
    "github.com/ethereum/go-ethereum/event"
-   "github.com/ethereum/go-ethereum/les"
    "github.com/ethereum/go-ethereum/log"
+   "github.com/ethereum/go-ethereum/miner"
+   "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
+   "github.com/ethereum/go-ethereum/rpc"
    "github.com/gorilla/websocket"
@@ -56,23 +60,33 @@ const (
    chainHeadChanSize = 10
)

-type txPool interface {
-   // SubscribeNewTxsEvent should return an event subscription of
-   // NewTxsEvent and send events to the given channel.
-   SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
+// backend encompasses the bare-minimum functionality needed for ethstats reporting
+type backend interface {
+   SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
+   SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription
+   CurrentHeader() *types.Header
+   HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
+   GetTd(ctx context.Context, hash common.Hash) *big.Int
+   Stats() (pending int, queued int)
+   Downloader() *downloader.Downloader
}

-type blockChain interface {
-   SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
+// fullNodeBackend encompasses the functionality necessary for a full node
+// reporting to ethstats
+type fullNodeBackend interface {
+   backend
+   Miner() *miner.Miner
+   BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
+   CurrentBlock() *types.Block
+   SuggestPrice(ctx context.Context) (*big.Int, error)
}

// Service implements an Ethereum netstats reporting daemon that pushes local
// chain statistics up to a monitoring server.
type Service struct {
-   server *p2p.Server        // Peer-to-peer server to retrieve networking infos
-   eth    *eth.Ethereum      // Full Ethereum service if monitoring a full node
-   les    *les.LightEthereum // Light Ethereum service if monitoring a light node
-   engine consensus.Engine   // Consensus engine to retrieve variadic block fields
+   server  *p2p.Server      // Peer-to-peer server to retrieve networking infos
+   backend backend
+   engine  consensus.Engine // Consensus engine to retrieve variadic block fields

    node string // Name of the node to display on the monitoring page
    pass string // Password to authorize access to the monitoring page
@@ -80,53 +94,86 @@ type Service struct {

    pongCh chan struct{} // Pong notifications are fed into this channel
    histCh chan []uint64 // History request block numbers are fed into this channel
}

+// connWrapper is a wrapper to prevent concurrent-write or concurrent-read on the
+// websocket.
+//
+// From Gorilla websocket docs:
+//   Connections support one concurrent reader and one concurrent writer.
+//   Applications are responsible for ensuring that no more than one goroutine calls the write methods
+//     - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel
+//   concurrently and that no more than one goroutine calls the read methods
+//     - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler
+//   concurrently.
+//   The Close and WriteControl methods can be called concurrently with all other methods.
+type connWrapper struct {
+   conn *websocket.Conn
+
+   rlock sync.Mutex
+   wlock sync.Mutex
+}
+
+func newConnectionWrapper(conn *websocket.Conn) *connWrapper {
+   return &connWrapper{conn: conn}
+}
+
+// WriteJSON wraps corresponding method on the websocket but is safe for concurrent calling
+func (w *connWrapper) WriteJSON(v interface{}) error {
+   w.wlock.Lock()
+   defer w.wlock.Unlock()
+
+   return w.conn.WriteJSON(v)
+}
+
+// ReadJSON wraps corresponding method on the websocket but is safe for concurrent calling
+func (w *connWrapper) ReadJSON(v interface{}) error {
+   w.rlock.Lock()
+   defer w.rlock.Unlock()
+
+   return w.conn.ReadJSON(v)
+}
+
+// Close wraps corresponding method on the websocket but is safe for concurrent calling
+func (w *connWrapper) Close() error {
+   // The Close and WriteControl methods can be called concurrently with all other methods,
+   // so the mutex is not used here
+   return w.conn.Close()
+}
+
// New returns a monitoring service ready for stats reporting.
-func New(url string, ethServ *eth.Ethereum, lesServ *les.LightEthereum) (*Service, error) {
+func New(node *node.Node, backend backend, engine consensus.Engine, url string) error {
    // Parse the netstats connection url
    re := regexp.MustCompile("([^:@]*)(:([^@]*))?@(.+)")
    parts := re.FindStringSubmatch(url)
    if len(parts) != 5 {
-       return nil, fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url)
+       return fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url)
    }
-   // Assemble and return the stats service
-   var engine consensus.Engine
-   if ethServ != nil {
-       engine = ethServ.Engine()
-   } else {
-       engine = lesServ.Engine()
-   }
-   return &Service{
-       eth:    ethServ,
-       les:    lesServ,
-       engine: engine,
-       node:   parts[1],
-       pass:   parts[3],
-       host:   parts[4],
-       pongCh: make(chan struct{}),
-       histCh: make(chan []uint64, 1),
-   }, nil
+   ethstats := &Service{
+       backend: backend,
+       engine:  engine,
+       server:  node.Server(),
+       node:    parts[1],
+       pass:    parts[3],
+       host:    parts[4],
+       pongCh:  make(chan struct{}),
+       histCh:  make(chan []uint64, 1),
+   }
+
+   node.RegisterLifecycle(ethstats)
+   return nil
}

-// Protocols implements node.Service, returning the P2P network protocols used
-// by the stats service (nil as it doesn't use the devp2p overlay network).
-func (s *Service) Protocols() []p2p.Protocol { return nil }
-
-// APIs implements node.Service, returning the RPC API endpoints provided by the
-// stats service (nil as it doesn't provide any user callable APIs).
-func (s *Service) APIs() []rpc.API { return nil }
-
-// Start implements node.Service, starting up the monitoring and reporting daemon.
-func (s *Service) Start(server *p2p.Server) error {
-   s.server = server
+// Start implements node.Lifecycle, starting up the monitoring and reporting daemon.
+func (s *Service) Start() error {
    go s.loop()

    log.Info("Stats daemon started")
    return nil
}

-// Stop implements node.Service, terminating the monitoring and reporting daemon.
+// Stop implements node.Lifecycle, terminating the monitoring and reporting daemon.
func (s *Service) Stop() error {
    log.Info("Stats daemon stopped")
    return nil
@@ -136,22 +183,12 @@ func (s *Service) Stop() error {
// until termination.
func (s *Service) loop() {
    // Subscribe to chain events to execute updates on
-   var blockchain blockChain
-   var txpool txPool
-   if s.eth != nil {
-       blockchain = s.eth.BlockChain()
-       txpool = s.eth.TxPool()
-   } else {
-       blockchain = s.les.BlockChain()
-       txpool = s.les.TxPool()
-   }
-
    chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize)
-   headSub := blockchain.SubscribeChainHeadEvent(chainHeadCh)
+   headSub := s.backend.SubscribeChainHeadEvent(chainHeadCh)
    defer headSub.Unsubscribe()

    txEventCh := make(chan core.NewTxsEvent, txChanSize)
-   txSub := txpool.SubscribeNewTxsEvent(txEventCh)
+   txSub := s.backend.SubscribeNewTxsEvent(txEventCh)
    defer txSub.Unsubscribe()

    // Start a goroutine that exhausts the subscriptions to avoid events piling up
@@ -214,15 +251,17 @@ func (s *Service) loop() {
        case <-errTimer.C:
            // Establish a websocket connection to the server on any supported URL
            var (
-               conn *websocket.Conn
+               conn *connWrapper
                err  error
            )
            dialer := websocket.Dialer{HandshakeTimeout: 5 * time.Second}
            header := make(http.Header)
            header.Set("origin", "http://localhost")
            for _, url := range urls {
-               conn, _, err = dialer.Dial(url, header)
+               c, _, e := dialer.Dial(url, header)
+               err = e
                if err == nil {
+                   conn = newConnectionWrapper(c)
                    break
                }
            }
@@ -292,7 +331,7 @@ func (s *Service) loop() {
// from the network socket. If any of them match an active request, it forwards
// it, if they themselves are requests it initiates a reply, and lastly it drops
// unknown packets.
-func (s *Service) readLoop(conn *websocket.Conn) {
+func (s *Service) readLoop(conn *connWrapper) {
    // If the read loop exists, close the connection
    defer conn.Close()

@@ -401,7 +440,7 @@ type authMsg struct {
}

// login tries to authorize the client at the remote server.
-func (s *Service) login(conn *websocket.Conn) error {
+func (s *Service) login(conn *connWrapper) error {
    // Construct and send the login authentication
    infos := s.server.NodeInfo()

@@ -446,7 +485,7 @@ func (s *Service) login(conn *websocket.Conn) error {
// report collects all possible data to report and send it to the stats server.
// This should only be used on reconnects or rarely to avoid overloading the
// server. Use the individual methods for reporting subscribed events.
-func (s *Service) report(conn *websocket.Conn) error {
+func (s *Service) report(conn *connWrapper) error {
    if err := s.reportLatency(conn); err != nil {
        return err
    }
@@ -464,7 +503,7 @@ func (s *Service) report(conn *websocket.Conn) error {

// reportLatency sends a ping request to the server, measures the RTT time and
// finally sends a latency update.
-func (s *Service) reportLatency(conn *websocket.Conn) error {
+func (s *Service) reportLatency(conn *connWrapper) error {
    // Send the current time to the ethstats server
    start := time.Now()

@@ -533,7 +572,7 @@ func (s uncleStats) MarshalJSON() ([]byte, error) {
}

// reportBlock retrieves the current chain head and reports it to the stats server.
-func (s *Service) reportBlock(conn *websocket.Conn, block *types.Block) error {
+func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
    // Gather the block details from the header or block chain
    details := s.assembleBlockStats(block)

@@ -560,13 +599,15 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
        txs    []txStats
        uncles []*types.Header
    )
-   if s.eth != nil {
-       // Full nodes have all needed information available
+
+   // check if backend is a full node
+   fullBackend, ok := s.backend.(fullNodeBackend)
+   if ok {
        if block == nil {
-           block = s.eth.BlockChain().CurrentBlock()
+           block = fullBackend.CurrentBlock()
        }
        header = block.Header()
-       td = s.eth.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+       td = fullBackend.GetTd(context.Background(), header.Hash())

        txs = make([]txStats, len(block.Transactions()))
        for i, tx := range block.Transactions() {
@@ -578,11 +619,12 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
        if block != nil {
            header = block.Header()
        } else {
-           header = s.les.BlockChain().CurrentHeader()
+           header = s.backend.CurrentHeader()
        }
-       td = s.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+       td = s.backend.GetTd(context.Background(), header.Hash())
        txs = []txStats{}
    }

    // Assemble and return the block stats
    author, _ := s.engine.Author(header)

@@ -605,7 +647,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {

// reportHistory retrieves the most recent batch of blocks and reports it to the
// stats server.
-func (s *Service) reportHistory(conn *websocket.Conn, list []uint64) error {
+func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
    // Figure out the indexes that need reporting
    indexes := make([]uint64, 0, historyUpdateRange)
    if len(list) > 0 {
@@ -613,12 +655,7 @@ func (s *Service) reportHistory(conn *websocket.Conn, list []uint64) error {
        indexes = append(indexes, list...)
    } else {
        // No indexes requested, send back the top ones
-       var head int64
-       if s.eth != nil {
-           head = s.eth.BlockChain().CurrentHeader().Number.Int64()
-       } else {
-           head = s.les.BlockChain().CurrentHeader().Number.Int64()
-       }
+       head := s.backend.CurrentHeader().Number.Int64()
        start := head - historyUpdateRange + 1
        if start < 0 {
            start = 0
@@ -630,12 +667,13 @@ func (s *Service) reportHistory(conn *websocket.Conn, list []uint64) error {
    // Gather the batch of blocks to report
    history := make([]*blockStats, len(indexes))
    for i, number := range indexes {
+       fullBackend, ok := s.backend.(fullNodeBackend)
        // Retrieve the next block if it's known to us
        var block *types.Block
-       if s.eth != nil {
-           block = s.eth.BlockChain().GetBlockByNumber(number)
+       if ok {
+           block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(number)) // TODO ignore error here ?
        } else {
-           if header := s.les.BlockChain().GetHeaderByNumber(number); header != nil {
+           if header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number)); header != nil {
                block = types.NewBlockWithHeader(header)
            }
        }
@@ -671,14 +709,9 @@ type pendStats struct {

// reportPending retrieves the current number of pending transactions and reports
// it to the stats server.
-func (s *Service) reportPending(conn *websocket.Conn) error {
+func (s *Service) reportPending(conn *connWrapper) error {
    // Retrieve the pending count from the local blockchain
-   var pending int
-   if s.eth != nil {
-       pending, _ = s.eth.TxPool().Stats()
-   } else {
-       pending = s.les.TxPool().Stats()
-   }
+   pending, _ := s.backend.Stats()
    // Assemble the transaction stats and send it to the server
    log.Trace("Sending pending transactions to ethstats", "count", pending)

@@ -705,9 +738,9 @@ type nodeStats struct {
    Uptime int `json:"uptime"`
}

-// reportPending retrieves various stats about the node at the networking and
+// reportStats retrieves various stats about the node at the networking and
// mining layer and reports it to the stats server.
-func (s *Service) reportStats(conn *websocket.Conn) error {
+func (s *Service) reportStats(conn *connWrapper) error {
    // Gather the syncing and mining infos from the local miner instance
    var (
        mining bool
@@ -715,18 +748,20 @@ func (s *Service) reportStats(conn *websocket.Conn) error {
        syncing  bool
        gasprice int
    )
-   if s.eth != nil {
-       mining = s.eth.Miner().Mining()
-       hashrate = int(s.eth.Miner().HashRate())
+   // check if backend is a full node
+   fullBackend, ok := s.backend.(fullNodeBackend)
+   if ok {
+       mining = fullBackend.Miner().Mining()
+       hashrate = int(fullBackend.Miner().HashRate())

-       sync := s.eth.Downloader().Progress()
-       syncing = s.eth.BlockChain().CurrentHeader().Number.Uint64() >= sync.HighestBlock
+       sync := fullBackend.Downloader().Progress()
+       syncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock

-       price, _ := s.eth.APIBackend.SuggestPrice(context.Background())
+       price, _ := fullBackend.SuggestPrice(context.Background())
        gasprice = int(price.Uint64())
    } else {
-       sync := s.les.Downloader().Progress()
-       syncing = s.les.BlockChain().CurrentHeader().Number.Uint64() >= sync.HighestBlock
+       sync := s.backend.Downloader().Progress()
+       syncing = s.backend.CurrentHeader().Number.Uint64() >= sync.HighestBlock
    }
    // Assemble the node stats and send it to the server
    log.Trace("Sending node details to ethstats")
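The ethstats rewrite above replaces the concrete eth/les service pair with a narrow backend interface plus an optional fullNodeBackend extension, discovered by a type assertion wherever full-node-only data is needed. A minimal sketch of that degrade-gracefully pattern, with stand-in types and methods:

```go
package main

import "fmt"

// backend is the bare minimum every node kind can provide, mirroring the
// interface introduced in the hunk above (methods trimmed for brevity).
type backend interface {
	CurrentHeaderNumber() uint64
}

// fullNodeBackend adds what only a full node can answer.
type fullNodeBackend interface {
	backend
	PendingTxCount() int
}

type lightNode struct{}

func (lightNode) CurrentHeaderNumber() uint64 { return 42 }

type fullNode struct{ lightNode }

func (fullNode) PendingTxCount() int { return 7 }

// report degrades gracefully: full-node-only stats are gathered
// only when the type assertion succeeds.
func report(b backend) {
	fmt.Println("head:", b.CurrentHeaderNumber())
	if full, ok := b.(fullNodeBackend); ok {
		fmt.Println("pending:", full.PendingTxCount())
	}
}

func main() {
	report(lightNode{})
	report(fullNode{})
}
```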
go.mod
@@ -31,7 +31,7 @@ require (
    github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989
    github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277
    github.com/hashicorp/golang-lru v0.5.4
-   github.com/holiman/uint256 v1.1.0
+   github.com/holiman/uint256 v1.1.1
    github.com/huin/goupnp v1.0.0
    github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883
    github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
go.sum
@@ -95,8 +95,8 @@ github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKx
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/holiman/uint256 v1.1.0 h1:Iye6ze0DW9s+7EMn8y6Q4ebegDzpu28JQHEVM1Bq+Wg=
-github.com/holiman/uint256 v1.1.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
+github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw=
+github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
@@ -17,12 +17,118 @@
package graphql

import (
+   "fmt"
+   "io/ioutil"
+   "net/http"
+   "strings"
    "testing"

+   "github.com/ethereum/go-ethereum/eth"
+   "github.com/ethereum/go-ethereum/node"
+   "github.com/stretchr/testify/assert"
)

func TestBuildSchema(t *testing.T) {
+   stack, err := node.New(&node.DefaultConfig)
+   if err != nil {
+       t.Fatalf("could not create new node: %v", err)
+   }
    // Make sure the schema can be parsed and matched up to the object model.
-   if _, err := newHandler(nil); err != nil {
+   if err := newHandler(stack, nil, []string{}, []string{}); err != nil {
        t.Errorf("Could not construct GraphQL handler: %v", err)
    }
}

+// Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint
+func TestGraphQLHTTPOnSamePort_GQLRequest_Successful(t *testing.T) {
+   stack := createNode(t, true)
+   defer stack.Close()
+   // start node
+   if err := stack.Start(); err != nil {
+       t.Fatalf("could not start node: %v", err)
+   }
+   // create http request
+   body := strings.NewReader("{\"query\": \"{block{number}}\",\"variables\": null}")
+   gqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/graphql", "127.0.0.1:9393"), body)
+   if err != nil {
+       t.Error("could not issue new http request ", err)
+   }
+   gqlReq.Header.Set("Content-Type", "application/json")
+   // read from response
+   resp := doHTTPRequest(t, gqlReq)
+   bodyBytes, err := ioutil.ReadAll(resp.Body)
+   if err != nil {
+       t.Fatalf("could not read from response body: %v", err)
+   }
+   expected := "{\"data\":{\"block\":{\"number\":\"0x0\"}}}"
+   assert.Equal(t, expected, string(bodyBytes))
+}
+
+// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint
+func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {
+   stack := createNode(t, false)
+   defer stack.Close()
+   if err := stack.Start(); err != nil {
+       t.Fatalf("could not start node: %v", err)
+   }
+
+   // create http request
+   body := strings.NewReader("{\"query\": \"{block{number}}\",\"variables\": null}")
+   gqlReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/graphql", "127.0.0.1:9393"), body)
+   if err != nil {
+       t.Error("could not issue new http request ", err)
+   }
+   gqlReq.Header.Set("Content-Type", "application/json")
+   // read from response
+   resp := doHTTPRequest(t, gqlReq)
+   bodyBytes, err := ioutil.ReadAll(resp.Body)
+   if err != nil {
+       t.Fatalf("could not read from response body: %v", err)
+   }
+   // make sure the request is not handled successfully
+   assert.Equal(t, 404, resp.StatusCode)
+   assert.Equal(t, "404 page not found\n", string(bodyBytes))
+}
+
+func createNode(t *testing.T, gqlEnabled bool) *node.Node {
+   stack, err := node.New(&node.Config{
+       HTTPHost: "127.0.0.1",
+       HTTPPort: 9393,
+       WSHost:   "127.0.0.1",
+       WSPort:   9393,
+   })
+   if err != nil {
+       t.Fatalf("could not create node: %v", err)
+   }
+   if !gqlEnabled {
+       return stack
+   }
+
+   createGQLService(t, stack, "127.0.0.1:9393")
+
+   return stack
+}
+
+func createGQLService(t *testing.T, stack *node.Node, endpoint string) {
+   // create backend
+   ethBackend, err := eth.New(stack, &eth.DefaultConfig)
+   if err != nil {
+       t.Fatalf("could not create eth backend: %v", err)
+   }
+
+   // create gql service
+   err = New(stack, ethBackend.APIBackend, []string{}, []string{})
+   if err != nil {
+       t.Fatalf("could not create graphql service: %v", err)
+   }
+}
+
+func doHTTPRequest(t *testing.T, req *http.Request) *http.Response {
+   client := &http.Client{}
+   resp, err := client.Do(req)
+   if err != nil {
+       t.Fatal("could not issue a GET request to the given endpoint", err)
+   }
+   return resp
+}
@@ -17,99 +17,36 @@
package graphql

import (
-   "fmt"
-   "net"
    "net/http"

    "github.com/ethereum/go-ethereum/internal/ethapi"
-   "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
-   "github.com/ethereum/go-ethereum/p2p"
-   "github.com/ethereum/go-ethereum/rpc"
    "github.com/graph-gophers/graphql-go"
    "github.com/graph-gophers/graphql-go/relay"
)

-// Service encapsulates a GraphQL service.
-type Service struct {
-   endpoint string           // The host:port endpoint for this service.
-   cors     []string         // Allowed CORS domains
-   vhosts   []string         // Recognised vhosts
-   timeouts rpc.HTTPTimeouts // Timeout settings for HTTP requests.
-   backend  ethapi.Backend   // The backend that queries will operate on.
-   handler  http.Handler     // The `http.Handler` used to answer queries.
-   listener net.Listener     // The listening socket.
-}
-
// New constructs a new GraphQL service instance.
-func New(backend ethapi.Backend, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) (*Service, error) {
-   return &Service{
-       endpoint: endpoint,
-       cors:     cors,
-       vhosts:   vhosts,
-       timeouts: timeouts,
-       backend:  backend,
-   }, nil
-}
-
-// Protocols returns the list of protocols exported by this service.
-func (s *Service) Protocols() []p2p.Protocol { return nil }
-
-// APIs returns the list of APIs exported by this service.
-func (s *Service) APIs() []rpc.API { return nil }
-
-// Start is called after all services have been constructed and the networking
-// layer was also initialized to spawn any goroutines required by the service.
-func (s *Service) Start(server *p2p.Server) error {
-   var err error
-   s.handler, err = newHandler(s.backend)
-   if err != nil {
-       return err
+func New(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {
+   if backend == nil {
+       panic("missing backend")
    }
-   if s.listener, err = net.Listen("tcp", s.endpoint); err != nil {
-       return err
-   }
-   // create handler stack and wrap the graphql handler
-   handler := node.NewHTTPHandlerStack(s.handler, s.cors, s.vhosts)
-   // make sure timeout values are meaningful
-   node.CheckTimeouts(&s.timeouts)
-   // create http server
-   httpSrv := &http.Server{
-       Handler:      handler,
-       ReadTimeout:  s.timeouts.ReadTimeout,
-       WriteTimeout: s.timeouts.WriteTimeout,
-       IdleTimeout:  s.timeouts.IdleTimeout,
-   }
-   go httpSrv.Serve(s.listener)
-   log.Info("GraphQL endpoint opened", "url", fmt.Sprintf("http://%s", s.endpoint))
-   return nil
+   // check if http server with given endpoint exists and enable graphQL on it
+   return newHandler(stack, backend, cors, vhosts)
}

// newHandler returns a new `http.Handler` that will answer GraphQL queries.
// It additionally exports an interactive query browser on the / endpoint.
-func newHandler(backend ethapi.Backend) (http.Handler, error) {
+func newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {
    q := Resolver{backend}

    s, err := graphql.ParseSchema(schema, &q)
    if err != nil {
-       return nil, err
+       return err
    }
    h := &relay.Handler{Schema: s}
+   handler := node.NewHTTPHandlerStack(h, cors, vhosts)

-   mux := http.NewServeMux()
-   mux.Handle("/", GraphiQL{})
-   mux.Handle("/graphql", h)
-   mux.Handle("/graphql/", h)
-   return mux, nil
-}
+   stack.RegisterHandler("GraphQL UI", "/graphql/ui", GraphiQL{})
+   stack.RegisterHandler("GraphQL", "/graphql", handler)
+   stack.RegisterHandler("GraphQL", "/graphql/", handler)

-// Stop terminates all goroutines belonging to the service, blocking until they
-// are all terminated.
-func (s *Service) Stop() error {
-   if s.listener != nil {
-       s.listener.Close()
-       s.listener = nil
-       log.Info("GraphQL endpoint closed", "url", fmt.Sprintf("http://%s", s.endpoint))
-   }
-   return nil
-}
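After this refactor the GraphQL service no longer runs its own HTTP server: New builds the handler and mounts it on the node's existing HTTP endpoint via RegisterHandler. A hedged wiring sketch, using only the calls exercised by the test file above and therefore valid only for this revision of go-ethereum:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/graphql"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// Create a node with an HTTP endpoint; GraphQL piggybacks on it.
	stack, err := node.New(&node.Config{HTTPHost: "127.0.0.1", HTTPPort: 9393})
	if err != nil {
		log.Fatal(err)
	}
	// Create the eth service to back the GraphQL resolvers.
	ethBackend, err := eth.New(stack, &eth.DefaultConfig)
	if err != nil {
		log.Fatal(err)
	}
	// Mount GraphQL on /graphql of the node's HTTP server (no CORS, no vhosts).
	if err := graphql.New(stack, ethBackend.APIBackend, []string{}, []string{}); err != nil {
		log.Fatal(err)
	}
	if err := stack.Start(); err != nil {
		log.Fatal(err)
	}
	stack.Wait() // serve until the node is stopped
}
```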
@@ -23,6 +23,7 @@ import (

    "github.com/ethereum/go-ethereum/accounts"
    "github.com/ethereum/go-ethereum/common"
+   "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/bloombits"
    "github.com/ethereum/go-ethereum/core/state"
@@ -45,14 +46,16 @@ type Backend interface {
    ChainDb() ethdb.Database
    AccountManager() *accounts.Manager
    ExtRPCEnabled() bool
-   RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
+   RPCGasCap() uint64    // global gas cap for eth_call over rpc: DoS protection
+   RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs

    // Blockchain API
    SetHead(number uint64)
    HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
    HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error)
    HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error)
+   CurrentHeader() *types.Header
+   CurrentBlock() *types.Block
    BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
    BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error)
    BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error)
@@ -84,7 +87,7 @@ type Backend interface {
    SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription

    ChainConfig() *params.ChainConfig
-   CurrentBlock() *types.Block
+   Engine() consensus.Engine
}

func GetAPIs(apiBackend Backend) []rpc.API {
les/api.go
@@ -202,6 +202,18 @@ func (api *PrivateLightServerAPI) SetDefaultParams(params map[string]interface{}
    return err
}

+// SetConnectedBias set the connection bias, which is applied to already connected clients
+// So that already connected client won't be kicked out very soon and we can ensure all
+// connected clients can have enough time to request or sync some data.
+// When the input parameter `bias` < 0 (illegal), return error.
+func (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error {
+   if bias < time.Duration(0) {
+       return fmt.Errorf("bias illegal: %v less than 0", bias)
+   }
+   api.server.clientPool.setConnectedBias(bias)
+   return nil
+}
+
// Benchmark runs a request performance benchmark with a given set of measurement setups
// in multiple passes specified by passCount. The measurement time for each setup in each
// pass is specified in milliseconds by length.
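The new SetConnectedBias endpoint rejects negative durations before handing the value to the client pool, whose defaultConnectedBias appears further below. A standalone sketch of the guard, with the apply callback standing in for the client pool:

```go
package main

import (
	"fmt"
	"time"
)

// setConnectedBias mirrors the check in the new API method: negative biases
// are rejected, so already connected clients always keep a non-negative
// grace period before they can be kicked out.
func setConnectedBias(bias time.Duration, apply func(time.Duration)) error {
	if bias < 0 {
		return fmt.Errorf("bias illegal: %v less than 0", bias)
	}
	apply(bias)
	return nil
}

func main() {
	current := 3 * time.Minute // the default bias used in the client pool below
	err := setConnectedBias(-time.Second, func(d time.Duration) { current = d })
	fmt.Println(err, current) // rejected, default kept
	_ = setConnectedBias(30*time.Second, func(d time.Duration) { current = d })
	fmt.Println(current) // 30s
}
```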
@@ -23,6 +23,7 @@ import (

    "github.com/ethereum/go-ethereum/accounts"
    "github.com/ethereum/go-ethereum/common"
+   "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/bloombits"
    "github.com/ethereum/go-ethereum/core/rawdb"
@@ -282,3 +283,11 @@ func (b *LesApiBackend) ServiceFilter(ctx context.Context, session *bloombits.Ma
        go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)
    }
}
+
+func (b *LesApiBackend) Engine() consensus.Engine {
+   return b.eth.engine
+}
+
+func (b *LesApiBackend) CurrentHeader() *types.Header {
+   return b.eth.blockchain.CurrentHeader()
+}
@@ -55,7 +55,7 @@ func TestMain(m *testing.M) {
    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
    // register the Delivery service which will run as a devp2p
    // protocol when using the exec adapter
-   adapters.RegisterServices(services)
+   adapters.RegisterLifecycles(services)
    os.Exit(m.Run())
}

@@ -392,7 +392,7 @@ func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (min
    return
}

-var services = adapters.Services{
+var services = adapters.LifecycleConstructors{
    "lesclient": newLesClientService,
    "lesserver": newLesServerService,
}
@@ -414,7 +414,7 @@ func NewNetwork() (*simulations.Network, func(), error) {
    return net, teardown, nil
}

-func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
+func NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {
    teardown = func() {}
    switch adapterType {
    case "sim":
@@ -454,7 +454,7 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []

    for i := range clients {
        clientconf := adapters.RandomNodeConfig()
-       clientconf.Services = []string{"lesclient"}
+       clientconf.Lifecycles = []string{"lesclient"}
        if len(clientDir) == clientCount {
            clientconf.DataDir = clientDir[i]
        }
@@ -467,7 +467,7 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []

    for i := range servers {
        serverconf := adapters.RandomNodeConfig()
-       serverconf.Services = []string{"lesserver"}
+       serverconf.Lifecycles = []string{"lesserver"}
        if len(serverDir) == serverCount {
            serverconf.DataDir = serverDir[i]
        }
@@ -492,26 +492,25 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []
    return test(ctx, net, servers, clients)
}

-func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) {
+func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
    config := eth.DefaultConfig
    config.SyncMode = downloader.LightSync
    config.Ethash.PowMode = ethash.ModeFake
-   return New(ctx.NodeContext, &config)
+   return New(stack, &config)
}

-func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) {
+func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
    config := eth.DefaultConfig
    config.SyncMode = downloader.FullSync
    config.LightServ = testServerCapacity
    config.LightPeers = testMaxClients
-   ethereum, err := eth.New(ctx.NodeContext, &config)
+   ethereum, err := eth.New(stack, &config)
    if err != nil {
        return nil, err
    }
-   server, err := NewLesServer(ethereum, &config)
+   _, err = NewLesServer(stack, ethereum, &config)
    if err != nil {
        return nil, err
    }
-   ethereum.AddLesServer(server)
    return ethereum, nil
}
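The les test changes above track the node refactor's central rename: simulation adapters now register LifecycleConstructors that receive the node stack and return a node.Lifecycle, instead of Service constructors that registered themselves through a context. A minimal sketch of that shape, with stand-in types:

```go
package main

import "fmt"

// Lifecycle mirrors node.Lifecycle: anything that can be started and stopped.
type Lifecycle interface {
	Start() error
	Stop() error
}

// LifecycleConstructor mirrors the new adapter signature in the hunks above:
// constructors receive the node stack and register themselves on it.
type LifecycleConstructor func(stack *Node) (Lifecycle, error)

// Node is a toy stand-in for node.Node that just collects lifecycles.
type Node struct{ lifecycles []Lifecycle }

func (n *Node) RegisterLifecycle(l Lifecycle) { n.lifecycles = append(n.lifecycles, l) }

type statsService struct{}

func (statsService) Start() error { fmt.Println("stats started"); return nil }
func (statsService) Stop() error  { fmt.Println("stats stopped"); return nil }

func main() {
	constructors := map[string]LifecycleConstructor{
		"stats": func(stack *Node) (Lifecycle, error) {
			s := statsService{}
			stack.RegisterLifecycle(s)
			return s, nil
		},
	}
	n := &Node{}
	if _, err := constructors["stats"](n); err != nil {
		panic(err)
	}
	for _, l := range n.lifecycles {
		l.Start()
	}
}
```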
@@ -51,16 +51,6 @@ type CheckpointOracle struct {

// New creates a checkpoint oracle handler with given configs and callback.
func New(config *params.CheckpointOracleConfig, getLocal func(uint64) params.TrustedCheckpoint) *CheckpointOracle {
-   if config == nil {
-       log.Info("Checkpoint registrar is not enabled")
-       return nil
-   }
-   if config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold {
-       log.Warn("Invalid checkpoint registrar config")
-       return nil
-   }
-   log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold)
-
    return &CheckpointOracle{
        config:   config,
        getLocal: getLocal,
@@ -103,8 +93,11 @@ func (oracle *CheckpointOracle) StableCheckpoint() (*params.TrustedCheckpoint, u
    // Look it up properly
    // Retrieve the latest checkpoint from the contract, abort if empty
    latest, hash, height, err := oracle.contract.Contract().GetLatestCheckpoint(nil)
+   oracle.lastCheckTime = time.Now()
    if err != nil || (latest == 0 && hash == [32]byte{}) {
-       return nil, 0
+       oracle.lastCheckPointHeight = 0
+       oracle.lastCheckPoint = nil
+       return oracle.lastCheckPoint, oracle.lastCheckPointHeight
    }
    local := oracle.getLocal(latest)

@@ -116,10 +109,9 @@ func (oracle *CheckpointOracle) StableCheckpoint() (*params.TrustedCheckpoint, u
    //
    // In both cases, no stable checkpoint will be returned.
    if local.HashEqual(hash) {
-       oracle.lastCheckTime = time.Now()
-       return &local, height.Uint64()
+       oracle.lastCheckPointHeight = height.Uint64()
+       oracle.lastCheckPoint = &local
+       return oracle.lastCheckPoint, oracle.lastCheckPointHeight
    }
    return nil, 0
}
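The oracle hunks above memoize the result of the contract lookup in lastCheckTime, lastCheckPoint and lastCheckPointHeight, caching failures as well as successes. A loose, self-contained sketch of that memoization idea (not the oracle's exact control flow):

```go
package main

import (
	"fmt"
	"time"
)

// checkpoint stands in for params.TrustedCheckpoint.
type checkpoint struct{ section uint64 }

// oracle caches the outcome of the last fetch, mirroring the new fields in
// the hunk above: even a failed lookup is recorded, so a flaky contract is
// not re-queried on every call within the refresh window.
type oracle struct {
	lastCheckTime   time.Time
	lastCheckPoint  *checkpoint
	lastCheckHeight uint64
	fetch           func() (*checkpoint, uint64, error)
}

func (o *oracle) stableCheckpoint() (*checkpoint, uint64) {
	o.lastCheckTime = time.Now()
	cp, height, err := o.fetch()
	if err != nil {
		o.lastCheckPoint, o.lastCheckHeight = nil, 0
		return o.lastCheckPoint, o.lastCheckHeight
	}
	o.lastCheckPoint, o.lastCheckHeight = cp, height
	return o.lastCheckPoint, o.lastCheckHeight
}

func main() {
	o := &oracle{fetch: func() (*checkpoint, uint64, error) {
		return &checkpoint{section: 4}, 100, nil
	}}
	cp, h := o.stableCheckpoint()
	fmt.Println(cp.section, h)
}
```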
@@ -22,7 +22,6 @@ import (
    "time"

    "github.com/ethereum/go-ethereum/accounts"
-   "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/common/mclock"
@@ -37,7 +36,6 @@ import (
    "github.com/ethereum/go-ethereum/eth/gasprice"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/internal/ethapi"
-   "github.com/ethereum/go-ethereum/les/checkpointoracle"
    lpc "github.com/ethereum/go-ethereum/les/lespay/client"
    "github.com/ethereum/go-ethereum/light"
    "github.com/ethereum/go-ethereum/log"
@@ -72,14 +70,17 @@ type LightEthereum struct {
    engine         consensus.Engine
    accountManager *accounts.Manager
    netRPCService  *ethapi.PublicNetAPI
+
+   p2pServer *p2p.Server
}

-func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
-   chainDb, err := ctx.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/")
+// New creates an instance of the light client.
+func New(stack *node.Node, config *eth.Config) (*LightEthereum, error) {
+   chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/")
    if err != nil {
        return nil, err
    }
-   lespayDb, err := ctx.OpenDatabase("lespay", 0, 0, "eth/db/lespay")
+   lespayDb, err := stack.OpenDatabase("lespay", 0, 0, "eth/db/lespay")
    if err != nil {
        return nil, err
    }
@@ -100,17 +101,18 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
            closeCh: make(chan struct{}),
        },
        peers:          peers,
-       eventMux:       ctx.EventMux,
+       eventMux:       stack.EventMux(),
        reqDist:        newRequestDistributor(peers, &mclock.System{}),
-       accountManager: ctx.AccountManager,
-       engine:         eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, false, chainDb),
+       accountManager: stack.AccountManager(),
+       engine:         eth.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb),
        bloomRequests:  make(chan chan *bloombits.Retrieval),
        bloomIndexer:   eth.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
        valueTracker:   lpc.NewValueTracker(lespayDb, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
+       p2pServer:      stack.Server(),
    }
    peers.subscribe((*vtSubscription)(leth.valueTracker))

-   dnsdisc, err := leth.setupDiscovery(&ctx.Config.P2P)
+   dnsdisc, err := leth.setupDiscovery(&stack.Config().P2P)
    if err != nil {
        return nil, err
    }
@@ -139,11 +141,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
    leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)

    // Set up checkpoint oracle.
-   oracle := config.CheckpointOracle
-   if oracle == nil {
-       oracle = params.CheckpointOracles[genesisHash]
-   }
-   leth.oracle = checkpointoracle.New(oracle, leth.localCheckpoint)
+   leth.oracle = leth.setupOracle(stack, genesisHash, config)

    // Note: AddChildIndexer starts the update process for the child
    leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
@@ -160,7 +158,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
        rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
    }

-   leth.ApiBackend = &LesApiBackend{ctx.ExtRPCEnabled(), leth, nil}
+   leth.ApiBackend = &LesApiBackend{stack.Config().ExtRPCEnabled(), leth, nil}
    gpoParams := config.GPO
    if gpoParams.Default == nil {
        gpoParams.Default = config.Miner.GasPrice
@@ -172,6 +170,14 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
        log.Warn("Ultra light client is enabled", "trustedNodes", len(leth.handler.ulc.keys), "minTrustedFraction", leth.handler.ulc.fraction)
        leth.blockchain.DisableCheckFreq()
    }
+
+   leth.netRPCService = ethapi.NewPublicNetAPI(leth.p2pServer, leth.config.NetworkId)
+
+   // Register the backend on the node
+   stack.RegisterAPIs(leth.APIs())
+   stack.RegisterProtocols(leth.Protocols())
+   stack.RegisterLifecycle(leth)
+
    return leth, nil
}

@@ -265,32 +271,31 @@ func (s *LightEthereum) LesVersion() int                    { return int(ClientP
func (s *LightEthereum) Downloader() *downloader.Downloader { return s.handler.downloader }
func (s *LightEthereum) EventMux() *event.TypeMux           { return s.eventMux }

-// Protocols implements node.Service, returning all the currently configured
-// network protocols to start.
+// Protocols returns all the currently configured network protocols to start.
func (s *LightEthereum) Protocols() []p2p.Protocol {
    return s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
-       if p := s.peers.peer(peerIdToString(id)); p != nil {
+       if p := s.peers.peer(id.String()); p != nil {
            return p.Info()
        }
        return nil
    }, s.dialCandidates)
}

-// Start implements node.Service, starting all internal goroutines needed by the
+// Start implements node.Lifecycle, starting all internal goroutines needed by the
// light ethereum protocol implementation.
-func (s *LightEthereum) Start(srvr *p2p.Server) error {
+func (s *LightEthereum) Start() error {
    log.Warn("Light client mode is an experimental feature")

+   s.serverPool.start()
    // Start bloom request workers.
    s.wg.Add(bloomServiceThreads)
    s.startBloomHandlers(params.BloomBitsBlocksClient)
+   s.handler.start()

-   s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.config.NetworkId)
    return nil
}

-// Stop implements node.Service, terminating all internal goroutines used by the
+// Stop implements node.Lifecycle, terminating all internal goroutines used by the
// Ethereum protocol.
func (s *LightEthereum) Stop() error {
    close(s.closeCh)
@@ -313,11 +318,3 @@ func (s *LightEthereum) Stop() error {
    log.Info("Light ethereum stopped")
    return nil
}
-
-// SetClient sets the rpc client and binds the registrar contract.
-func (s *LightEthereum) SetContractBackend(backend bind.ContractBackend) {
-   if s.oracle == nil {
-       return
-   }
-   s.oracle.Start(backend)
-}
@@ -64,16 +64,20 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T
    if checkpoint != nil {
        height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
    }
-   handler.fetcher = newLightFetcher(handler, backend.serverPool.getTimeout)
+   handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise)
    handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer)
    handler.backend.peers.subscribe((*downloaderPeerNotify)(handler))
    return handler
}

+func (h *clientHandler) start() {
+   h.fetcher.start()
+}
+
func (h *clientHandler) stop() {
    close(h.closeCh)
    h.downloader.Terminate()
-   h.fetcher.close()
+   h.fetcher.stop()
    h.wg.Wait()
}

@@ -121,7 +125,6 @@ func (h *clientHandler) handle(p *serverPeer) error {
        connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
        serverConnectionGauge.Update(int64(h.backend.peers.len()))
    }()

-   h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})

    // Mark the peer starts to be served.
@@ -185,6 +188,9 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
            p.Log().Trace("Valid announcement signature")
        }
        p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth)
+
+       // Update peer head information first and then notify the announcement
+       p.updateHead(req.Hash, req.Number, req.Td)
+       h.fetcher.announce(p, &req)
    }
    case BlockHeadersMsg:
@@ -196,12 +202,17 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
    if err := msg.Decode(&resp); err != nil {
        return errResp(ErrDecode, "msg %v: %v", msg, err)
    }
+   headers := resp.Headers
    p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
    p.answeredRequest(resp.ReqID)
-   if h.fetcher.requestedID(resp.ReqID) {
-       h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
-   } else {
-       if err := h.downloader.DeliverHeaders(p.id, resp.Headers); err != nil {
+
+   // Filter out any explicitly requested headers, deliver the rest to the downloader
+   filter := len(headers) == 1
+   if filter {
+       headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
+   }
+   if len(headers) != 0 || !filter {
+       if err := h.downloader.DeliverHeaders(p.id, headers); err != nil {
            log.Debug("Failed to deliver headers", "err", err)
        }
    }
@@ -320,8 +331,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
    // Deliver the received response to retriever.
    if deliverMsg != nil {
        if err := h.backend.retriever.deliver(p, deliverMsg); err != nil {
-           p.errCount++
-           if p.errCount > maxResponseErrors {
+           if val := p.errCount.Add(1, mclock.Now()); val > maxResponseErrors {
                return err
            }
        }
@@ -42,15 +42,7 @@ const (
    persistCumulativeTimeRefresh = time.Minute * 5 // refresh period of the cumulative running time persistence
    posBalanceCacheLimit = 8192 // the maximum number of cached items in positive balance queue
    negBalanceCacheLimit = 8192 // the maximum number of cached items in negative balance queue

    // connectedBias is applied to already connected clients so that
    // an already connected client won't be kicked out very soon and we
    // can ensure all connected clients have enough time to request
    // or sync some data.
    //
    // todo(rjl493456442) make it configurable. It can be the option of
    // free trial time!
    connectedBias = time.Minute * 3
    defaultConnectedBias = time.Minute * 3 // the default connectedBias used in clientPool
)

// clientPool implements a client database that assigns a priority to each client
@@ -94,7 +86,7 @@ type clientPool struct {
    freeClientCap  uint64         // The capacity value of each free client
    startTime      mclock.AbsTime // The timestamp at which the clientpool started running
    cumulativeTime int64          // The cumulative running time of the clientpool at the start point.
    disableBias    bool           // Disable connection bias (used in testing)
    connectedBias  time.Duration  // The connection bias. 0: connection bias disabled (used in testing)
}

// clientPoolPeer represents a client peer in the pool.
@@ -171,6 +163,7 @@ func newClientPool(db ethdb.Database, freeClientCap uint64, clock mclock.Clock,
        startTime:      clock.Now(),
        cumulativeTime: ndb.getCumulativeTime(),
        stopCh:         make(chan struct{}),
        connectedBias:  defaultConnectedBias,
    }
    // If the negative balance of a free client is even lower than 1,
    // delete this entry.
@@ -219,7 +212,7 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
    id, freeID := peer.ID(), peer.freeClientId()
    if _, ok := f.connectedMap[id]; ok {
        clientRejectedMeter.Mark(1)
        log.Debug("Client already connected", "address", freeID, "id", peerIdToString(id))
        log.Debug("Client already connected", "address", freeID, "id", id.String())
        return false
    }
    // Create a clientInfo but do not add it yet
@@ -279,16 +272,12 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
        newCount--
        return newCapacity > f.capLimit || newCount > f.connLimit
    })
    bias := connectedBias
    if f.disableBias {
        bias = 0
    }
    if newCapacity > f.capLimit || newCount > f.connLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(bias), false)-kickPriority) > 0 {
    if newCapacity > f.capLimit || newCount > f.connLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(f.connectedBias), false)-kickPriority) > 0 {
        for _, c := range kickList {
            f.connectedQueue.Push(c)
        }
        clientRejectedMeter.Mark(1)
        log.Debug("Client rejected", "address", freeID, "id", peerIdToString(id))
        log.Debug("Client rejected", "address", freeID, "id", id.String())
        return false
    }
    // accept new client, drop old ones
@@ -333,7 +322,7 @@ func (f *clientPool) disconnect(p clientPoolPeer) {
    // Short circuit if the peer hasn't been registered.
    e := f.connectedMap[p.ID()]
    if e == nil {
        log.Debug("Client not connected", "address", p.freeClientId(), "id", peerIdToString(p.ID()))
        log.Debug("Client not connected", "address", p.freeClientId(), "id", p.ID().String())
        return
    }
    f.dropClient(e, f.clock.Now(), false)
@@ -371,6 +360,16 @@ func (f *clientPool) setDefaultFactors(posFactors, negFactors priceFactors) {
    f.defaultNegFactors = negFactors
}

// setConnectedBias sets the connection bias, which is applied to already connected clients
// so that an already connected client won't be kicked out very soon, ensuring all
// connected clients have enough time to request or sync some data.
func (f *clientPool) setConnectedBias(bias time.Duration) {
    f.lock.Lock()
    defer f.lock.Unlock()

    f.connectedBias = bias
}

// dropClient removes a client from the connected queue and finalizes its balance.
// If kick is true then it also initiates the disconnection.
func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) {
@@ -91,7 +91,7 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD
        }
        pool = newClientPool(db, 1, &clock, disconnFn)
    )
    pool.disableBias = true
    pool.setConnectedBias(0)
    pool.setLimits(connLimit, uint64(connLimit))
    pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})

@@ -248,7 +248,7 @@ func TestPaidClientKickedOut(t *testing.T) {
        clock.Run(time.Millisecond)
    }
    clock.Run(time.Second)
    clock.Run(connectedBias)
    clock.Run(defaultConnectedBias)
    if !pool.connect(poolTestPeer(11), 0) {
        t.Fatalf("Free client should be accepted")
    }
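Side note on the test change above: the bias can now be zeroed through the new setter instead of poking the removed disableBias field. A minimal package-internal sketch (the helper name is hypothetical; constructor arguments follow the call sites shown in this diff):

package les

import (
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

// newUnbiasedTestPool is a hypothetical helper: it builds a pool whose
// kick-out decisions are not biased toward already connected clients.
func newUnbiasedTestPool() *clientPool {
    var clock mclock.Simulated
    pool := newClientPool(rawdb.NewMemoryDatabase(), 1, &clock, func(id enode.ID) {})
    pool.setConnectedBias(0) // replaces the removed `pool.disableBias = true`
    pool.setLimits(10, 10)   // connection count and total capacity limits
    return pool
}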
@@ -26,9 +26,12 @@ import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/ethclient"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/les/checkpointoracle"
    "github.com/ethereum/go-ethereum/light"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/discv5"
    "github.com/ethereum/go-ethereum/p2p/enode"
@@ -145,3 +148,26 @@ func (c *lesCommons) localCheckpoint(index uint64) params.TrustedCheckpoint {
        BloomRoot: light.GetBloomTrieRoot(c.chainDb, index, sectionHead),
    }
}

// setupOracle sets up the checkpoint oracle contract client.
func (c *lesCommons) setupOracle(node *node.Node, genesis common.Hash, ethconfig *eth.Config) *checkpointoracle.CheckpointOracle {
    config := ethconfig.CheckpointOracle
    if config == nil {
        // Try loading default config.
        config = params.CheckpointOracles[genesis]
    }
    if config == nil {
        log.Info("Checkpoint registrar is not enabled")
        return nil
    }
    if config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold {
        log.Warn("Invalid checkpoint registrar config")
        return nil
    }
    oracle := checkpointoracle.New(config, c.localCheckpoint)
    rpcClient, _ := node.Attach()
    client := ethclient.NewClient(rpcClient)
    oracle.Start(client)
    log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold)
    return oracle
}
1301
les/fetcher.go
1301
les/fetcher.go
File diff suppressed because it is too large
268
les/fetcher_test.go
Normal file
268
les/fetcher_test.go
Normal file
@@ -0,0 +1,268 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
    "math/big"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

// verifyImportEvent verifies that a single event arrives on an import channel.
func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
    if arrive {
        select {
        case <-imported:
        case <-time.After(time.Second):
            t.Fatalf("import timeout")
        }
    } else {
        select {
        case <-imported:
            t.Fatalf("import invoked")
        case <-time.After(20 * time.Millisecond):
        }
    }
}

// verifyImportDone verifies that no more events are arriving on an import channel.
func verifyImportDone(t *testing.T, imported chan interface{}) {
    select {
    case <-imported:
        t.Fatalf("extra block imported")
    case <-time.After(50 * time.Millisecond):
    }
}

// verifyChainHeight verifies the chain height is as expected.
func verifyChainHeight(t *testing.T, fetcher *lightFetcher, height uint64) {
    local := fetcher.chain.CurrentHeader().Number.Uint64()
    if local != height {
        t.Fatalf("chain height mismatch, got %d, want %d", local, height)
    }
}

func TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements(t, 2) }
func TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) }

func testSequentialAnnouncements(t *testing.T, protocol int) {
    s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false, true)
    defer teardown()

    // Create connected peer pair.
    c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
    p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler)
    if err != nil {
        t.Fatalf("Failed to create peer pair %v", err)
    }
    c.handler.fetcher.noAnnounce = false

    importCh := make(chan interface{})
    c.handler.fetcher.newHeadHook = func(header *types.Header) {
        importCh <- header
    }
    for i := uint64(1); i <= s.backend.Blockchain().CurrentHeader().Number.Uint64(); i++ {
        header := s.backend.Blockchain().GetHeaderByNumber(i)
        hash, number := header.Hash(), header.Number.Uint64()
        td := rawdb.ReadTd(s.db, hash, number)

        announce := announceData{hash, number, td, 0, nil}
        if p1.cpeer.announceType == announceTypeSigned {
            announce.sign(s.handler.server.privateKey)
        }
        p1.cpeer.sendAnnounce(announce)
        verifyImportEvent(t, importCh, true)
    }
    verifyImportDone(t, importCh)
    verifyChainHeight(t, c.handler.fetcher, 4)
}

func TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) }
func TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) }

func testGappedAnnouncements(t *testing.T, protocol int) {
    s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false, true)
    defer teardown()

    // Create connected peer pair.
    c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
    peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler)
    if err != nil {
        t.Fatalf("Failed to create peer pair %v", err)
    }
    c.handler.fetcher.noAnnounce = false

    done := make(chan *types.Header, 1)
    c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }

    // Prepare announcement by latest header.
    latest := s.backend.Blockchain().CurrentHeader()
    hash, number := latest.Hash(), latest.Number.Uint64()
    td := rawdb.ReadTd(s.db, hash, number)

    // Sign the announcement if necessary.
    announce := announceData{hash, number, td, 0, nil}
    if peer.cpeer.announceType == announceTypeSigned {
        announce.sign(s.handler.server.privateKey)
    }
    peer.cpeer.sendAnnounce(announce)

    <-done // Wait for syncing
    verifyChainHeight(t, c.handler.fetcher, 4)

    // Send a reorged announcement
    var newAnno = make(chan struct{}, 1)
    c.handler.fetcher.noAnnounce = true
    c.handler.fetcher.newAnnounce = func(*serverPeer, *announceData) {
        newAnno <- struct{}{}
    }
    blocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3),
        ethash.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) {
            gen.OffsetTime(-9) // higher block difficulty
        })
    s.backend.Blockchain().InsertChain(blocks)
    <-newAnno
    c.handler.fetcher.noAnnounce = false
    c.handler.fetcher.newAnnounce = nil

    latest = blocks[len(blocks)-1].Header()
    hash, number = latest.Hash(), latest.Number.Uint64()
    td = rawdb.ReadTd(s.db, hash, number)

    announce = announceData{hash, number, td, 1, nil}
    if peer.cpeer.announceType == announceTypeSigned {
        announce.sign(s.handler.server.privateKey)
    }
    peer.cpeer.sendAnnounce(announce)

    <-done // Wait for syncing
    verifyChainHeight(t, c.handler.fetcher, 5)
}

func TestTrustedAnnouncementsLes2(t *testing.T) { testTrustedAnnouncement(t, 2) }
func TestTrustedAnnouncementsLes3(t *testing.T) { testTrustedAnnouncement(t, 3) }

func testTrustedAnnouncement(t *testing.T, protocol int) {
    var (
        servers   []*testServer
        teardowns []func()
        nodes     []*enode.Node
        ids       []string
        cpeers    []*clientPeer
        speers    []*serverPeer
    )
    for i := 0; i < 10; i++ {
        s, n, teardown := newTestServerPeer(t, 10, protocol)

        servers = append(servers, s)
        nodes = append(nodes, n)
        teardowns = append(teardowns, teardown)

        // Half of them are trusted servers.
        if i < 5 {
            ids = append(ids, n.String())
        }
    }
    _, c, teardown := newClientServerEnv(t, 0, protocol, nil, ids, 60, false, false, true)
    defer teardown()
    defer func() {
        for i := 0; i < len(teardowns); i++ {
            teardowns[i]()
        }
    }()

    c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.

    // Connect all server instances.
    for i := 0; i < len(servers); i++ {
        sp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol)
        if err != nil {
            t.Fatalf("connect server and client failed, err %s", err)
        }
        cpeers = append(cpeers, cp)
        speers = append(speers, sp)
    }
    c.handler.fetcher.noAnnounce = false

    newHead := make(chan *types.Header, 1)
    c.handler.fetcher.newHeadHook = func(header *types.Header) { newHead <- header }

    check := func(height []uint64, expected uint64, callback func()) {
        for i := 0; i < len(height); i++ {
            for j := 0; j < len(servers); j++ {
                h := servers[j].backend.Blockchain().GetHeaderByNumber(height[i])
                hash, number := h.Hash(), h.Number.Uint64()
                td := rawdb.ReadTd(servers[j].db, hash, number)

                // Sign the announcement if necessary.
                announce := announceData{hash, number, td, 0, nil}
                p := cpeers[j]
                if p.announceType == announceTypeSigned {
                    announce.sign(servers[j].handler.server.privateKey)
                }
                p.sendAnnounce(announce)
            }
        }
        if callback != nil {
            callback()
        }
        verifyChainHeight(t, c.handler.fetcher, expected)
    }
    check([]uint64{1}, 1, func() { <-newHead })   // Sequential announcements
    check([]uint64{4}, 4, func() { <-newHead })   // ULC-style light syncing, rollback untrusted headers
    check([]uint64{10}, 10, func() { <-newHead }) // Sync the whole chain.
}

func TestInvalidAnnounces(t *testing.T) {
    s, c, teardown := newClientServerEnv(t, 4, lpv3, nil, nil, 0, false, false, true)
    defer teardown()

    // Create connected peer pair.
    c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
    peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler)
    if err != nil {
        t.Fatalf("Failed to create peer pair %v", err)
    }
    c.handler.fetcher.noAnnounce = false

    done := make(chan *types.Header, 1)
    c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }

    // Prepare announcement by latest header.
    headerOne := s.backend.Blockchain().GetHeaderByNumber(1)
    hash, number := headerOne.Hash(), headerOne.Number.Uint64()
    td := big.NewInt(200) // bad td

    // Sign the announcement if necessary.
    announce := announceData{hash, number, td, 0, nil}
    if peer.cpeer.announceType == announceTypeSigned {
        announce.sign(s.handler.server.privateKey)
    }
    peer.cpeer.sendAnnounce(announce)
    <-done // Wait for syncing

    // Ensure the bad peer is evicted
    if c.handler.backend.peers.len() != 0 {
        t.Fatalf("Failed to evict invalid peer")
    }
}
@@ -222,13 +222,13 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od

    // expect retrievals to fail (except genesis block) without a les peer
    client.handler.backend.peers.lock.Lock()
    client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return false }
    client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return false }
    client.handler.backend.peers.lock.Unlock()
    test(expFail)

    // expect all retrievals to pass
    client.handler.backend.peers.lock.Lock()
    client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return true }
    client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return true }
    client.handler.backend.peers.lock.Unlock()
    test(5)

103
les/peer.go
103
les/peer.go
@@ -36,7 +36,6 @@ import (
    "github.com/ethereum/go-ethereum/les/utils"
    "github.com/ethereum/go-ethereum/light"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
)
@@ -115,11 +114,6 @@ func (m keyValueMap) get(key string, val interface{}) error {
    return rlp.DecodeBytes(enc, val)
}

// peerIdToString converts enode.ID to a string form
func peerIdToString(id enode.ID) string {
    return fmt.Sprintf("%x", id.Bytes())
}

// peerCommons contains fields needed by both server peer and client peer.
type peerCommons struct {
    *p2p.Peer
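The helper removed above duplicated what enode.ID already provides: its String method renders the ID in the same plain-hex form, which is why the later hunks can substitute id.String() everywhere. A quick standalone check (relying only on the enode package's exported API):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
    var id enode.ID // zero-valued ID, purely for demonstration
    // The deleted les helper formatted the ID as %x over its bytes;
    // enode.ID.String yields the identical hex string.
    legacy := fmt.Sprintf("%x", id.Bytes())
    fmt.Println(legacy == id.String()) // true
}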
@@ -343,12 +337,12 @@ type serverPeer struct {
    sentReqs map[uint64]sentReqEntry

    // Statistics
    errCount    int // Counts the invalid responses the server has replied with
    errCount    utils.LinearExpiredValue // Counts the invalid responses the server has replied with
    updateCount uint64
    updateTime  mclock.AbsTime

    // Callbacks
    hasBlock func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
    // Test callback hooks
    hasBlockHook func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
}

func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *serverPeer {
@@ -356,13 +350,14 @@ func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2
        peerCommons: peerCommons{
            Peer:      p,
            rw:        rw,
            id:        peerIdToString(p.ID()),
            id:        p.ID().String(),
            version:   version,
            network:   network,
            sendQueue: utils.NewExecQueue(100),
            closeCh:   make(chan struct{}),
        },
        trusted: trusted,
        trusted:  trusted,
        errCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)},
    }
}

@@ -524,7 +519,11 @@ func (p *serverPeer) getTxRelayCost(amount, size int) uint64 {
// HasBlock checks if the peer has a given block
func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
    p.lock.RLock()
    defer p.lock.RUnlock()

    if p.hasBlockHook != nil {
        return p.hasBlockHook(hash, number, hasState)
    }
    head := p.headInfo.Number
    var since, recent uint64
    if hasState {
@@ -534,10 +533,7 @@ func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bo
        since = p.chainSince
        recent = p.chainRecent
    }
    hasBlock := p.hasBlock
    p.lock.RUnlock()

    return head >= number && number >= since && (recent == 0 || number+recent+4 > head) && hasBlock != nil && hasBlock(hash, number, hasState)
    return head >= number && number >= since && (recent == 0 || number+recent+4 > head)
}

// updateFlowControl updates the flow control parameters belonging to the server
@@ -562,6 +558,15 @@ func (p *serverPeer) updateFlowControl(update keyValueMap) {
    }
}

// updateHead updates the head information based on the announcement from
// the peer.
func (p *serverPeer) updateHead(hash common.Hash, number uint64, td *big.Int) {
    p.lock.Lock()
    defer p.lock.Unlock()

    p.headInfo = blockInfo{Hash: hash, Number: number, Td: td}
}

// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
@@ -712,11 +717,15 @@ type clientPeer struct {
    // responseLock ensures that responses are queued in the same order as
    // RequestProcessed is called
    responseLock  sync.Mutex
    server        bool
    invalidCount  uint32 // Counts the invalid requests the client peer has made.
    responseCount uint64 // Counter to generate a unique id for request processing.
    errCh         chan error
    fcClient      *flowcontrol.ClientNode // Server side mirror token bucket.

    // invalidLock is used for protecting invalidCount.
    invalidLock  sync.RWMutex
    invalidCount utils.LinearExpiredValue // Counts the invalid requests the client peer has made.

    server   bool
    errCh    chan error
    fcClient *flowcontrol.ClientNode // Server side mirror token bucket.
}

func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer {
@@ -724,13 +733,14 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite
        peerCommons: peerCommons{
            Peer:      p,
            rw:        rw,
            id:        peerIdToString(p.ID()),
            id:        p.ID().String(),
            version:   version,
            network:   network,
            sendQueue: utils.NewExecQueue(100),
            closeCh:   make(chan struct{}),
        },
        errCh: make(chan error, 1),
        invalidCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)},
        errCh:        make(chan error, 1),
    }
}

@@ -970,6 +980,18 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
    })
}

func (p *clientPeer) bumpInvalid() {
    p.invalidLock.Lock()
    p.invalidCount.Add(1, mclock.Now())
    p.invalidLock.Unlock()
}

func (p *clientPeer) getInvalid() uint64 {
    p.invalidLock.RLock()
    defer p.invalidLock.RUnlock()
    return p.invalidCount.Value(mclock.Now())
}

// serverPeerSubscriber is an interface to notify services about added or
// removed server peers
type serverPeerSubscriber interface {
@@ -1266,3 +1288,42 @@ func (ps *serverPeerSet) close() {
    }
    ps.closed = true
}

// serverSet is a special set which contains all connected LES servers.
// LES servers will also be found by the discovery protocol because they
// run the LES protocol too. We can't drop them: although they are useless
// to us (as a server), they may be useful to other protocols (e.g. ETH)
// running on top of devp2p.
type serverSet struct {
    lock   sync.Mutex
    set    map[string]*clientPeer
    closed bool
}

func newServerSet() *serverSet {
    return &serverSet{set: make(map[string]*clientPeer)}
}

func (s *serverSet) register(peer *clientPeer) error {
    s.lock.Lock()
    defer s.lock.Unlock()

    if s.closed {
        return errClosed
    }
    if _, exist := s.set[peer.id]; exist {
        return errAlreadyRegistered
    }
    s.set[peer.id] = peer
    return nil
}

func (s *serverSet) close() {
    s.lock.Lock()
    defer s.lock.Unlock()

    for _, p := range s.set {
        p.Disconnect(p2p.DiscQuitting)
    }
    s.closed = true
}

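How serverSet is meant to be used shows up in the server_handler.go hunk further down; condensed into one sketch here (a hypothetical wrapper, with names taken from this diff rather than a function that exists in the tree):

// Package-internal sketch: a peer that advertises itself as another LES
// server is parked in the serverSet rather than the client pool, and the
// connection is simply held open until the remote side disconnects.
func (h *serverHandler) parkServerPeer(p *clientPeer) error {
    if err := h.server.serverset.register(p); err != nil {
        return err
    }
    // No requests are expected from a fellow server; block on the next
    // read until the connection drops.
    _, err := p.rw.ReadMsg()
    return err
}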
@@ -20,14 +20,12 @@ import (
    "crypto/ecdsa"
    "time"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/les/checkpointoracle"
    "github.com/ethereum/go-ethereum/les/flowcontrol"
    "github.com/ethereum/go-ethereum/light"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/discv5"
    "github.com/ethereum/go-ethereum/p2p/enode"
@@ -41,6 +39,7 @@ type LesServer struct {

    archiveMode bool // Flag whether the ethereum node runs in archive mode.
    peers       *clientPeerSet
    serverset   *serverSet
    handler     *serverHandler
    lesTopics   []discv5.Topic
    privateKey  *ecdsa.PrivateKey
@@ -55,9 +54,11 @@ type LesServer struct {
    minCapacity, maxCapacity, freeCapacity uint64
    threadsIdle int // Request serving threads count when system is idle.
    threadsBusy int // Request serving threads count when system is busy (block insertion).

    p2pSrv *p2p.Server
}

func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
    // Collect les protocol version information supported by local node.
    lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
    for i, pv := range AdvertiseProtocolVersions {
@@ -83,22 +84,21 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
        },
        archiveMode:  e.ArchiveMode(),
        peers:        newClientPeerSet(),
        serverset:    newServerSet(),
        lesTopics:    lesTopics,
        fcManager:    flowcontrol.NewClientManager(nil, &mclock.System{}),
        servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
        threadsBusy:  config.LightServ/100 + 1,
        threadsIdle:  threads,
        p2pSrv:       node.Server(),
    }
    srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), e.Synced)
    srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
    srv.freeCapacity = srv.minCapacity
    srv.oracle = srv.setupOracle(node, e.BlockChain().Genesis().Hash(), config)

    // Set up checkpoint oracle.
    oracle := config.CheckpointOracle
    if oracle == nil {
        oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
    }
    srv.oracle = checkpointoracle.New(oracle, srv.localCheckpoint)
    // Initialize the bloom trie indexer.
    e.BloomIndexer().AddChildIndexer(srv.bloomTrieIndexer)

    // Initialize server capacity management fields.
    srv.defParams = flowcontrol.ServerParams{
@@ -116,7 +116,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
        srv.maxCapacity = totalRecharge
    }
    srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2)
    srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(peerIdToString(id)) })
    srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(id.String()) })
    srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1})

    checkpoint := srv.latestLocalCheckpoint()
@@ -125,6 +125,11 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
        "chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
    }
    srv.chtIndexer.Start(e.BlockChain())

    node.RegisterProtocols(srv.Protocols())
    node.RegisterAPIs(srv.APIs())
    node.RegisterLifecycle(srv)

    return srv, nil
}

@@ -153,7 +158,7 @@ func (s *LesServer) APIs() []rpc.API {

func (s *LesServer) Protocols() []p2p.Protocol {
    ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
        if p := s.peers.peer(peerIdToString(id)); p != nil {
        if p := s.peers.peer(id.String()); p != nil {
            return p.Info()
        }
        return nil
@@ -166,14 +171,14 @@ func (s *LesServer) Protocols() []p2p.Protocol {
}

// Start starts the LES server
func (s *LesServer) Start(srvr *p2p.Server) {
    s.privateKey = srvr.PrivateKey
func (s *LesServer) Start() error {
    s.privateKey = s.p2pSrv.PrivateKey
    s.handler.start()

    s.wg.Add(1)
    go s.capacityManagement()

    if srvr.DiscV5 != nil {
    if s.p2pSrv.DiscV5 != nil {
        for _, topic := range s.lesTopics {
            topic := topic
            go func() {
@@ -181,16 +186,21 @@ func (s *LesServer) Start(srvr *p2p.Server) {
                logger.Info("Starting topic registration")
                defer logger.Info("Terminated topic registration")

                srvr.DiscV5.RegisterTopic(topic, s.closeCh)
                s.p2pSrv.DiscV5.RegisterTopic(topic, s.closeCh)
            }()
        }
    }

    return nil
}

// Stop stops the LES service
func (s *LesServer) Stop() {
func (s *LesServer) Stop() error {
    close(s.closeCh)

    // Disconnect existing connections with other LES servers.
    s.serverset.close()

    // Disconnect existing sessions.
    // This also closes the gate for any new registrations on the peer set.
    // sessions which are already established but not added to pm.peers yet
@@ -207,18 +217,8 @@ func (s *LesServer) Stop() {
    s.chtIndexer.Close()
    s.wg.Wait()
    log.Info("Les server stopped")
}

func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
    bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
}

// SetClient sets the rpc client and starts running checkpoint contract if it is not yet watched.
func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
    if s.oracle == nil {
        return
    }
    s.oracle.Start(backend)

    return nil
}

// capacityManagement starts an event handler loop that updates the recharge curve of
@@ -123,6 +123,9 @@ func (h *serverHandler) handle(p *clientPeer) error {
        return err
    }
    if p.server {
        if err := h.server.serverset.register(p); err != nil {
            return err
        }
        // connected to another server, no messages expected, just wait for disconnection
        _, err := p.rw.ReadMsg()
        return err
@@ -322,7 +325,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
            origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
        }
        if origin == nil {
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            break
        }
        headers = append(headers, origin)
@@ -419,7 +422,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        }
        body := h.blockchain.GetBodyRLP(hash)
        if body == nil {
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        bodies = append(bodies, body)
@@ -467,7 +470,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        header := h.blockchain.GetHeaderByHash(request.BHash)
        if header == nil {
            p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash)
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        // Refuse to search stale state data in the database since looking for
@@ -475,7 +478,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        local := h.blockchain.CurrentHeader().Number.Uint64()
        if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
            p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        triedb := h.blockchain.StateCache().TrieDB()
@@ -483,7 +486,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))
        if err != nil {
            p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        code, err := triedb.Node(common.BytesToHash(account.CodeHash))
@@ -542,7 +545,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        results := h.blockchain.GetReceiptsByHash(hash)
        if results == nil {
            if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
                atomic.AddUint32(&p.invalidCount, 1)
                p.bumpInvalid()
                continue
            }
        }
@@ -605,7 +608,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {

        if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil {
            p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        // Refuse to search stale state data in the database since looking for
@@ -613,14 +616,14 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        local := h.blockchain.CurrentHeader().Number.Uint64()
        if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
            p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        root = header.Root
    }
    // If a header lookup failed (non existent), ignore subsequent requests for the same header
    if root == (common.Hash{}) {
        atomic.AddUint32(&p.invalidCount, 1)
        p.bumpInvalid()
        continue
    }
    // Open the account or storage trie for the request
@@ -639,7 +642,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))
        if err != nil {
            p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
            atomic.AddUint32(&p.invalidCount, 1)
            p.bumpInvalid()
            continue
        }
        trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
@@ -833,9 +836,9 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
        clientErrorMeter.Mark(1)
        return errResp(ErrInvalidMsgCode, "%v", msg.Code)
    }
    // If the client has made too much invalid request(e.g. request a non-exist data),
    // If the client has made too many invalid requests (e.g. requesting non-existent data),
    // reject them to prevent a SPAM attack.
    if atomic.LoadUint32(&p.invalidCount) > maxRequestErrors {
    if p.getInvalid() > maxRequestErrors {
        clientErrorMeter.Mark(1)
        return errTooManyInvalidRequest
    }
@@ -223,6 +223,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
    if client.oracle != nil {
        client.oracle.Start(backend)
    }
    client.handler.start()
    return client.handler
}

@@ -124,6 +124,50 @@ func (e *ExpiredValue) SubExp(a ExpiredValue) {
    }
}

// LinearExpiredValue is very similar to ExpiredValue in that the value
// expires continuously over time. The difference is that it expires linearly.
type LinearExpiredValue struct {
    Offset uint64         // The latest time offset
    Val    uint64         // The remaining value, can never be negative
    Rate   mclock.AbsTime `rlp:"-"` // Expiration rate (in nanoseconds), ignored by RLP
}

// Value calculates the value at the given moment. This function always
// assumes that the given timestamp is not less than the recorded one.
func (e LinearExpiredValue) Value(now mclock.AbsTime) uint64 {
    offset := uint64(now / e.Rate)
    if e.Offset < offset {
        diff := offset - e.Offset
        if e.Val >= diff {
            e.Val -= diff
        } else {
            e.Val = 0
        }
    }
    return e.Val
}

// Add adds a signed value at the given moment. This function always
// assumes that the given timestamp is not less than the recorded one.
func (e *LinearExpiredValue) Add(amount int64, now mclock.AbsTime) uint64 {
    offset := uint64(now / e.Rate)
    if e.Offset < offset {
        diff := offset - e.Offset
        if e.Val >= diff {
            e.Val -= diff
        } else {
            e.Val = 0
        }
        e.Offset = offset
    }
    if amount < 0 && uint64(-amount) > e.Val {
        e.Val = 0
    } else {
        e.Val = uint64(int64(e.Val) + amount)
    }
    return e.Val
}

// Expirer changes logOffset with a linear rate which can be changed during operation.
// It is not thread safe; if access by multiple goroutines is needed then it should be
// encapsulated into a locked structure.
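To make the linear decay concrete, here is a small standalone program against the new type (the import path assumes les/utils as in this tree; with Rate set to one hour, one unit of the counter expires per hour, clamping at zero):

package main

import (
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/les/utils"
)

func main() {
    // One unit of the counter decays away per hour.
    v := utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)}

    v.Add(3, mclock.AbsTime(0))                         // counter = 3
    fmt.Println(v.Value(mclock.AbsTime(2 * time.Hour))) // 1: two units have expired
    fmt.Println(v.Value(mclock.AbsTime(5 * time.Hour))) // 0: clamped, never negative
}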
@@ -18,6 +18,8 @@ package utils

import (
    "testing"

    "github.com/ethereum/go-ethereum/common/mclock"
)

func TestValueExpiration(t *testing.T) {
@@ -116,3 +118,78 @@ func TestExpiredValueSubtraction(t *testing.T) {
        }
    }
}

func TestLinearExpiredValue(t *testing.T) {
    var cases = []struct {
        value  LinearExpiredValue
        now    mclock.AbsTime
        expect uint64
    }{
        {LinearExpiredValue{
            Offset: 0,
            Val:    0,
            Rate:   mclock.AbsTime(1),
        }, 0, 0},

        {LinearExpiredValue{
            Offset: 1,
            Val:    1,
            Rate:   mclock.AbsTime(1),
        }, 0, 1},

        {LinearExpiredValue{
            Offset: 1,
            Val:    1,
            Rate:   mclock.AbsTime(1),
        }, mclock.AbsTime(2), 0},

        {LinearExpiredValue{
            Offset: 1,
            Val:    1,
            Rate:   mclock.AbsTime(1),
        }, mclock.AbsTime(3), 0},
    }
    for _, c := range cases {
        if value := c.value.Value(c.now); value != c.expect {
            t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value)
        }
    }
}

func TestLinearExpiredAddition(t *testing.T) {
    var cases = []struct {
        value  LinearExpiredValue
        amount int64
        now    mclock.AbsTime
        expect uint64
    }{
        {LinearExpiredValue{
            Offset: 0,
            Val:    0,
            Rate:   mclock.AbsTime(1),
        }, -1, 0, 0},

        {LinearExpiredValue{
            Offset: 1,
            Val:    1,
            Rate:   mclock.AbsTime(1),
        }, -1, 0, 0},

        {LinearExpiredValue{
            Offset: 1,
            Val:    2,
            Rate:   mclock.AbsTime(1),
        }, -1, mclock.AbsTime(2), 0},

        {LinearExpiredValue{
            Offset: 1,
            Val:    2,
            Rate:   mclock.AbsTime(1),
        }, -2, mclock.AbsTime(2), 0},
    }
    for _, c := range cases {
        if value := c.value.Add(c.amount, c.now); value != c.expect {
            t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value)
        }
    }
}

@@ -147,7 +147,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, dis
    diskdb:         db,
    odr:            odr,
    trieTable:      trieTable,
    triedb:         trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
    triedb:         trie.NewDatabaseWithCache(trieTable, 1, ""), // Use a tiny cache only to keep memory down
    trieset:        mapset.NewSet(),
    sectionSize:    size,
    disablePruning: disablePruning,
@@ -340,7 +340,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin
    diskdb:     db,
    odr:        odr,
    trieTable:  trieTable,
    triedb:     trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
    triedb:     trie.NewDatabaseWithCache(trieTable, 1, ""), // Use a tiny cache only to keep memory down
    trieset:    mapset.NewSet(),
    parentSize: parentSize,
    size:       size,
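On the trie.NewDatabaseWithCache change: the added third argument appears to be a file-system path for journalling the clean trie cache across restarts, with the empty string (as used above) disabling persistence — that semantics is our assumption, not stated in this compare view. A hedged sketch of a call site, assuming only the three-argument signature shown in the hunks:

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    // Assumed semantics: a 16MB clean cache and no on-disk cache journal ("").
    triedb := trie.NewDatabaseWithCache(rawdb.NewMemoryDatabase(), 16, "")
    _ = triedb
}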
@@ -61,30 +61,31 @@ func main() {
    genesis := makeGenesis(faucets, sealers)

    var (
        nodes  []*node.Node
        nodes  []*eth.Ethereum
        enodes []*enode.Node
    )

    for _, sealer := range sealers {
        // Start the node and wait until it's up
        node, err := makeSealer(genesis)
        stack, ethBackend, err := makeSealer(genesis)
        if err != nil {
            panic(err)
        }
        defer node.Close()
        defer stack.Close()

        for node.Server().NodeInfo().Ports.Listener == 0 {
        for stack.Server().NodeInfo().Ports.Listener == 0 {
            time.Sleep(250 * time.Millisecond)
        }
        // Connect the node to al the previous ones
        // Connect the node to all the previous ones
        for _, n := range enodes {
            node.Server().AddPeer(n)
            stack.Server().AddPeer(n)
        }
        // Start tracking the node and it's enode
        nodes = append(nodes, node)
        enodes = append(enodes, node.Server().Self())
        // Start tracking the node and its enode
        nodes = append(nodes, ethBackend)
        enodes = append(enodes, stack.Server().Self())

        // Inject the signer key and start sealing with it
        store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
        store := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
        signer, err := store.ImportECDSA(sealer, "")
        if err != nil {
            panic(err)
@@ -93,15 +94,11 @@ func main() {
            panic(err)
        }
    }
    // Iterate over all the nodes and start signing with them
    time.Sleep(3 * time.Second)

    // Iterate over all the nodes and start signing on them
    time.Sleep(3 * time.Second)
    for _, node := range nodes {
        var ethereum *eth.Ethereum
        if err := node.Service(&ethereum); err != nil {
            panic(err)
        }
        if err := ethereum.StartMining(1); err != nil {
        if err := node.StartMining(1); err != nil {
            panic(err)
        }
    }
@@ -110,25 +107,22 @@ func main() {
    // Start injecting transactions from the faucet like crazy
    nonces := make([]uint64, len(faucets))
    for {
        // Pick a random signer node
        index := rand.Intn(len(faucets))
        backend := nodes[index%len(nodes)]

        // Fetch the accessor for the relevant signer
        var ethereum *eth.Ethereum
        if err := nodes[index%len(nodes)].Service(&ethereum); err != nil {
            panic(err)
        }
        // Create a self transaction and inject into the pool
        tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000), nil), types.HomesteadSigner{}, faucets[index])
        if err != nil {
            panic(err)
        }
        if err := ethereum.TxPool().AddLocal(tx); err != nil {
        if err := backend.TxPool().AddLocal(tx); err != nil {
            panic(err)
        }
        nonces[index]++

        // Wait if we're too saturated
        if pend, _ := ethereum.TxPool().Stats(); pend > 2048 {
        if pend, _ := backend.TxPool().Stats(); pend > 2048 {
            time.Sleep(100 * time.Millisecond)
        }
    }
@@ -171,7 +165,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
    return genesis
}

func makeSealer(genesis *core.Genesis) (*node.Node, error) {
func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
    // Define the basic configurations for the Ethereum node
    datadir, _ := ioutil.TempDir("", "")

@@ -189,27 +183,28 @@ func makeSealer(genesis *core.Genesis) (*node.Node, error) {
    // Start the node and configure a full Ethereum node on it
    stack, err := node.New(config)
    if err != nil {
        return nil, err
        return nil, nil, err
    }
    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
        return eth.New(ctx, &eth.Config{
            Genesis:         genesis,
            NetworkId:       genesis.Config.ChainID.Uint64(),
            SyncMode:        downloader.FullSync,
            DatabaseCache:   256,
            DatabaseHandles: 256,
            TxPool:          core.DefaultTxPoolConfig,
            GPO:             eth.DefaultConfig.GPO,
            Miner: miner.Config{
                GasFloor: genesis.GasLimit * 9 / 10,
                GasCeil:  genesis.GasLimit * 11 / 10,
                GasPrice: big.NewInt(1),
                Recommit: time.Second,
            },
        })
    }); err != nil {
        return nil, err
    // Create and register the backend
    ethBackend, err := eth.New(stack, &eth.Config{
        Genesis:         genesis,
        NetworkId:       genesis.Config.ChainID.Uint64(),
        SyncMode:        downloader.FullSync,
        DatabaseCache:   256,
        DatabaseHandles: 256,
        TxPool:          core.DefaultTxPoolConfig,
        GPO:             eth.DefaultConfig.GPO,
        Miner: miner.Config{
            GasFloor: genesis.GasLimit * 9 / 10,
            GasCeil:  genesis.GasLimit * 11 / 10,
            GasPrice: big.NewInt(1),
            Recommit: time.Second,
        },
    })
    if err != nil {
        return nil, nil, err
    }
    // Start the node and return if successful
    return stack, stack.Start()

    err = stack.Start()
    return stack, ethBackend, err
}

@@ -61,43 +61,39 @@ func main() {
    genesis := makeGenesis(faucets)

    var (
        nodes  []*node.Node
        nodes  []*eth.Ethereum
        enodes []*enode.Node
    )
    for i := 0; i < 4; i++ {
        // Start the node and wait until it's up
        node, err := makeMiner(genesis)
        stack, ethBackend, err := makeMiner(genesis)
        if err != nil {
            panic(err)
        }
        defer node.Close()
        defer stack.Close()

        for node.Server().NodeInfo().Ports.Listener == 0 {
        for stack.Server().NodeInfo().Ports.Listener == 0 {
            time.Sleep(250 * time.Millisecond)
        }
        // Connect the node to al the previous ones
        // Connect the node to all the previous ones
        for _, n := range enodes {
            node.Server().AddPeer(n)
            stack.Server().AddPeer(n)
        }
        // Start tracking the node and it's enode
        nodes = append(nodes, node)
        enodes = append(enodes, node.Server().Self())
        // Start tracking the node and its enode
        nodes = append(nodes, ethBackend)
        enodes = append(enodes, stack.Server().Self())

        // Inject the signer key and start sealing with it
        store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
        store := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
        if _, err := store.NewAccount(""); err != nil {
            panic(err)
        }
    }
    // Iterate over all the nodes and start signing with them
    time.Sleep(3 * time.Second)

    // Iterate over all the nodes and start mining
    time.Sleep(3 * time.Second)
    for _, node := range nodes {
        var ethereum *eth.Ethereum
        if err := node.Service(&ethereum); err != nil {
            panic(err)
        }
        if err := ethereum.StartMining(1); err != nil {
        if err := node.StartMining(1); err != nil {
            panic(err)
        }
    }
@@ -106,25 +102,22 @@ func main() {
    // Start injecting transactions from the faucets like crazy
    nonces := make([]uint64, len(faucets))
    for {
        // Pick a random mining node
        index := rand.Intn(len(faucets))
        backend := nodes[index%len(nodes)]

        // Fetch the accessor for the relevant signer
        var ethereum *eth.Ethereum
        if err := nodes[index%len(nodes)].Service(&ethereum); err != nil {
            panic(err)
        }
        // Create a self transaction and inject into the pool
        tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), types.HomesteadSigner{}, faucets[index])
        if err != nil {
            panic(err)
        }
        if err := ethereum.TxPool().AddLocal(tx); err != nil {
        if err := backend.TxPool().AddLocal(tx); err != nil {
            panic(err)
        }
        nonces[index]++

        // Wait if we're too saturated
        if pend, _ := ethereum.TxPool().Stats(); pend > 2048 {
        if pend, _ := backend.TxPool().Stats(); pend > 2048 {
            time.Sleep(100 * time.Millisecond)
        }
    }
@@ -149,7 +142,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
    return genesis
}

func makeMiner(genesis *core.Genesis) (*node.Node, error) {
func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
    // Define the basic configurations for the Ethereum node
    datadir, _ := ioutil.TempDir("", "")

@@ -165,31 +158,31 @@ func makeMiner(genesis *core.Genesis) (*node.Node, error) {
        NoUSB:             true,
        UseLightweightKDF: true,
    }
    // Start the node and configure a full Ethereum node on it
    // Create the node and configure a full Ethereum node on it
    stack, err := node.New(config)
    if err != nil {
        return nil, err
        return nil, nil, err
    }
    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
        return eth.New(ctx, &eth.Config{
            Genesis:         genesis,
            NetworkId:       genesis.Config.ChainID.Uint64(),
            SyncMode:        downloader.FullSync,
            DatabaseCache:   256,
            DatabaseHandles: 256,
            TxPool:          core.DefaultTxPoolConfig,
            GPO:             eth.DefaultConfig.GPO,
            Ethash:          eth.DefaultConfig.Ethash,
            Miner: miner.Config{
                GasFloor: genesis.GasLimit * 9 / 10,
                GasCeil:  genesis.GasLimit * 11 / 10,
                GasPrice: big.NewInt(1),
                Recommit: time.Second,
            },
        })
    }); err != nil {
        return nil, err
    ethBackend, err := eth.New(stack, &eth.Config{
        Genesis:         genesis,
        NetworkId:       genesis.Config.ChainID.Uint64(),
        SyncMode:        downloader.FullSync,
        DatabaseCache:   256,
        DatabaseHandles: 256,
        TxPool:          core.DefaultTxPoolConfig,
        GPO:             eth.DefaultConfig.GPO,
        Ethash:          eth.DefaultConfig.Ethash,
        Miner: miner.Config{
            GasFloor: genesis.GasLimit * 9 / 10,
            GasCeil:  genesis.GasLimit * 11 / 10,
            GasPrice: big.NewInt(1),
            Recommit: time.Second,
        },
    })
    if err != nil {
        return nil, nil, err
    }
    // Start the node and return if successful
    return stack, stack.Start()

    err = stack.Start()
    return stack, ethBackend, err
}

@@ -305,6 +305,28 @@ func (w *worker) close() {
    close(w.exitCh)
}

// recalcRecommit recalculates the resubmitting interval upon feedback.
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
    var (
        prevF = float64(prev.Nanoseconds())
        next  float64
    )
    if inc {
        next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
        max := float64(maxRecommitInterval.Nanoseconds())
        if next > max {
            next = max
        }
    } else {
        next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
        min := float64(minRecommit.Nanoseconds())
        if next < min {
            next = min
        }
    }
    return time.Duration(int64(next))
}

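To see what the extracted helper computes, here is a standalone re-implementation for illustration. The constant values are assumptions (the package's intervalAdjustRatio, intervalAdjustBias, and maxRecommitInterval are not shown in this compare view); only the formula itself is taken from the hunk above:

package main

import (
    "fmt"
    "time"
)

// Assumed constants, mirroring miner/worker.go at the time of this diff.
const (
    intervalAdjustRatio = 0.1
    intervalAdjustBias  = 200 * 1000.0 * 1000.0 // 200ms, in nanoseconds
    maxRecommitInterval = 15 * time.Second
)

// recalcRecommit re-implements the pure helper from the hunk above: blend
// the previous interval toward the target by the adjustment ratio, nudged
// by the bias, then clamp to [minRecommit, maxRecommitInterval].
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
    var (
        prevF = float64(prev.Nanoseconds())
        next  float64
    )
    if inc {
        next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
        if max := float64(maxRecommitInterval.Nanoseconds()); next > max {
            next = max
        }
    } else {
        next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
        if min := float64(minRecommit.Nanoseconds()); next < min {
            next = min
        }
    }
    return time.Duration(int64(next))
}

func main() {
    // Increasing from 3s toward a 6s target moves a tenth of the way, plus
    // the bias: 0.9*3s + 0.1*(6s+0.2s) = 3.32s.
    fmt.Println(recalcRecommit(time.Second, 3*time.Second, float64((6 * time.Second).Nanoseconds()), true))
}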
// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
    var (
@@ -327,27 +349,6 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
        timer.Reset(recommit)
        atomic.StoreInt32(&w.newTxs, 0)
    }
    // recalcRecommit recalculates the resubmitting interval upon feedback.
    recalcRecommit := func(target float64, inc bool) {
        var (
            prev = float64(recommit.Nanoseconds())
            next float64
        )
        if inc {
            next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
            // Recap if interval is larger than the maximum time interval
            if next > float64(maxRecommitInterval.Nanoseconds()) {
                next = float64(maxRecommitInterval.Nanoseconds())
            }
        } else {
            next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
            // Recap if interval is less than the user specified minimum
            if next < float64(minRecommit.Nanoseconds()) {
                next = float64(minRecommit.Nanoseconds())
            }
        }
        recommit = time.Duration(int64(next))
    }
    // clearPending cleans the stale pending tasks.
    clearPending := func(number uint64) {
        w.pendingMu.Lock()
@@ -400,11 +401,12 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
            // Adjust resubmit interval by feedback.
            if adjust.inc {
                before := recommit
                recalcRecommit(float64(recommit.Nanoseconds())/adjust.ratio, true)
                target := float64(recommit.Nanoseconds()) / adjust.ratio
                recommit = recalcRecommit(minRecommit, recommit, target, true)
                log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
            } else {
                before := recommit
                recalcRecommit(float64(minRecommit.Nanoseconds()), false)
                recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false)
                log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
            }

@@ -553,7 +555,7 @@ func (w *worker) taskLoop() {
                continue
            }
            w.pendingMu.Lock()
            w.pendingTasks[w.engine.SealHash(task.block.Header())] = task
            w.pendingTasks[sealHash] = task
            w.pendingMu.Unlock()

            if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
@@ -974,13 +976,9 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
    // Deep copy receipts here to avoid interaction between different tasks.
    receipts := make([]*types.Receipt, len(w.current.receipts))
    for i, l := range w.current.receipts {
        receipts[i] = new(types.Receipt)
        *receipts[i] = *l
    }
    receipts := copyReceipts(w.current.receipts)
    s := w.current.state.Copy()
    block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, w.current.receipts)
    block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
    if err != nil {
        return err
    }
@@ -991,15 +989,10 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
    select {
    case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
        w.unconfirmed.Shift(block.NumberU64() - 1)

        feesWei := new(big.Int)
        for i, tx := range block.Transactions() {
            feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
        }
        feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))

        log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
            "uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start)))
            "uncles", len(uncles), "txs", w.current.tcount,
            "gas", block.GasUsed(), "fees", totalFees(block, receipts),
            "elapsed", common.PrettyDuration(time.Since(start)))

    case <-w.exitCh:
        log.Info("Worker has exited")
@@ -1011,6 +1004,16 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyReceipts makes a deep copy of the given receipts.
|
||||
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
|
||||
result := make([]*types.Receipt, len(receipts))
|
||||
for i, l := range receipts {
|
||||
cpy := *l
|
||||
result[i] = &cpy
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
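Pulling the copy logic out into copyReceipts also documents why it exists: the sealing task and later consumers must not share *types.Receipt pointers, or a mutation in one task would leak into another. A toy illustration of the aliasing hazard a deep copy avoids — the Receipt struct here is a stand-in, not the real types.Receipt:

package main

import "fmt"

// Receipt is a stand-in for types.Receipt, for illustration only.
type Receipt struct{ GasUsed uint64 }

// shallow copies the slice header only; both slices still point
// at the same underlying Receipt values.
func shallow(src []*Receipt) []*Receipt {
	dst := make([]*Receipt, len(src))
	copy(dst, src)
	return dst
}

// deep mirrors the shape of the copyReceipts helper above.
func deep(src []*Receipt) []*Receipt {
	dst := make([]*Receipt, len(src))
	for i, r := range src {
		cpy := *r
		dst[i] = &cpy
	}
	return dst
}

func main() {
	orig := []*Receipt{{GasUsed: 21000}}
	s, d := shallow(orig), deep(orig)

	orig[0].GasUsed = 99999
	fmt.Println(s[0].GasUsed) // 99999 — the shallow copy aliases the original
	fmt.Println(d[0].GasUsed) // 21000 — the deep copy is isolated
}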
// postSideBlock fires a side chain event, only use it for testing.
func (w *worker) postSideBlock(event core.ChainSideEvent) {
select {
@@ -1018,3 +1021,12 @@ func (w *worker) postSideBlock(event core.ChainSideEvent) {
case <-w.exitCh:
}
}

// totalFees computes total consumed fees in ETH. Block transactions and receipts have to have the same order.
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float {
feesWei := new(big.Int)
for i, tx := range block.Transactions() {
feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
}
return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
}
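totalFees keeps the per-transaction arithmetic in integer wei (gasUsed × gasPrice) and converts to ETH only once, at the end, via big.Float, so no precision is lost during summation. The same arithmetic with made-up numbers:

package main

import (
	"fmt"
	"math/big"
)

const ether = 1e18 // wei per ETH, matching params.Ether

func main() {
	// Made-up (gasUsed, gasPrice-in-wei) pairs for two transactions.
	txs := []struct {
		gasUsed  uint64
		gasPrice *big.Int
	}{
		{21000, big.NewInt(20_000_000_000)}, // plain transfer at 20 gwei
		{65000, big.NewInt(35_000_000_000)}, // contract call at 35 gwei
	}

	// Accumulate exact fees in wei using integer math.
	feesWei := new(big.Int)
	for _, tx := range txs {
		fee := new(big.Int).Mul(new(big.Int).SetUint64(tx.gasUsed), tx.gasPrice)
		feesWei.Add(feesWei, fee)
	}

	// Convert to ETH only for display.
	feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), big.NewFloat(ether))
	fmt.Println(feesWei, "wei =", feesEth, "ETH") // 2695000000000000 wei = 0.002695 ETH
}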
@@ -32,7 +32,7 @@ import (
// Signer is an interface defining the callback when a contract requires a
// method to sign the transaction before submission.
type Signer interface {
Sign(*Address, *Transaction) (tx *Transaction, _ error)
Sign(addr *Address, unsignedTx *Transaction) (tx *Transaction, _ error)
}

type MobileSigner struct {

@@ -175,49 +175,44 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) {
ethConf.SyncMode = downloader.LightSync
ethConf.NetworkId = uint64(config.EthereumNetworkID)
ethConf.DatabaseCache = config.EthereumDatabaseCache
if err := rawStack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return les.New(ctx, &ethConf)
}); err != nil {
lesBackend, err := les.New(rawStack, &ethConf)
if err != nil {
return nil, fmt.Errorf("ethereum init: %v", err)
}
// If netstats reporting is requested, do it
if config.EthereumNetStats != "" {
if err := rawStack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
var lesServ *les.LightEthereum
ctx.Service(&lesServ)

return ethstats.New(config.EthereumNetStats, nil, lesServ)
}); err != nil {
if err := ethstats.New(rawStack, lesBackend.ApiBackend, lesBackend.Engine(), config.EthereumNetStats); err != nil {
return nil, fmt.Errorf("netstats init: %v", err)
}
}
}
// Register the Whisper protocol if requested
if config.WhisperEnabled {
if err := rawStack.Register(func(*node.ServiceContext) (node.Service, error) {
return whisper.New(&whisper.DefaultConfig), nil
}); err != nil {
if _, err := whisper.New(rawStack, &whisper.DefaultConfig); err != nil {
return nil, fmt.Errorf("whisper init: %v", err)
}
}
return &Node{rawStack}, nil
}

// Close terminates a running node along with all it's services, tearing internal
// state doen too. It's not possible to restart a closed node.
// Close terminates a running node along with all it's services, tearing internal state
// down. It is not possible to restart a closed node.
func (n *Node) Close() error {
return n.node.Close()
}

// Start creates a live P2P node and starts running it.
func (n *Node) Start() error {
// TODO: recreate the node so it can be started multiple times
return n.node.Start()
}

// Stop terminates a running node along with all it's services. If the node was
// not started, an error is returned.
// Stop terminates a running node along with all its services. If the node was not started,
// an error is returned. It is not possible to restart a stopped node.
//
// Deprecated: use Close()
func (n *Node) Stop() error {
return n.node.Stop()
return n.node.Close()
}

// GetEthereumClient retrieves a client to access the Ethereum subsystem.

183
node/api.go
@@ -23,26 +23,46 @@ import (

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
)

// PrivateAdminAPI is the collection of administrative API methods exposed only
// over a secure RPC channel.
type PrivateAdminAPI struct {
node *Node // Node interfaced by this API
// apis returns the collection of built-in RPC APIs.
func (n *Node) apis() []rpc.API {
return []rpc.API{
{
Namespace: "admin",
Version: "1.0",
Service: &privateAdminAPI{n},
}, {
Namespace: "admin",
Version: "1.0",
Service: &publicAdminAPI{n},
Public: true,
}, {
Namespace: "debug",
Version: "1.0",
Service: debug.Handler,
}, {
Namespace: "web3",
Version: "1.0",
Service: &publicWeb3API{n},
Public: true,
},
}
}
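With the built-in namespaces assembled in one place by apis(), a service that wants its own namespace follows the same shape: construct an rpc.API value and register it before the node starts. A minimal sketch under the new registration model — the hello service is hypothetical, and RegisterAPIs is assumed here to be the public registration entry point:

package main

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
)

// helloAPI is a hypothetical service exposing a "hello" namespace.
type helloAPI struct{}

func (h *helloAPI) Greet(name string) string { return "hello, " + name }

func main() {
	stack, err := node.New(&node.Config{HTTPHost: "127.0.0.1"})
	if err != nil {
		panic(err)
	}
	defer stack.Close()

	// APIs must be registered while the node is still INITIALIZING;
	// they go live on the RPC servers once Start() is called.
	stack.RegisterAPIs([]rpc.API{{
		Namespace: "hello",
		Version:   "1.0",
		Service:   &helloAPI{},
		Public:    true,
	}})

	if err := stack.Start(); err != nil {
		panic(err)
	}
}

Whether the namespace is actually reachable over HTTP or WebSocket additionally depends on the configured module whitelists (HTTPModules / WSModules).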
// NewPrivateAdminAPI creates a new API definition for the private admin methods
// of the node itself.
func NewPrivateAdminAPI(node *Node) *PrivateAdminAPI {
return &PrivateAdminAPI{node: node}
// privateAdminAPI is the collection of administrative API methods exposed only
// over a secure RPC channel.
type privateAdminAPI struct {
node *Node // Node interfaced by this API
}

// AddPeer requests connecting to a remote node, and also maintaining the new
// connection at all times, even reconnecting if it is lost.
func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
func (api *privateAdminAPI) AddPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -58,7 +78,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
}

// RemovePeer disconnects from a remote node if the connection exists
func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
func (api *privateAdminAPI) RemovePeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -74,7 +94,7 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
}

// AddTrustedPeer allows a remote node to always connect, even if slots are full
func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
func (api *privateAdminAPI) AddTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -90,7 +110,7 @@ func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {

// RemoveTrustedPeer removes a remote node from the trusted peer set, but it
// does not disconnect it automatically.
func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
func (api *privateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -106,7 +126,7 @@ func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {

// PeerEvents creates an RPC subscription which receives peer events from the
// node's p2p.Server
func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -143,14 +163,11 @@ func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
}

// StartRPC starts the HTTP RPC API server.
func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
api.node.lock.Lock()
defer api.node.lock.Unlock()

if api.node.httpHandler != nil {
return false, fmt.Errorf("HTTP RPC already running on %s", api.node.httpEndpoint)
}

// Determine host and port.
if host == nil {
h := DefaultHTTPHost
if api.node.config.HTTPHost != "" {
@@ -162,57 +179,55 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
port = &api.node.config.HTTPPort
}

allowedOrigins := api.node.config.HTTPCors
// Determine config.
config := httpConfig{
CorsAllowedOrigins: api.node.config.HTTPCors,
Vhosts: api.node.config.HTTPVirtualHosts,
Modules: api.node.config.HTTPModules,
}
if cors != nil {
allowedOrigins = nil
config.CorsAllowedOrigins = nil
for _, origin := range strings.Split(*cors, ",") {
allowedOrigins = append(allowedOrigins, strings.TrimSpace(origin))
config.CorsAllowedOrigins = append(config.CorsAllowedOrigins, strings.TrimSpace(origin))
}
}

allowedVHosts := api.node.config.HTTPVirtualHosts
if vhosts != nil {
allowedVHosts = nil
config.Vhosts = nil
for _, vhost := range strings.Split(*host, ",") {
allowedVHosts = append(allowedVHosts, strings.TrimSpace(vhost))
config.Vhosts = append(config.Vhosts, strings.TrimSpace(vhost))
}
}

modules := api.node.httpWhitelist
if apis != nil {
modules = nil
config.Modules = nil
for _, m := range strings.Split(*apis, ",") {
modules = append(modules, strings.TrimSpace(m))
config.Modules = append(config.Modules, strings.TrimSpace(m))
}
}

if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, allowedOrigins, allowedVHosts, api.node.config.HTTPTimeouts, api.node.config.WSOrigins); err != nil {
if err := api.node.http.setListenAddr(*host, *port); err != nil {
return false, err
}
if err := api.node.http.enableRPC(api.node.rpcAPIs, config); err != nil {
return false, err
}
if err := api.node.http.start(); err != nil {
return false, err
}
return true, nil
}

// StopRPC terminates an already running HTTP RPC API endpoint.
func (api *PrivateAdminAPI) StopRPC() (bool, error) {
api.node.lock.Lock()
defer api.node.lock.Unlock()

if api.node.httpHandler == nil {
return false, fmt.Errorf("HTTP RPC not running")
}
api.node.stopHTTP()
// StopRPC shuts down the HTTP server.
func (api *privateAdminAPI) StopRPC() (bool, error) {
api.node.http.stop()
return true, nil
}

// StartWS starts the websocket RPC API server.
func (api *PrivateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) {
func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) {
api.node.lock.Lock()
defer api.node.lock.Unlock()

if api.node.wsHandler != nil {
return false, fmt.Errorf("WebSocket RPC already running on %s", api.node.wsEndpoint)
}

// Determine host and port.
if host == nil {
h := DefaultWSHost
if api.node.config.WSHost != "" {
@@ -224,55 +239,56 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *int, allowedOrigins *str
port = &api.node.config.WSPort
}

origins := api.node.config.WSOrigins
if allowedOrigins != nil {
origins = nil
for _, origin := range strings.Split(*allowedOrigins, ",") {
origins = append(origins, strings.TrimSpace(origin))
}
// Determine config.
config := wsConfig{
Modules: api.node.config.WSModules,
Origins: api.node.config.WSOrigins,
// ExposeAll: api.node.config.WSExposeAll,
}

modules := api.node.config.WSModules
if apis != nil {
modules = nil
config.Modules = nil
for _, m := range strings.Split(*apis, ",") {
modules = append(modules, strings.TrimSpace(m))
config.Modules = append(config.Modules, strings.TrimSpace(m))
}
}
if allowedOrigins != nil {
config.Origins = nil
for _, origin := range strings.Split(*allowedOrigins, ",") {
config.Origins = append(config.Origins, strings.TrimSpace(origin))
}
}

if err := api.node.startWS(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, origins, api.node.config.WSExposeAll); err != nil {
// Enable WebSocket on the server.
server := api.node.wsServerForPort(*port)
if err := server.setListenAddr(*host, *port); err != nil {
return false, err
}
return true, nil
}

// StopWS terminates an already running websocket RPC API endpoint.
func (api *PrivateAdminAPI) StopWS() (bool, error) {
api.node.lock.Lock()
defer api.node.lock.Unlock()

if api.node.wsHandler == nil {
return false, fmt.Errorf("WebSocket RPC not running")
if err := server.enableWS(api.node.rpcAPIs, config); err != nil {
return false, err
}
api.node.stopWS()
if err := server.start(); err != nil {
return false, err
}
api.node.http.log.Info("WebSocket endpoint opened", "url", api.node.WSEndpoint())
return true, nil
}

// PublicAdminAPI is the collection of administrative API methods exposed over
// both secure and unsecure RPC channels.
type PublicAdminAPI struct {
node *Node // Node interfaced by this API
// StopWS terminates all WebSocket servers.
func (api *privateAdminAPI) StopWS() (bool, error) {
api.node.http.stopWS()
api.node.ws.stop()
return true, nil
}
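After the rework, admin_startWS can attach the WebSocket handler to the port the HTTP server already listens on, which is what wsServerForPort resolves; StopWS correspondingly tears WebSocket handling down on both servers. A sketch of driving these methods from a client — the IPC path and port here are assumptions for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumed IPC path of a locally running node.
	client, err := rpc.Dial("/tmp/geth.ipc")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Ask the node to serve WebSocket on 127.0.0.1:8546; nil origins and
	// nil module list fall back to the node's configured defaults. Passing
	// the HTTP port instead would co-host WS on the same listener.
	var ok bool
	if err := client.Call(&ok, "admin_startWS", "127.0.0.1", 8546, nil, nil); err != nil {
		panic(err)
	}
	fmt.Println("ws started:", ok)

	// Shut it back down; per the tests below, stopping twice is harmless.
	if err := client.Call(&ok, "admin_stopWS"); err != nil {
		panic(err)
	}
	fmt.Println("ws stopped:", ok)
}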
// NewPublicAdminAPI creates a new API definition for the public admin methods
// of the node itself.
func NewPublicAdminAPI(node *Node) *PublicAdminAPI {
return &PublicAdminAPI{node: node}
// publicAdminAPI is the collection of administrative API methods exposed over
// both secure and unsecure RPC channels.
type publicAdminAPI struct {
node *Node // Node interfaced by this API
}

// Peers retrieves all the information we know about each individual peer at the
// protocol granularity.
func (api *PublicAdminAPI) Peers() ([]*p2p.PeerInfo, error) {
func (api *publicAdminAPI) Peers() ([]*p2p.PeerInfo, error) {
server := api.node.Server()
if server == nil {
return nil, ErrNodeStopped
@@ -282,7 +298,7 @@ func (api *PublicAdminAPI) Peers() ([]*p2p.PeerInfo, error) {

// NodeInfo retrieves all the information we know about the host node at the
// protocol granularity.
func (api *PublicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) {
func (api *publicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) {
server := api.node.Server()
if server == nil {
return nil, ErrNodeStopped
@@ -291,27 +307,22 @@ func (api *PublicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) {
}

// Datadir retrieves the current data directory the node is using.
func (api *PublicAdminAPI) Datadir() string {
func (api *publicAdminAPI) Datadir() string {
return api.node.DataDir()
}

// PublicWeb3API offers helper utils
type PublicWeb3API struct {
// publicWeb3API offers helper utils
type publicWeb3API struct {
stack *Node
}

// NewPublicWeb3API creates a new Web3Service instance
func NewPublicWeb3API(stack *Node) *PublicWeb3API {
return &PublicWeb3API{stack}
}

// ClientVersion returns the node name
func (s *PublicWeb3API) ClientVersion() string {
func (s *publicWeb3API) ClientVersion() string {
return s.stack.Server().Name
}

// Sha3 applies the ethereum sha3 implementation on the input.
// It assumes the input is hex encoded.
func (s *PublicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes {
func (s *publicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes {
return crypto.Keccak256(input)
}

350
node/api_test.go
Normal file
@@ -0,0 +1,350 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
"bytes"
"io"
"net"
"net/http"
"net/url"
"strings"
"testing"

"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
)

// This test uses the admin_startRPC and admin_startWS APIs,
// checking whether the HTTP server is started correctly.
func TestStartRPC(t *testing.T) {
type test struct {
name string
cfg Config
fn func(*testing.T, *Node, *privateAdminAPI)

// Checks. These run after the node is configured and all API calls have been made.
wantReachable bool // whether the HTTP server should be reachable at all
wantHandlers bool // whether RegisterHandler handlers should be accessible
wantRPC bool // whether JSON-RPC/HTTP should be accessible
wantWS bool // whether JSON-RPC/WS should be accessible
}

tests := []test{
{
name: "all off",
cfg: Config{},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
},
wantReachable: false,
wantHandlers: false,
wantRPC: false,
wantWS: false,
},
{
name: "rpc enabled through config",
cfg: Config{HTTPHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
},
wantReachable: true,
wantHandlers: true,
wantRPC: true,
wantWS: false,
},
{
name: "rpc enabled through API",
cfg: Config{},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil)
assert.NoError(t, err)
},
wantReachable: true,
wantHandlers: true,
wantRPC: true,
wantWS: false,
},
{
name: "rpc start again after failure",
cfg: Config{},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
// Listen on a random port.
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal("can't listen:", err)
}
defer listener.Close()
port := listener.Addr().(*net.TCPAddr).Port

// Now try to start RPC on that port. This should fail.
_, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil)
if err == nil {
t.Fatal("StartRPC should have failed on port", port)
}

// Try again after unblocking the port. It should work this time.
listener.Close()
_, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil)
assert.NoError(t, err)
},
wantReachable: true,
wantHandlers: true,
wantRPC: true,
wantWS: false,
},
{
name: "rpc stopped through API",
cfg: Config{HTTPHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StopRPC()
assert.NoError(t, err)
},
wantReachable: false,
wantHandlers: false,
wantRPC: false,
wantWS: false,
},
{
name: "rpc stopped twice",
cfg: Config{HTTPHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StopRPC()
assert.NoError(t, err)

_, err = api.StopRPC()
assert.NoError(t, err)
},
wantReachable: false,
wantHandlers: false,
wantRPC: false,
wantWS: false,
},
{
name: "ws enabled through config",
cfg: Config{WSHost: "127.0.0.1"},
wantReachable: true,
wantHandlers: false,
wantRPC: false,
wantWS: true,
},
{
name: "ws enabled through API",
cfg: Config{},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StartWS(sp("127.0.0.1"), ip(0), nil, nil)
assert.NoError(t, err)
},
wantReachable: true,
wantHandlers: false,
wantRPC: false,
wantWS: true,
},
{
name: "ws stopped through API",
cfg: Config{WSHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StopWS()
assert.NoError(t, err)
},
wantReachable: false,
wantHandlers: false,
wantRPC: false,
wantWS: false,
},
{
name: "ws stopped twice",
cfg: Config{WSHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StopWS()
assert.NoError(t, err)

_, err = api.StopWS()
assert.NoError(t, err)
},
wantReachable: false,
wantHandlers: false,
wantRPC: false,
wantWS: false,
},
{
name: "ws enabled after RPC",
cfg: Config{HTTPHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
wsport := n.http.port
_, err := api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil)
assert.NoError(t, err)
},
wantReachable: true,
wantHandlers: true,
wantRPC: true,
wantWS: true,
},
{
name: "ws enabled after RPC then stopped",
cfg: Config{HTTPHost: "127.0.0.1"},
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
wsport := n.http.port
_, err := api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil)
assert.NoError(t, err)

_, err = api.StopWS()
assert.NoError(t, err)
},
wantReachable: true,
wantHandlers: true,
wantRPC: true,
wantWS: false,
},
{
name: "rpc stopped with ws enabled",
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil)
assert.NoError(t, err)

wsport := n.http.port
_, err = api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil)
assert.NoError(t, err)

_, err = api.StopRPC()
assert.NoError(t, err)
},
wantReachable: false,
wantHandlers: false,
wantRPC: false,
wantWS: false,
},
{
name: "rpc enabled after ws",
fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
_, err := api.StartWS(sp("127.0.0.1"), ip(0), nil, nil)
assert.NoError(t, err)

wsport := n.http.port
_, err = api.StartRPC(sp("127.0.0.1"), ip(wsport), nil, nil, nil)
assert.NoError(t, err)
},
wantReachable: true,
wantHandlers: true,
wantRPC: true,
wantWS: true,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// Apply some sane defaults.
config := test.cfg
// config.Logger = testlog.Logger(t, log.LvlDebug)
config.NoUSB = true
config.P2P.NoDiscovery = true

// Create Node.
stack, err := New(&config)
if err != nil {
t.Fatal("can't create node:", err)
}
defer stack.Close()

// Register the test handler.
stack.RegisterHandler("test", "/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
}))

if err := stack.Start(); err != nil {
t.Fatal("can't start node:", err)
}

// Run the API call hook.
if test.fn != nil {
test.fn(t, stack, &privateAdminAPI{stack})
}

// Check if the HTTP endpoints are available.
baseURL := stack.HTTPEndpoint()
reachable := checkReachable(baseURL)
handlersAvailable := checkBodyOK(baseURL + "/test")
rpcAvailable := checkRPC(baseURL)
wsAvailable := checkRPC(strings.Replace(baseURL, "http://", "ws://", 1))
if reachable != test.wantReachable {
t.Errorf("HTTP server is %sreachable, want it %sreachable", not(reachable), not(test.wantReachable))
}
if handlersAvailable != test.wantHandlers {
t.Errorf("RegisterHandler handlers %savailable, want them %savailable", not(handlersAvailable), not(test.wantHandlers))
}
if rpcAvailable != test.wantRPC {
t.Errorf("HTTP RPC %savailable, want it %savailable", not(rpcAvailable), not(test.wantRPC))
}
if wsAvailable != test.wantWS {
t.Errorf("WS RPC %savailable, want it %savailable", not(wsAvailable), not(test.wantWS))
}
})
}
}

// checkReachable checks if the TCP endpoint in rawurl is open.
func checkReachable(rawurl string) bool {
u, err := url.Parse(rawurl)
if err != nil {
panic(err)
}
conn, err := net.Dial("tcp", u.Host)
if err != nil {
return false
}
conn.Close()
return true
}

// checkBodyOK checks whether the given HTTP URL responds with 200 OK and body "OK".
func checkBodyOK(url string) bool {
resp, err := http.Get(url)
if err != nil {
return false
}
defer resp.Body.Close()

if resp.StatusCode != 200 {
return false
}
buf := make([]byte, 2)
if _, err = io.ReadFull(resp.Body, buf); err != nil {
return false
}
return bytes.Equal(buf, []byte("OK"))
}

// checkRPC checks whether JSON-RPC works against the given URL.
func checkRPC(url string) bool {
c, err := rpc.Dial(url)
if err != nil {
return false
}
defer c.Close()

_, err = c.SupportedModules()
return err == nil
}

// string/int pointer helpers.
func sp(s string) *string { return &s }
func ip(i int) *int { return &i }

func not(ok bool) string {
if ok {
return ""
}
return "not "
}
@@ -162,15 +162,6 @@ type Config struct {
// private APIs to untrusted users is a major security risk.
WSExposeAll bool `toml:",omitempty"`

// GraphQLHost is the host interface on which to start the GraphQL server. If this
// field is empty, no GraphQL API endpoint will be started.
GraphQLHost string

// GraphQLPort is the TCP port number on which to start the GraphQL server. The
// default zero value is/ valid and will pick a port number randomly (useful
// for ephemeral nodes).
GraphQLPort int `toml:",omitempty"`

// GraphQLCors is the Cross-Origin Resource Sharing header to send to requesting
// clients. Please be aware that CORS is a browser enforced security, it's fully
// useless for custom HTTP clients.
@@ -247,15 +238,6 @@ func (c *Config) HTTPEndpoint() string {
return fmt.Sprintf("%s:%d", c.HTTPHost, c.HTTPPort)
}

// GraphQLEndpoint resolves a GraphQL endpoint based on the configured host interface
// and port parameters.
func (c *Config) GraphQLEndpoint() string {
if c.GraphQLHost == "" {
return ""
}
return fmt.Sprintf("%s:%d", c.GraphQLHost, c.GraphQLPort)
}

// DefaultHTTPEndpoint returns the HTTP endpoint used by default.
func DefaultHTTPEndpoint() string {
config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort}
@@ -280,7 +262,7 @@ func DefaultWSEndpoint() string {
// ExtRPCEnabled returns the indicator whether node enables the external
// RPC(http, ws or graphql).
func (c *Config) ExtRPCEnabled() bool {
return c.HTTPHost != "" || c.WSHost != "" || c.GraphQLHost != ""
return c.HTTPHost != "" || c.WSHost != ""
}

// NodeName returns the devp2p node identifier.

@@ -45,7 +45,6 @@ var DefaultConfig = Config{
HTTPTimeouts: rpc.DefaultHTTPTimeouts,
WSPort: DefaultWSPort,
WSModules: []string{"net", "web3"},
GraphQLPort: DefaultGraphQLPort,
GraphQLVirtualHosts: []string{"localhost"},
P2P: p2p.Config{
ListenAddr: ":30303",

37
node/doc.go
@@ -22,6 +22,43 @@ resources to provide RPC APIs. Services can also offer devp2p protocols, which a
up to the devp2p network when the node instance is started.


Node Lifecycle

The Node object has a lifecycle consisting of three basic states, INITIALIZING, RUNNING
and CLOSED.


    ●───────┐
         New()
            │
            ▼
    INITIALIZING ────Start()─┐
            │                │
            │                ▼
         Close()          RUNNING
            │                │
            ▼                │
          CLOSED ◀──────Close()─┘


Creating a Node allocates basic resources such as the data directory and returns the node
in its INITIALIZING state. Lifecycle objects, RPC APIs and peer-to-peer networking
protocols can be registered in this state. Basic operations such as opening a key-value
database are permitted while initializing.

Once everything is registered, the node can be started, which moves it into the RUNNING
state. Starting the node starts all registered Lifecycle objects and enables RPC and
peer-to-peer networking. Note that no additional Lifecycles, APIs or p2p protocols can be
registered while the node is running.

Closing the node releases all held resources. The actions performed by Close depend on the
state it was in. When closing a node in INITIALIZING state, resources related to the data
directory are released. If the node was RUNNING, closing it also stops all Lifecycle
objects and shuts down RPC and peer-to-peer networking.

You must always call Close on Node, even if the node was not started.


Resources Managed By Node

All file-system resources used by a node instance are located in a directory called the
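The last sentence above is the easy one to miss: a node that never reaches RUNNING still holds its data directory and instance locks, so it still needs Close. A minimal sketch of both exit paths through the state diagram — the data directory path is an arbitrary assumption:

package main

import "github.com/ethereum/go-ethereum/node"

func main() {
	// Path 1: INITIALIZING -> CLOSED. The node is never started, but the
	// resources it acquired at creation must still be released.
	stack, err := node.New(&node.Config{DataDir: "/tmp/demo-node"})
	if err != nil {
		panic(err)
	}
	stack.Close()

	// Path 2: INITIALIZING -> RUNNING -> CLOSED. Here Close also stops all
	// registered Lifecycle objects and shuts down RPC and p2p networking
	// before releasing resources.
	stack, err = node.New(&node.Config{DataDir: "/tmp/demo-node"})
	if err != nil {
		panic(err)
	}
	if err := stack.Start(); err != nil {
		panic(err)
	}
	stack.Close()
}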
@@ -48,21 +48,6 @@ func StartHTTPEndpoint(endpoint string, timeouts rpc.HTTPTimeouts, handler http.
return httpSrv, listener.Addr(), err
}

// startWSEndpoint starts a websocket endpoint.
func startWSEndpoint(endpoint string, handler http.Handler) (*http.Server, net.Addr, error) {
// start the HTTP listener
var (
listener net.Listener
err error
)
if listener, err = net.Listen("tcp", endpoint); err != nil {
return nil, nil, err
}
wsSrv := &http.Server{Handler: handler}
go wsSrv.Serve(listener)
return wsSrv, listener.Addr(), err
}

// checkModuleAvailability checks that all names given in modules are actually
// available API services. It assumes that the MetadataApi module ("rpc") is always available;
// the registration of this "rpc" module happens in NewServer() and is thus common to all endpoints.

@@ -39,17 +39,6 @@ func convertFileLockError(err error) error {
return err
}

// DuplicateServiceError is returned during Node startup if a registered service
// constructor returns a service of the same type that was already started.
type DuplicateServiceError struct {
Kind reflect.Type
}

// Error generates a textual representation of the duplicate service error.
func (e *DuplicateServiceError) Error() string {
return fmt.Sprintf("duplicate service: %v", e.Kind)
}

// StopError is returned if a Node fails to stop either any of its registered
// services or itself.
type StopError struct {

31
node/lifecycle.go
Normal file
@@ -0,0 +1,31 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

// Lifecycle encompasses the behavior of services that can be started and stopped
// on the node. Lifecycle management is delegated to the node, but it is the
// responsibility of the service-specific package to configure and register the
// service on the node using the `RegisterLifecycle` method.
type Lifecycle interface {
// Start is called after all services have been constructed and the networking
// layer was also initialized to spawn any goroutines required by the service.
Start() error

// Stop terminates all goroutines belonging to the service, blocking until they
// are all terminated.
Stop() error
}
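The Stop contract — block until all of the service's goroutines are gone — is usually met with a quit channel plus a WaitGroup. A hypothetical ticker service sketched against this interface:

package main

import (
	"fmt"
	"sync"
	"time"
)

// tickerService is a hypothetical Lifecycle implementation.
type tickerService struct {
	quit chan struct{}
	wg   sync.WaitGroup
}

// Start spawns the service goroutine; the node calls this once the
// networking layer is up.
func (s *tickerService) Start() error {
	s.quit = make(chan struct{})
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				fmt.Println("tick")
			case <-s.quit:
				return
			}
		}
	}()
	return nil
}

// Stop signals the goroutine and blocks until it has exited,
// satisfying the Lifecycle contract.
func (s *tickerService) Stop() error {
	close(s.quit)
	s.wg.Wait()
	return nil
}

func main() {
	s := new(tickerService)
	s.Start()
	time.Sleep(2500 * time.Millisecond)
	s.Stop()
}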
870
node/node.go
File diff suppressed because it is too large

@@ -21,26 +21,20 @@ import (
"log"

"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc"
)

// SampleService is a trivial network service that can be attached to a node for
// SampleLifecycle is a trivial network service that can be attached to a node for
// life cycle management.
//
// The following methods are needed to implement a node.Service:
// - Protocols() []p2p.Protocol - devp2p protocols the service can communicate on
// - APIs() []rpc.API - api methods the service wants to expose on rpc channels
// The following methods are needed to implement a node.Lifecycle:
// - Start() error - method invoked when the node is ready to start the service
// - Stop() error - method invoked when the node terminates the service
type SampleService struct{}
type SampleLifecycle struct{}

func (s *SampleService) Protocols() []p2p.Protocol { return nil }
func (s *SampleService) APIs() []rpc.API { return nil }
func (s *SampleService) Start(*p2p.Server) error { fmt.Println("Service starting..."); return nil }
func (s *SampleService) Stop() error { fmt.Println("Service stopping..."); return nil }
func (s *SampleLifecycle) Start() error { fmt.Println("Service starting..."); return nil }
func (s *SampleLifecycle) Stop() error { fmt.Println("Service stopping..."); return nil }

func ExampleService() {
func ExampleLifecycle() {
// Create a network node to run protocols with the default values.
stack, err := node.New(&node.Config{})
if err != nil {
@@ -48,29 +42,18 @@ func ExampleService() {
}
defer stack.Close()

// Create and register a simple network service. This is done through the definition
// of a node.ServiceConstructor that will instantiate a node.Service. The reason for
// the factory method approach is to support service restarts without relying on the
// individual implementations' support for such operations.
constructor := func(context *node.ServiceContext) (node.Service, error) {
return new(SampleService), nil
}
if err := stack.Register(constructor); err != nil {
log.Fatalf("Failed to register service: %v", err)
}
// Create and register a simple network Lifecycle.
service := new(SampleLifecycle)
stack.RegisterLifecycle(service)

// Boot up the entire protocol stack, do a restart and terminate
if err := stack.Start(); err != nil {
log.Fatalf("Failed to start the protocol stack: %v", err)
}
if err := stack.Restart(); err != nil {
log.Fatalf("Failed to restart the protocol stack: %v", err)
}
if err := stack.Stop(); err != nil {
if err := stack.Close(); err != nil {
log.Fatalf("Failed to stop the protocol stack: %v", err)
}
// Output:
// Service starting...
// Service stopping...
// Service starting...
// Service stopping...
}
Some files were not shown because too many files have changed in this diff