all: on-chain oracle checkpoint syncing (#19543)

* all: implement simple checkpoint syncing

cmd, les, node: remove callback mechanism

cmd, node: remove callback definition

les: simplify the registrar

les: expose checkpoint rpc services in the light client

les, light: don't store untrusted receipt

cmd, contracts, les: discard stale checkpoint

cmd, contracts/registrar: loosen restriction of registration

cmd, contracts: add replay-protection

all: off-chain multi-signature contract

params: deploy checkpoint contract for rinkeby

cmd/registrar: add raw signing mode for registrar

cmd/registrar, contracts/registrar, les: fixed messages

* cmd/registrar, contracts/registrar: fix lints

* accounts/abi/bind, les: address comments

* cmd, contracts, les, light, params: minor checkpoint sync cleanups

* cmd, eth, les, light: move checkpoint config to config file

* cmd, eth, les, params: address comments

* eth, les, params: address comments

* cmd: polish up the checkpoint admin CLI

* cmd, contracts, params: deploy new version contract

* cmd/checkpoint-admin: add another flag for clef mode signing

* cmd, contracts, les: rename and regen checkpoint oracle with abigen
Authored by gary rong, 2019-06-28 15:34:02 +08:00; committed by Péter Szilágyi.
Parent 702f52fb99, commit f7cdea2bdc.
49 changed files with 2861 additions and 383 deletions.


@@ -34,6 +34,8 @@ var (
ErrMinCap = errors.New("capacity too small")
ErrTotalCap = errors.New("total capacity exceeded")
ErrUnknownBenchmarkType = errors.New("unknown benchmark type")
ErrNoCheckpoint = errors.New("no local checkpoint provided")
ErrNotActivated = errors.New("checkpoint registrar is not activated")
dropCapacityDelay = time.Second // delay applied to decreasing capacity changes
)
@@ -470,3 +472,59 @@ func (api *PrivateLightServerAPI) Benchmark(setups []map[string]interface{}, pas
}
return result, nil
}
// PrivateLightAPI provides an API to access the LES light server or light client.
type PrivateLightAPI struct {
backend *lesCommons
reg *checkpointOracle
}
// NewPrivateLightAPI creates a new LES service API.
func NewPrivateLightAPI(backend *lesCommons, reg *checkpointOracle) *PrivateLightAPI {
return &PrivateLightAPI{
backend: backend,
reg: reg,
}
}
// LatestCheckpoint returns the latest local checkpoint package.
//
// The checkpoint package consists of 4 strings:
// result[0], hex encoded latest section index
// result[1], 32 bytes hex encoded latest section head hash
// result[2], 32 bytes hex encoded latest section canonical hash trie root hash
// result[3], 32 bytes hex encoded latest section bloom trie root hash
func (api *PrivateLightAPI) LatestCheckpoint() ([4]string, error) {
var res [4]string
cp := api.backend.latestLocalCheckpoint()
if cp.Empty() {
return res, ErrNoCheckpoint
}
res[0] = hexutil.EncodeUint64(cp.SectionIndex)
res[1], res[2], res[3] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex()
return res, nil
}
// GetCheckpoint returns the specific local checkpoint package.
//
// The checkpoint package consists of 3 strings:
// result[0], 32 bytes hex encoded section head hash
// result[1], 32 bytes hex encoded section canonical hash trie root hash
// result[2], 32 bytes hex encoded section bloom trie root hash
func (api *PrivateLightAPI) GetCheckpoint(index uint64) ([3]string, error) {
var res [3]string
cp := api.backend.getLocalCheckpoint(index)
if cp.Empty() {
return res, ErrNoCheckpoint
}
res[0], res[1], res[2] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex()
return res, nil
}
// GetCheckpointContractAddress returns the checkpoint contract address in hex format.
func (api *PrivateLightAPI) GetCheckpointContractAddress() (string, error) {
if api.reg == nil {
return "", ErrNotActivated
}
return api.reg.config.Address.Hex(), nil
}
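For illustration, a minimal sketch of how these new les_* endpoints could be queried over RPC, assuming a node that exposes the les namespace over IPC; the IPC path is a placeholder and the method names follow geth's usual namespace_method derivation from the Go method names.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; point it at a node running with the les API enabled.
	client, err := rpc.Dial("/tmp/geth.ipc")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// result[0] = section index, result[1..3] = section head, CHT root, bloom trie root.
	var latest [4]string
	if err := client.Call(&latest, "les_latestCheckpoint"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest local checkpoint:", latest)

	var addr string
	if err := client.Call(&addr, "les_getCheckpointContractAddress"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("checkpoint oracle address:", addr)
}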


@@ -23,6 +23,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/mclock"
@@ -43,14 +44,13 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
rpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/rpc"
)
type LightEthereum struct {
lesCommons
odr *LesOdr
relay *LesTxRelay
chainConfig *params.ChainConfig
// Channel for shutting down the service
shutdownChan chan bool
@@ -62,6 +62,7 @@ type LightEthereum struct {
serverPool *serverPool
reqDist *requestDistributor
retriever *retrieveManager
relay *lesTxRelay
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer
@@ -116,16 +117,20 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
}
leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg, trustedNodes)
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
leth.relay = NewLesTxRelay(peers, leth.retriever)
leth.relay = newLesTxRelay(peers, leth.retriever)
leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations)
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
checkpoint := config.Checkpoint
if checkpoint == nil {
checkpoint = params.TrustedCheckpoints[genesisHash]
}
// Note: NewLightChain adds the trusted checkpoint so it needs an ODR with
// indexers already set but not started yet
if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine, checkpoint); err != nil {
return nil, err
}
// Note: AddChildIndexer starts the update process for the child
@@ -141,32 +146,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
}
leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)
if leth.protocolManager, err = NewProtocolManager(
leth.chainConfig,
light.DefaultClientIndexerConfig,
true,
config.NetworkId,
leth.eventMux,
leth.engine,
leth.peers,
leth.blockchain,
nil,
chainDb,
leth.odr,
leth.relay,
leth.serverPool,
quitSync,
&leth.wg,
config.ULC,
nil); err != nil {
return nil, err
}
if leth.protocolManager.isULCEnabled() {
log.Warn("Ultra light client is enabled", "trustedNodes", len(leth.protocolManager.ulc.trustedKeys), "minTrustedFraction", leth.protocolManager.ulc.minTrustedFraction)
leth.blockchain.DisableCheckFreq()
}
leth.ApiBackend = &LesApiBackend{ctx.ExtRPCEnabled(), leth, nil}
gpoParams := config.GPO
@@ -174,6 +153,19 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
gpoParams.Default = config.Miner.GasPrice
}
leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)
oracle := config.CheckpointOracle
if oracle == nil {
oracle = params.CheckpointOracles[genesisHash]
}
registrar := newCheckpointOracle(oracle, leth.getLocalCheckpoint)
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, checkpoint, light.DefaultClientIndexerConfig, config.ULC, true, config.NetworkId, leth.eventMux, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.serverPool, registrar, quitSync, &leth.wg, nil); err != nil {
return nil, err
}
if leth.protocolManager.isULCEnabled() {
log.Warn("Ultra light client is enabled", "trustedNodes", len(leth.protocolManager.ulc.trustedKeys), "minTrustedFraction", leth.protocolManager.ulc.minTrustedFraction)
leth.blockchain.DisableCheckFreq()
}
return leth, nil
}
@@ -234,6 +226,11 @@ func (s *LightEthereum) APIs() []rpc.API {
Version: "1.0",
Service: s.netRPCService,
Public: true,
}, {
Namespace: "les",
Version: "1.0",
Service: NewPrivateLightAPI(&s.lesCommons, s.protocolManager.reg),
Public: false,
},
}...)
}
@@ -288,3 +285,12 @@ func (s *LightEthereum) Stop() error {
return nil
}
// SetContractBackend binds the registrar contract with the given contract backend.
func (s *LightEthereum) SetContractBackend(backend bind.ContractBackend) {
// Short circuit if registrar is nil
if s.protocolManager.reg == nil {
return
}
s.protocolManager.reg.start(backend)
}
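For illustration, a minimal sketch of wiring a custom checkpoint and oracle through the config fields used above (eth.Config.Checkpoint and eth.Config.CheckpointOracle), which override the params.TrustedCheckpoints / params.CheckpointOracles fallbacks when non-nil. The network id, addresses and signer set are placeholders for a hypothetical private network.

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/params"
)

// privateNetConfig returns an eth.Config carrying a hardcoded checkpoint and a
// checkpoint oracle definition for a hypothetical private network.
func privateNetConfig() *eth.Config {
	cfg := eth.DefaultConfig // copy the defaults
	cfg.NetworkId = 1337     // placeholder private network id

	cfg.Checkpoint = &params.TrustedCheckpoint{
		SectionIndex: 10, // placeholder values below
		SectionHead:  common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"),
		CHTRoot:      common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"),
		BloomRoot:    common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"),
	}
	cfg.CheckpointOracle = &params.CheckpointOracleConfig{
		Address:   common.HexToAddress("0x0000000000000000000000000000000000000042"),
		Signers:   []common.Address{common.HexToAddress("0x00000000000000000000000000000000000000aa")},
		Threshold: 1,
	}
	return &cfg
}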

les/checkpointoracle.go (new file, 158 lines)

@@ -0,0 +1,158 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"sync/atomic"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/checkpointoracle"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
// checkpointOracle is responsible for offering the latest stable checkpoint
// generated and announced by the contract admins on-chain. The checkpoint is
// verified by clients locally during the checkpoint syncing.
type checkpointOracle struct {
config *params.CheckpointOracleConfig
contract *checkpointoracle.CheckpointOracle
// Whether the contract backend is set.
running int32
getLocal func(uint64) params.TrustedCheckpoint // Function used to retrieve local checkpoint
syncDoneHook func() // Function used to notify that light syncing has completed.
}
// newCheckpointOracle returns a checkpoint registrar handler.
func newCheckpointOracle(config *params.CheckpointOracleConfig, getLocal func(uint64) params.TrustedCheckpoint) *checkpointOracle {
if config == nil {
log.Info("Checkpoint registrar is not enabled")
return nil
}
if config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold {
log.Warn("Invalid checkpoint registrar config")
return nil
}
log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold)
return &checkpointOracle{
config: config,
getLocal: getLocal,
}
}
// start binds the registrar contract and starts listening to the
// newCheckpointEvent for the server side.
func (reg *checkpointOracle) start(backend bind.ContractBackend) {
contract, err := checkpointoracle.NewCheckpointOracle(reg.config.Address, backend)
if err != nil {
log.Error("Oracle contract binding failed", "err", err)
return
}
if !atomic.CompareAndSwapInt32(&reg.running, 0, 1) {
log.Error("Already bound and listening to registrar")
return
}
reg.contract = contract
}
// isRunning returns whether the registrar is bound and listening.
func (reg *checkpointOracle) isRunning() bool {
return atomic.LoadInt32(&reg.running) == 1
}
// stableCheckpoint returns the stable checkpoint which was generated by local
// indexers and announced by trusted signers.
func (reg *checkpointOracle) stableCheckpoint() (*params.TrustedCheckpoint, uint64) {
// Retrieve the latest checkpoint from the contract, abort if empty
latest, hash, height, err := reg.contract.Contract().GetLatestCheckpoint(nil)
if err != nil || (latest == 0 && hash == [32]byte{}) {
return nil, 0
}
local := reg.getLocal(latest)
// The following scenarios may occur:
//
// * local node is out of sync so that it doesn't have the
// checkpoint which is registered in the contract.
// * local checkpoint doesn't match the registered one.
//
// In both cases, the server won't send the **stable** checkpoint
// to the client (no worry, the client can use a hardcoded one instead).
if local.HashEqual(common.Hash(hash)) {
return &local, height.Uint64()
}
return nil, 0
}
// verifySigners recovers the signer addresses from the given signatures and
// checks whether there are enough approvals to finalize the checkpoint.
func (reg *checkpointOracle) verifySigners(index uint64, hash [32]byte, signatures [][]byte) (bool, []common.Address) {
// Short circuit if the given signatures don't reach the threshold.
if len(signatures) < int(reg.config.Threshold) {
return false, nil
}
var (
signers []common.Address
checked = make(map[common.Address]struct{})
)
for i := 0; i < len(signatures); i++ {
if len(signatures[i]) != 65 {
continue
}
// EIP 191 style signatures
//
// Arguments when calculating hash to validate
// 1: byte(0x19) - the initial 0x19 byte
// 2: byte(0) - the version byte (data with intended validator)
// 3: this - the validator address
// -- Application specific data
// 4 : checkpoint section_index (uint64)
// 5 : checkpoint hash (bytes32)
// hash = keccak256(checkpoint_index, section_head, cht_root, bloom_root)
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, index)
data := append([]byte{0x19, 0x00}, append(reg.config.Address.Bytes(), append(buf, hash[:]...)...)...)
signatures[i][64] -= 27 // Transform V from 27/28 to 0/1 according to the yellow paper for verification.
pubkey, err := crypto.Ecrecover(crypto.Keccak256(data), signatures[i])
if err != nil {
return false, nil
}
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
if _, exist := checked[signer]; exist {
continue
}
for _, s := range reg.config.Signers {
if s == signer {
signers = append(signers, signer)
checked[signer] = struct{}{}
}
}
}
threshold := reg.config.Threshold
if uint64(len(signers)) < threshold {
log.Warn("Not enough signers to approve checkpoint", "signers", len(signers), "threshold", threshold)
return false, nil
}
return true, signers
}
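To make the verification path above concrete, here is a hedged sketch of the signing side it implies: an admin key signs the EIP-191 style payload (0x19 || 0x00 || oracle address || 8-byte big-endian section index || checkpoint hash), and the resulting V byte is shifted to 27/28 as verifySigners expects. The key and values are placeholders, not the project's own tooling.

package main

import (
	"crypto/ecdsa"
	"encoding/binary"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// signCheckpoint produces a 65-byte [R || S || V] signature over the same
// EIP-191 style payload that verifySigners reconstructs.
func signCheckpoint(oracle common.Address, index uint64, checkpointHash common.Hash, key *ecdsa.PrivateKey) ([]byte, error) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, index)
	data := append([]byte{0x19, 0x00}, append(oracle.Bytes(), append(buf, checkpointHash.Bytes()...)...)...)
	sig, err := crypto.Sign(crypto.Keccak256(data), key) // V is 0/1 here
	if err != nil {
		return nil, err
	}
	sig[64] += 27 // verifySigners subtracts 27 again before Ecrecover
	return sig, nil
}

func main() {
	key, _ := crypto.GenerateKey() // placeholder signer key
	sig, err := signCheckpoint(
		common.HexToAddress("0x0000000000000000000000000000000000000042"), // placeholder oracle address
		1, // placeholder section index
		common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), // placeholder checkpoint hash
		key,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature: %x\n", sig)
}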


@@ -76,24 +76,6 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
// nodeInfo retrieves some protocol metadata about the running host node.
func (c *lesCommons) nodeInfo() interface{} {
var cht params.TrustedCheckpoint
sections, _, _ := c.chtIndexer.Sections()
sections2, _, _ := c.bloomTrieIndexer.Sections()
if sections2 < sections {
sections = sections2
}
if sections > 0 {
sectionIndex := sections - 1
sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
cht = params.TrustedCheckpoint{
SectionIndex: sectionIndex,
SectionHead: sectionHead,
CHTRoot: light.GetChtRoot(c.chainDb, sectionIndex, sectionHead),
BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
}
}
chain := c.protocolManager.blockchain
head := chain.CurrentHeader()
hash := head.Hash()
@@ -103,6 +85,38 @@ func (c *lesCommons) nodeInfo() interface{} {
Genesis: chain.Genesis().Hash(),
Config: chain.Config(),
Head: chain.CurrentHeader().Hash(),
CHT: cht,
CHT: c.latestLocalCheckpoint(),
}
}
// latestLocalCheckpoint finds the common stored section index and returns a set of
// post-processed trie roots (CHT and BloomTrie) associated with
// the appropriate section index and head hash as a local checkpoint package.
func (c *lesCommons) latestLocalCheckpoint() params.TrustedCheckpoint {
sections, _, _ := c.chtIndexer.Sections()
sections2, _, _ := c.bloomTrieIndexer.Sections()
// Cap the section index if the two sections are not consistent.
if sections > sections2 {
sections = sections2
}
if sections == 0 {
// No checkpoint information can be provided.
return params.TrustedCheckpoint{}
}
return c.getLocalCheckpoint(sections - 1)
}
// getLocalCheckpoint returns a set of post-processed trie roots (CHT and BloomTrie)
// associated with the given section index and its head hash.
//
// The returned checkpoint is only the checkpoint generated by the local indexers,
// not the stable checkpoint registered in the registrar contract.
func (c *lesCommons) getLocalCheckpoint(index uint64) params.TrustedCheckpoint {
sectionHead := c.chtIndexer.SectionHead(index)
return params.TrustedCheckpoint{
SectionIndex: index,
SectionHead: sectionHead,
CHTRoot: light.GetChtRoot(c.chainDb, index, sectionHead),
BloomRoot: light.GetBloomTrieRoot(c.chainDb, index, sectionHead),
}
}
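The local checkpoint returned here is what stableCheckpoint compares against the hash registered in the oracle contract. Below is a hedged sketch of that hash, following the layout the oracle comment above describes as keccak256(checkpoint_index, section_head, cht_root, bloom_root); whether this matches params.TrustedCheckpoint's own Hash method byte-for-byte is an assumption.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

// checkpointHash concatenates the 8-byte big-endian section index with the
// three 32-byte roots and hashes the result with keccak256.
func checkpointHash(cp params.TrustedCheckpoint) common.Hash {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, cp.SectionIndex)
	data := append(buf, cp.SectionHead.Bytes()...)
	data = append(data, cp.CHTRoot.Bytes()...)
	data = append(data, cp.BloomRoot.Bytes()...)
	return crypto.Keccak256Hash(data)
}

func main() {
	// Placeholder checkpoint; real values come from the local indexers.
	cp := params.TrustedCheckpoint{SectionIndex: 1}
	fmt.Println("checkpoint hash:", checkpointHash(cp).Hex())
}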


@@ -27,7 +27,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@@ -101,7 +100,7 @@ type ProtocolManager struct {
networkId uint64 // The identity of network.
txpool txPool
txrelay *LesTxRelay
txrelay *lesTxRelay
blockchain BlockChain
chainDb ethdb.Database
odr *LesOdr
@@ -115,6 +114,8 @@ type ProtocolManager struct {
fetcher *lightFetcher
ulc *ulc
peers *peerSet
checkpoint *params.TrustedCheckpoint
reg *checkpointOracle // If reg == nil, it means the checkpoint registrar is not activated
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
@@ -131,23 +132,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// of interacting with the Ethereum network.
func NewProtocolManager(
chainConfig *params.ChainConfig,
indexerConfig *light.IndexerConfig,
client bool,
networkId uint64,
mux *event.TypeMux,
engine consensus.Engine,
peers *peerSet,
blockchain BlockChain,
txpool txPool,
chainDb ethdb.Database,
odr *LesOdr,
txrelay *LesTxRelay,
serverPool *serverPool,
quitSync chan struct{},
wg *sync.WaitGroup,
ulcConfig *eth.ULCConfig, synced func() bool) (*ProtocolManager, error) {
func NewProtocolManager(chainConfig *params.ChainConfig, checkpoint *params.TrustedCheckpoint, indexerConfig *light.IndexerConfig, ulcConfig *eth.ULCConfig, client bool, networkId uint64, mux *event.TypeMux, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, serverPool *serverPool, registrar *checkpointOracle, quitSync chan struct{}, wg *sync.WaitGroup, synced func() bool) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
client: client,
@@ -159,13 +144,14 @@ func NewProtocolManager(
odr: odr,
networkId: networkId,
txpool: txpool,
txrelay: txrelay,
serverPool: serverPool,
reg: registrar,
peers: peers,
newPeerCh: make(chan *peer),
quitSync: quitSync,
wg: wg,
noMorePeers: make(chan struct{}),
checkpoint: checkpoint,
synced: synced,
}
if odr != nil {
@@ -182,11 +168,11 @@ func NewProtocolManager(
removePeer = func(id string) {}
}
if client {
var checkpoint uint64
if cht, ok := params.TrustedCheckpoints[blockchain.Genesis().Hash()]; ok {
checkpoint = (cht.SectionIndex+1)*params.CHTFrequency - 1
var checkpointNumber uint64
if checkpoint != nil {
checkpointNumber = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
}
manager.downloader = downloader.New(checkpoint, chainDb, nil, manager.eventMux, nil, blockchain, removePeer)
manager.downloader = downloader.New(checkpointNumber, chainDb, nil, manager.eventMux, nil, blockchain, removePeer)
manager.peers.notify((*downloaderPeerNotify)(manager))
manager.fetcher = newLightFetcher(manager)
}
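For a concrete sense of scale (a hedged aside, assuming mainnet's params.CHTFrequency of 32768 blocks per section): a hardcoded checkpoint with SectionIndex 100 yields checkpointNumber = (100+1)*32768-1 = 3,309,567, i.e. the last block of the trusted section, which is the pivot the downloader is constructed with above.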


@@ -259,7 +259,6 @@ func testGetCode(t *testing.T, protocol int) {
var codereqs []*CodeReq
var codes [][]byte
for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
header := bc.GetHeaderByNumber(i)
req := &CodeReq{
@@ -342,11 +341,10 @@ func testGetProofs(t *testing.T, protocol int) {
var proofreqs []ProofReq
proofsV2 := light.NewNodeSet()
accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
accounts := []common.Address{bankAddr, userAddr1, userAddr2, {}}
for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
header := bc.GetHeaderByNumber(i)
root := header.Root
trie, _ := trie.New(root, trie.NewDatabase(server.db))
trie, _ := trie.New(header.Root, trie.NewDatabase(server.db))
for _, acc := range accounts {
req := ProofReq{
@@ -377,7 +375,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
check := func(number uint64, wantOK bool) {
var (
header = bc.GetHeaderByNumber(number)
account = crypto.Keccak256(testBankAddress.Bytes())
account = crypto.Keccak256(userAddr1.Bytes())
)
req := &ProofReq{
BHash: header.Hash(),
@@ -390,7 +388,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
if wantOK {
proofsV2 := light.NewNodeSet()
t, _ := trie.New(header.Root, trie.NewDatabase(server.db))
t.Prove(crypto.Keccak256(account), 0, proofsV2)
t.Prove(account, 0, proofsV2)
expected = proofsV2.NodeList()
}
if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
@@ -496,14 +494,15 @@ func TestGetBloombitsProofs(t *testing.T) {
}
func TestTransactionStatusLes2(t *testing.T) {
db := rawdb.NewMemoryDatabase()
pm := newTestProtocolManagerMust(t, false, 0, nil, nil, nil, db, nil)
chain := pm.blockchain.(*core.BlockChain)
server, tearDown := newServerEnv(t, 0, 2, nil)
defer tearDown()
chain := server.pm.blockchain.(*core.BlockChain)
config := core.DefaultTxPoolConfig
config.Journal = ""
txpool := core.NewTxPool(config, params.TestChainConfig, chain)
pm.txpool = txpool
peer, _ := newTestPeer(t, "peer", 2, pm, true, 0)
server.pm.txpool = txpool
peer, _ := newTestPeer(t, "peer", 2, server.pm, true, 0)
defer peer.close()
var reqID uint64
@@ -511,13 +510,13 @@ func TestTransactionStatusLes2(t *testing.T) {
test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
reqID++
if send {
cost := peer.GetRequestCost(SendTxV2Msg, 1)
sendRequest(peer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
cost := server.tPeer.GetRequestCost(SendTxV2Msg, 1)
sendRequest(server.tPeer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
} else {
cost := peer.GetRequestCost(GetTxStatusMsg, 1)
sendRequest(peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
cost := server.tPeer.GetRequestCost(GetTxStatusMsg, 1)
sendRequest(server.tPeer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
}
if err := expectResponse(peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
if err := expectResponse(server.tPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
t.Errorf("transaction status mismatch")
}
}
@@ -525,16 +524,16 @@ func TestTransactionStatusLes2(t *testing.T) {
signer := types.HomesteadSigner{}
// test error status by sending an underpriced transaction
tx0, _ := types.SignTx(types.NewTransaction(0, acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})
tx1, _ := types.SignTx(types.NewTransaction(0, acc1Addr, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, testBankKey)
tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
test(tx1, true, light.TxStatus{Status: core.TxStatusPending}) // send valid processable tx, should return pending
test(tx1, true, light.TxStatus{Status: core.TxStatusPending}) // adding it again should not return an error
tx2, _ := types.SignTx(types.NewTransaction(1, acc1Addr, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, testBankKey)
tx3, _ := types.SignTx(types.NewTransaction(2, acc1Addr, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, testBankKey)
tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
// send transactions in the wrong order, tx3 should be queued
test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
@@ -542,7 +541,7 @@ func TestTransactionStatusLes2(t *testing.T) {
test(tx3, false, light.TxStatus{Status: core.TxStatusPending})
// generate and add a block with tx1 and tx2 included
gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), db, 1, func(i int, block *core.BlockGen) {
gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
block.AddTx(tx1)
block.AddTx(tx2)
})
@@ -561,12 +560,12 @@ func TestTransactionStatusLes2(t *testing.T) {
}
// check if their status is included now
block1hash := rawdb.ReadCanonicalHash(db, 1)
block1hash := rawdb.ReadCanonicalHash(server.db, 1)
test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})
// create a reorg that rolls them back
gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), db, 2, func(i int, block *core.BlockGen) {})
gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
if _, err := chain.InsertChain(gchain); err != nil {
panic(err)
}
@@ -589,7 +588,7 @@ func TestStopResumeLes3(t *testing.T) {
db := rawdb.NewMemoryDatabase()
clock := &mclock.Simulated{}
testCost := testBufLimit / 10
pm, err := newTestProtocolManager(false, 0, nil, nil, nil, db, nil, testCost, clock)
pm, _, err := newTestProtocolManager(false, 0, nil, nil, nil, db, nil, testCost, clock)
if err != nil {
t.Fatalf("Failed to create protocol manager: %v", err)
}


@@ -20,19 +20,22 @@
package les
import (
"context"
"crypto/rand"
"math/big"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/contracts/checkpointoracle/contract"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
@@ -45,14 +48,14 @@ import (
)
var (
testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
testBankFunds = big.NewInt(1000000000000000000)
bankKey, _ = crypto.GenerateKey()
bankAddr = crypto.PubkeyToAddress(bankKey.PublicKey)
bankFunds = big.NewInt(1000000000000000000)
acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
userKey1, _ = crypto.GenerateKey()
userKey2, _ = crypto.GenerateKey()
userAddr1 = crypto.PubkeyToAddress(userKey1.PublicKey)
userAddr2 = crypto.PubkeyToAddress(userKey2.PublicKey)
testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056")
testContractAddr common.Address
@@ -60,8 +63,21 @@ var (
testContractDeployed = uint64(2)
testEventEmitterCode = common.Hex2Bytes("60606040523415600e57600080fd5b7f57050ab73f6b9ebdd9f76b8d4997793f48cf956e965ee070551b9ca0bb71584e60405160405180910390a160358060476000396000f3006060604052600080fd00a165627a7a723058203f727efcad8b5811f8cb1fc2620ce5e8c63570d697aef968172de296ea3994140029")
testEventEmitterAddr common.Address
// Checkpoint registrar related fields
registrarAddr common.Address
signerKey, _ = crypto.GenerateKey()
signerAddr = crypto.PubkeyToAddress(signerKey.PublicKey)
)
var (
// The block frequency for creating a checkpoint (only used in tests).
sectionSize = big.NewInt(512)
// The number of confirmations needed to generate a checkpoint (only used in tests).
processConfirms = big.NewInt(4)
//
testBufLimit = uint64(1000000)
testBufRecharge = uint64(1000)
)
@@ -81,102 +97,139 @@ contract test {
}
*/
func testChainGen(i int, block *core.BlockGen) {
signer := types.HomesteadSigner{}
// prepareTestchain pre-commits the specified number of customized blocks into the chain.
func prepareTestchain(n int, backend *backends.SimulatedBackend) {
var (
ctx = context.Background()
signer = types.HomesteadSigner{}
)
for i := 0; i < n; i++ {
switch i {
case 0:
// deploy checkpoint contract
registrarAddr, _, _, _ = contract.DeployCheckpointOracle(bind.NewKeyedTransactor(bankKey), backend, []common.Address{signerAddr}, sectionSize, processConfirms, big.NewInt(1))
// bankUser transfers some ether to user1
nonce, _ := backend.PendingNonceAt(ctx, bankAddr)
tx, _ := types.SignTx(types.NewTransaction(nonce, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
backend.SendTransaction(ctx, tx)
case 1:
bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)
userNonce1, _ := backend.PendingNonceAt(ctx, userAddr1)
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
// acc1Addr creates a test contract.
// acc1Addr creates a test event.
nonce := block.TxNonce(acc1Addr)
// bankUser transfers more ether to user1
tx1, _ := types.SignTx(types.NewTransaction(bankNonce, userAddr1, big.NewInt(1000), params.TxGas, nil, nil), signer, bankKey)
backend.SendTransaction(ctx, tx1)
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
tx2, _ := types.SignTx(types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
tx3, _ := types.SignTx(types.NewContractCreation(nonce+1, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, acc1Key)
testContractAddr = crypto.CreateAddress(acc1Addr, nonce+1)
tx4, _ := types.SignTx(types.NewContractCreation(nonce+2, big.NewInt(0), 200000, big.NewInt(0), testEventEmitterCode), signer, acc1Key)
testEventEmitterAddr = crypto.CreateAddress(acc1Addr, nonce+2)
block.AddTx(tx1)
block.AddTx(tx2)
block.AddTx(tx3)
block.AddTx(tx4)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), 100000, nil, data), signer, testBankKey)
block.AddTx(tx)
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := block.PrevBlock(1).Header()
b2.Extra = []byte("foo")
block.AddUncle(b2)
b3 := block.PrevBlock(2).Header()
b3.Extra = []byte("foo")
block.AddUncle(b3)
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), 100000, nil, data), signer, testBankKey)
block.AddTx(tx)
// user1 relays ether to user2
tx2, _ := types.SignTx(types.NewTransaction(userNonce1, userAddr2, big.NewInt(1000), params.TxGas, nil, nil), signer, userKey1)
backend.SendTransaction(ctx, tx2)
// user1 deploys a test contract
tx3, _ := types.SignTx(types.NewContractCreation(userNonce1+1, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, userKey1)
backend.SendTransaction(ctx, tx3)
testContractAddr = crypto.CreateAddress(userAddr1, userNonce1+1)
// user1 deploys an event contract
tx4, _ := types.SignTx(types.NewContractCreation(userNonce1+2, big.NewInt(0), 200000, big.NewInt(0), testEventEmitterCode), signer, userKey1)
backend.SendTransaction(ctx, tx4)
case 2:
// bankUser transfers some ether to the signer
bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)
tx1, _ := types.SignTx(types.NewTransaction(bankNonce, signerAddr, big.NewInt(1000000000), params.TxGas, nil, nil), signer, bankKey)
backend.SendTransaction(ctx, tx1)
// invoke test contract
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
tx2, _ := types.SignTx(types.NewTransaction(bankNonce+1, testContractAddr, big.NewInt(0), 100000, nil, data), signer, bankKey)
backend.SendTransaction(ctx, tx2)
case 3:
// invoke test contract
bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
tx, _ := types.SignTx(types.NewTransaction(bankNonce, testContractAddr, big.NewInt(0), 100000, nil, data), signer, bankKey)
backend.SendTransaction(ctx, tx)
}
backend.Commit()
}
}
// testIndexers creates a set of indexers with the specified params for testing purposes.
func testIndexers(db ethdb.Database, odr light.OdrBackend, iConfig *light.IndexerConfig) (*core.ChainIndexer, *core.ChainIndexer, *core.ChainIndexer) {
chtIndexer := light.NewChtIndexer(db, odr, iConfig.ChtSize, iConfig.ChtConfirms)
bloomIndexer := eth.NewBloomIndexer(db, iConfig.BloomSize, iConfig.BloomConfirms)
bloomTrieIndexer := light.NewBloomTrieIndexer(db, odr, iConfig.BloomSize, iConfig.BloomTrieSize)
bloomIndexer.AddChildIndexer(bloomTrieIndexer)
return chtIndexer, bloomIndexer, bloomTrieIndexer
func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.IndexerConfig) []*core.ChainIndexer {
var indexers [3]*core.ChainIndexer
indexers[0] = light.NewChtIndexer(db, odr, config.ChtSize, config.ChtConfirms)
indexers[1] = eth.NewBloomIndexer(db, config.BloomSize, config.BloomConfirms)
indexers[2] = light.NewBloomTrieIndexer(db, odr, config.BloomSize, config.BloomTrieSize)
// Make the bloomTrieIndexer a child indexer of the bloom indexer.
indexers[1].AddChildIndexer(indexers[2])
return indexers[:]
}
// newTestProtocolManager creates a new protocol manager for testing purposes,
// with the given number of blocks already known, potential notification
// channels for different events, and the related chain indexer array.
func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *core.BlockGen), odr *LesOdr, peers *peerSet, db ethdb.Database, ulcConfig *eth.ULCConfig, testCost uint64, clock mclock.Clock) (*ProtocolManager, error) {
func newTestProtocolManager(lightSync bool, blocks int, odr *LesOdr, indexers []*core.ChainIndexer, peers *peerSet, db ethdb.Database, ulcConfig *eth.ULCConfig, testCost uint64, clock mclock.Clock) (*ProtocolManager, *backends.SimulatedBackend, error) {
var (
evmux = new(event.TypeMux)
engine = ethash.NewFaker()
gspec = core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
Config: params.AllEthashProtocolChanges,
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
}
genesis = gspec.MustCommit(db)
chain BlockChain
pool txPool
pool txPool
chain BlockChain
exitCh = make(chan struct{})
)
gspec.MustCommit(db)
if peers == nil {
peers = newPeerSet()
}
// create a simulation backend and pre-commit several customized blocks to the database.
simulation := backends.NewSimulatedBackendWithDatabase(db, gspec.Alloc, 100000000)
prepareTestchain(blocks, simulation)
// initialize empty chain for light client or pre-committed chain for server.
if lightSync {
chain, _ = light.NewLightChain(odr, gspec.Config, engine)
chain, _ = light.NewLightChain(odr, gspec.Config, engine, nil)
} else {
blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
gchain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
}
chain = blockchain
pool = core.NewTxPool(core.DefaultTxPoolConfig, gspec.Config, blockchain)
chain = simulation.Blockchain()
pool = core.NewTxPool(core.DefaultTxPoolConfig, gspec.Config, simulation.Blockchain())
}
// Create contract registrar
indexConfig := light.TestServerIndexerConfig
if lightSync {
indexConfig = light.TestClientIndexerConfig
}
pm, err := NewProtocolManager(gspec.Config, indexConfig, lightSync, NetworkId, evmux, engine, peers, chain, pool, db, odr, nil, nil, make(chan struct{}), new(sync.WaitGroup), ulcConfig, func() bool { return true })
if err != nil {
return nil, err
config := &params.CheckpointOracleConfig{
Address: crypto.CreateAddress(bankAddr, 0),
Signers: []common.Address{signerAddr},
Threshold: 1,
}
var reg *checkpointOracle
if indexers != nil {
getLocal := func(index uint64) params.TrustedCheckpoint {
chtIndexer := indexers[0]
sectionHead := chtIndexer.SectionHead(index)
return params.TrustedCheckpoint{
SectionIndex: index,
SectionHead: sectionHead,
CHTRoot: light.GetChtRoot(db, index, sectionHead),
BloomRoot: light.GetBloomTrieRoot(db, index, sectionHead),
}
}
reg = newCheckpointOracle(config, getLocal)
}
pm, err := NewProtocolManager(gspec.Config, nil, indexConfig, ulcConfig, lightSync, NetworkId, evmux, peers, chain, pool, db, odr, nil, reg, exitCh, new(sync.WaitGroup), func() bool { return true })
if err != nil {
return nil, nil, err
}
// Registrar initialization could fail if the checkpoint contract is not specified.
if pm.reg != nil {
pm.reg.start(simulation)
}
// Set up les server stuff.
if !lightSync {
srv := &LesServer{lesCommons: lesCommons{protocolManager: pm}}
srv := &LesServer{lesCommons: lesCommons{protocolManager: pm, chainDb: db}}
pm.server = srv
pm.servingQueue = newServingQueue(int64(time.Millisecond*10), 1, nil)
pm.servingQueue.setThreads(4)
@@ -189,19 +242,19 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
srv.fcManager = flowcontrol.NewClientManager(nil, clock)
}
pm.Start(1000)
return pm, nil
return pm, simulation, nil
}
// newTestProtocolManagerMust creates a new protocol manager for testing purposes,
// with the given number of blocks already known, potential notification
// channels for different events and relative chain indexers array. In case of an error, the constructor force-
// fails the test.
func newTestProtocolManagerMust(t *testing.T, lightSync bool, blocks int, generator func(int, *core.BlockGen), odr *LesOdr, peers *peerSet, db ethdb.Database, ulcConfig *eth.ULCConfig) *ProtocolManager {
pm, err := newTestProtocolManager(lightSync, blocks, generator, odr, peers, db, ulcConfig, 0, &mclock.System{})
// with the given number of blocks already known, potential notification channels
// for different events, and the related chain indexer array. In case of an error, the
// constructor force-fails the test.
func newTestProtocolManagerMust(t *testing.T, lightSync bool, blocks int, odr *LesOdr, indexers []*core.ChainIndexer, peers *peerSet, db ethdb.Database, ulcConfig *eth.ULCConfig) (*ProtocolManager, *backends.SimulatedBackend) {
pm, backend, err := newTestProtocolManager(lightSync, blocks, odr, indexers, peers, db, ulcConfig, 0, &mclock.System{})
if err != nil {
t.Fatalf("Failed to create protocol manager: %v", err)
}
return pm
return pm, backend
}
// testPeer is a simulated peer to allow testing direct network calls.
@@ -324,11 +377,13 @@ func (p *testPeer) close() {
// TestEntity represents a network entity for testing with necessary auxiliary fields.
type TestEntity struct {
db ethdb.Database
rPeer *peer
tPeer *testPeer
peers *peerSet
pm *ProtocolManager
db ethdb.Database
rPeer *peer
tPeer *testPeer
peers *peerSet
pm *ProtocolManager
backend *backends.SimulatedBackend
// Indexers
chtIndexer *core.ChainIndexer
bloomIndexer *core.ChainIndexer
@@ -338,11 +393,12 @@ type TestEntity struct {
// newServerEnv creates a server testing environment with a connected test peer for testing purposes.
func newServerEnv(t *testing.T, blocks int, protocol int, waitIndexers func(*core.ChainIndexer, *core.ChainIndexer, *core.ChainIndexer)) (*TestEntity, func()) {
db := rawdb.NewMemoryDatabase()
cIndexer, bIndexer, btIndexer := testIndexers(db, nil, light.TestServerIndexerConfig)
indexers := testIndexers(db, nil, light.TestServerIndexerConfig)
pm := newTestProtocolManagerMust(t, false, blocks, testChainGen, nil, nil, db, nil)
pm, b := newTestProtocolManagerMust(t, false, blocks, nil, indexers, nil, db, nil)
peer, _ := newTestPeer(t, "peer", protocol, pm, true, 0)
cIndexer, bIndexer, btIndexer := indexers[0], indexers[1], indexers[2]
cIndexer.Start(pm.blockchain.(*core.BlockChain))
bIndexer.Start(pm.blockchain.(*core.BlockChain))
@@ -355,6 +411,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, waitIndexers func(*cor
db: db,
tPeer: peer,
pm: pm,
backend: b,
chtIndexer: cIndexer,
bloomIndexer: bIndexer,
bloomTrieIndexer: btIndexer,
@@ -376,12 +433,16 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, waitIndexers fun
rm := newRetrieveManager(lPeers, dist, nil)
odr := NewLesOdr(ldb, light.TestClientIndexerConfig, rm)
cIndexer, bIndexer, btIndexer := testIndexers(db, nil, light.TestServerIndexerConfig)
lcIndexer, lbIndexer, lbtIndexer := testIndexers(ldb, odr, light.TestClientIndexerConfig)
indexers := testIndexers(db, nil, light.TestServerIndexerConfig)
lIndexers := testIndexers(ldb, odr, light.TestClientIndexerConfig)
cIndexer, bIndexer, btIndexer := indexers[0], indexers[1], indexers[2]
lcIndexer, lbIndexer, lbtIndexer := lIndexers[0], lIndexers[1], lIndexers[2]
odr.SetIndexers(lcIndexer, lbtIndexer, lbIndexer)
pm := newTestProtocolManagerMust(t, false, blocks, testChainGen, nil, peers, db, nil)
lpm := newTestProtocolManagerMust(t, true, 0, nil, odr, lPeers, ldb, nil)
pm, b := newTestProtocolManagerMust(t, false, blocks, nil, indexers, peers, db, nil)
lpm, lb := newTestProtocolManagerMust(t, true, 0, odr, lIndexers, lPeers, ldb, nil)
startIndexers := func(clientMode bool, pm *ProtocolManager) {
if clientMode {
@@ -421,6 +482,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, waitIndexers fun
pm: pm,
rPeer: peer,
peers: peers,
backend: b,
chtIndexer: cIndexer,
bloomIndexer: bIndexer,
bloomTrieIndexer: btIndexer,
@@ -429,6 +491,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, waitIndexers fun
pm: lpm,
rPeer: lPeer,
peers: lPeers,
backend: lb,
chtIndexer: lcIndexer,
bloomIndexer: lbIndexer,
bloomTrieIndexer: lbtIndexer,


@@ -166,11 +166,13 @@ func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error {
receipt := receipts[0]
// Retrieve our stored header and validate receipt content against it
header := rawdb.ReadHeader(db, r.Hash, r.Number)
if header == nil {
if r.Header == nil {
r.Header = rawdb.ReadHeader(db, r.Hash, r.Number)
}
if r.Header == nil {
return errHeaderUnavailable
}
if header.ReceiptHash != types.DeriveSha(receipt) {
if r.Header.ReceiptHash != types.DeriveSha(receipt) {
return errReceiptHashMismatch
}
// Validations passed, store and return
@@ -323,7 +325,11 @@ func (r *ChtRequest) CanSend(peer *peer) bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize
if r.Untrusted {
return peer.headInfo.Number >= r.BlockNum && peer.id == r.PeerId
} else {
return peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize
}
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@@ -364,32 +370,37 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
}
// Verify the CHT
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
reads := &readTraceDB{db: nodeSet}
value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
if err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
// Note: For untrusted CHT requests, there is no proof response, only
// header data.
var node light.ChtNode
if err := rlp.DecodeBytes(value, &node); err != nil {
return err
}
if node.Hash != header.Hash() {
return errCHTHashMismatch
}
if r.BlockNum != header.Number.Uint64() {
return errCHTNumberMismatch
if !r.Untrusted {
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
reads := &readTraceDB{db: nodeSet}
value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
if err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
if err := rlp.DecodeBytes(value, &node); err != nil {
return err
}
if node.Hash != header.Hash() {
return errCHTHashMismatch
}
if r.BlockNum != header.Number.Uint64() {
return errCHTNumberMismatch
}
}
// Verifications passed, store and return
r.Header = header
r.Proof = nodeSet
r.Td = node.Td
r.Td = node.Td // For untrusted requests, td here is nil; TODO: improve the les/2 protocol
return nil
}


@@ -78,7 +78,7 @@ func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrAccounts) }
func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
acc := []common.Address{testBankAddress, acc1Addr, acc2Addr, dummyAddr}
acc := []common.Address{bankAddr, userAddr1, userAddr2, dummyAddr}
var (
res []byte
@@ -121,7 +121,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai
statedb, err := state.New(header.Root, state.NewDatabase(db))
if err == nil {
from := statedb.GetOrNewStateObject(testBankAddress)
from := statedb.GetOrNewStateObject(bankAddr)
from.SetBalance(math.MaxBig256)
msg := callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, false)}
@@ -137,8 +137,8 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai
} else {
header := lc.GetHeaderByHash(bhash)
state := light.NewState(ctx, header, lc.Odr())
state.SetBalance(testBankAddress, math.MaxBig256)
msg := callmsg{types.NewMessage(testBankAddress, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, false)}
state.SetBalance(bankAddr, math.MaxBig256)
msg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, false)}
context := core.NewEVMContext(msg, header, lc, nil)
vmenv := vm.NewEVM(context, state, config, vm.Config{})
gp := new(core.GasPool).AddGas(math.MaxUint64)


@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -79,6 +80,10 @@ type peer struct {
announceType uint64
// Checkpoint related fields
checkpoint params.TrustedCheckpoint
checkpointNumber uint64
id string
headInfo *announceData
@@ -575,6 +580,14 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
send = send.add("flowControl/MRC", costList)
p.fcCosts = costList.decode(ProtocolLengths[uint(p.version)])
p.fcParams = server.defParams
if server.protocolManager != nil && server.protocolManager.reg != nil && server.protocolManager.reg.isRunning() {
cp, height := server.protocolManager.reg.stableCheckpoint()
if cp != nil {
send = send.add("checkpoint/value", cp)
send = send.add("checkpoint/registerHeight", height)
}
}
} else {
// on client node
p.announceType = announceTypeSimple
@@ -658,20 +671,24 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
return errResp(ErrUselessPeer, "peer cannot serve requests")
}
var params flowcontrol.ServerParams
if err := recv.get("flowControl/BL", &params.BufLimit); err != nil {
var sParams flowcontrol.ServerParams
if err := recv.get("flowControl/BL", &sParams.BufLimit); err != nil {
return err
}
if err := recv.get("flowControl/MRR", &params.MinRecharge); err != nil {
if err := recv.get("flowControl/MRR", &sParams.MinRecharge); err != nil {
return err
}
var MRC RequestCostList
if err := recv.get("flowControl/MRC", &MRC); err != nil {
return err
}
p.fcParams = params
p.fcServer = flowcontrol.NewServerNode(params, &mclock.System{})
p.fcParams = sParams
p.fcServer = flowcontrol.NewServerNode(sParams, &mclock.System{})
p.fcCosts = MRC.decode(ProtocolLengths[uint(p.version)])
recv.get("checkpoint/value", &p.checkpoint)
recv.get("checkpoint/registerHeight", &p.checkpointNumber)
if !p.isOnlyAnnounce {
for msgCode := range reqAvgTimeCost {
if p.fcCosts[msgCode] == nil {


@@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/light"
)
var testBankSecureTrieKey = secAddr(testBankAddress)
var testBankSecureTrieKey = secAddr(bankAddr)
func secAddr(addr common.Address) []byte {
return crypto.Keccak256(addr[:])


@@ -21,6 +21,7 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
@@ -72,68 +73,38 @@ type LesServer struct {
priorityClientPool *priorityClientPool
}
func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
var csvLogger *csvlogger.Logger
if logFileName != "" {
csvLogger = csvlogger.NewLogger(logFileName, time.Second*10, "event, peerId")
}
quitSync := make(chan struct{})
pm, err := NewProtocolManager(
eth.BlockChain().Config(),
light.DefaultServerIndexerConfig,
false,
config.NetworkId,
eth.EventMux(),
eth.Engine(),
newPeerSet(),
eth.BlockChain(),
eth.TxPool(),
eth.ChainDb(),
nil,
nil,
nil,
quitSync,
new(sync.WaitGroup),
config.ULC,
eth.Synced)
if err != nil {
return nil, err
}
if logProtocolHandler {
pm.logger = csvLogger
}
requestLogger := csvLogger
if !logRequestServing {
requestLogger = nil
}
pm.servingQueue = newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100, requestLogger)
lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
for i, pv := range AdvertiseProtocolVersions {
lesTopics[i] = lesTopic(eth.BlockChain().Genesis().Hash(), pv)
lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv)
}
quitSync := make(chan struct{})
srv := &LesServer{
lesCommons: lesCommons{
config: config,
chainDb: eth.ChainDb(),
iConfig: light.DefaultServerIndexerConfig,
chtIndexer: light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
protocolManager: pm,
chainDb: e.ChainDb(),
chtIndexer: light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
},
archiveMode: eth.ArchiveMode(),
archiveMode: e.ArchiveMode(),
quitSync: quitSync,
lesTopics: lesTopics,
onlyAnnounce: config.OnlyAnnounce,
csvLogger: csvLogger,
logTotalCap: requestLogger.NewChannel("totalCapacity", 0.01),
}
srv.costTracker, srv.minCapacity = newCostTracker(eth.ChainDb(), config, requestLogger)
srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config, requestLogger)
logger := log.New()
pm.server = srv
srv.thcNormal = config.LightServ * 4 / 100
if srv.thcNormal < 4 {
srv.thcNormal = 4
@@ -141,22 +112,31 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
srv.thcBlockProcessing = config.LightServ/100 + 1
srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
chtSectionCount, _, _ := srv.chtIndexer.Sections()
if chtSectionCount != 0 {
chtLastSection := chtSectionCount - 1
chtSectionHead := srv.chtIndexer.SectionHead(chtLastSection)
chtRoot := light.GetChtRoot(pm.chainDb, chtLastSection, chtSectionHead)
logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
}
bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
if bloomTrieSectionCount != 0 {
bloomTrieLastSection := bloomTrieSectionCount - 1
bloomTrieSectionHead := srv.bloomTrieIndexer.SectionHead(bloomTrieLastSection)
bloomTrieRoot := light.GetBloomTrieRoot(pm.chainDb, bloomTrieLastSection, bloomTrieSectionHead)
logger.Info("Loaded bloom trie", "section", bloomTrieLastSection, "head", bloomTrieSectionHead, "root", bloomTrieRoot)
checkpoint := srv.latestLocalCheckpoint()
if !checkpoint.Empty() {
logger.Info("Loaded latest checkpoint", "section", checkpoint.SectionIndex, "head", checkpoint.SectionHead,
"chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
}
srv.chtIndexer.Start(eth.BlockChain())
srv.chtIndexer.Start(e.BlockChain())
oracle := config.CheckpointOracle
if oracle == nil {
oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
}
registrar := newCheckpointOracle(oracle, srv.getLocalCheckpoint)
// TODO(rjl493456442) Checkpoint is useless for les server, separate handler for client and server.
pm, err := NewProtocolManager(e.BlockChain().Config(), nil, light.DefaultServerIndexerConfig, config.ULC, false, config.NetworkId, e.EventMux(), newPeerSet(), e.BlockChain(), e.TxPool(), e.ChainDb(), nil, nil, registrar, quitSync, new(sync.WaitGroup), e.Synced)
if err != nil {
return nil, err
}
srv.protocolManager = pm
if logProtocolHandler {
pm.logger = csvLogger
}
pm.servingQueue = newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100, requestLogger)
pm.server = srv
return srv, nil
}
@@ -168,6 +148,12 @@ func (s *LesServer) APIs() []rpc.API {
Service: NewPrivateLightServerAPI(s),
Public: false,
},
{
Namespace: "les",
Version: "1.0",
Service: NewPrivateLightAPI(&s.lesCommons, s.protocolManager.reg),
Public: false,
},
}
}
@@ -292,6 +278,13 @@ func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
}
// SetContractBackend sets the contract backend and starts watching the checkpoint contract if it is not yet watched.
func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
if s.protocolManager.reg != nil {
s.protocolManager.reg.start(backend)
}
}
// Stop stops the LES service
func (s *LesServer) Stop() {
s.fcManager.Stop()


@@ -18,11 +18,29 @@ package les
import (
"context"
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
)
var errInvalidCheckpoint = errors.New("invalid advertised checkpoint")
const (
// lightSync starts syncing from the current highest block.
// If the chain is empty, sync the entire header chain.
lightSync = iota
// legacyCheckpointSync starts syncing from a hardcoded checkpoint.
legacyCheckpointSync
// checkpointSync starts syncing from a checkpoint signed by trusted
// signers, or from a hardcoded checkpoint for compatibility.
checkpointSync
)
// syncer is responsible for periodically synchronising with the network, both
@@ -54,26 +72,141 @@ func (pm *ProtocolManager) syncer() {
}
}
func (pm *ProtocolManager) needToSync(peerHead blockInfo) bool {
head := pm.blockchain.CurrentHeader()
currentTd := rawdb.ReadTd(pm.chainDb, head.Hash(), head.Number.Uint64())
return currentTd != nil && peerHead.Td.Cmp(currentTd) > 0
// validateCheckpoint verifies whether the checkpoint advertised by the peer is valid.
//
// Each network has several hard-coded checkpoint signer addresses. Only the
// checkpoint issued by the specified signer is considered valid.
//
// In addition to the checkpoint registered in the registrar contract, there are
// several legacy hardcoded checkpoints in our codebase. These checkpoints are
// also considered as valid.
func (pm *ProtocolManager) validateCheckpoint(peer *peer) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
// Fetch the block header corresponding to the checkpoint registration.
cp := peer.checkpoint
header, err := light.GetUntrustedHeaderByNumber(ctx, pm.odr, peer.checkpointNumber, peer.id)
if err != nil {
return err
}
// Fetch block logs associated with the block header.
logs, err := light.GetUntrustedBlockLogs(ctx, pm.odr, header)
if err != nil {
return err
}
events := pm.reg.contract.LookupCheckpointEvents(logs, cp.SectionIndex, cp.Hash())
if len(events) == 0 {
return errInvalidCheckpoint
}
var (
index = events[0].Index
hash = events[0].CheckpointHash
signatures [][]byte
)
for _, event := range events {
signatures = append(signatures, append(event.R[:], append(event.S[:], event.V)...))
}
valid, signers := pm.reg.verifySigners(index, hash, signatures)
if !valid {
return errInvalidCheckpoint
}
log.Warn("Verified advertised checkpoint", "peer", peer.id, "signers", len(signers))
return nil
}
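To make the verification step above concrete, here is a hypothetical sketch of how a single signer can be recovered from one of the 65-byte R||S||V signatures; the payload layout (0x19 0x00 prefix, oracle address, 8-byte big-endian section index, checkpoint hash) mirrors the one used in the sync test further below, and the function name is illustrative.

package sketch

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// recoverSigner illustrates the recovery that verifySigners has to perform for
// each collected signature. It is a sketch, not part of this change.
func recoverSigner(oracle common.Address, index uint64, cpHash common.Hash, sig []byte) (common.Address, error) {
	// Rebuild the signed payload: 0x19 0x00 || oracle address || section index || checkpoint hash.
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, index)
	data := append([]byte{0x19, 0x00}, append(oracle.Bytes(), append(buf, cpHash.Bytes()...)...)...)

	// The contract event carries V as 27/28 (see the test below), while
	// crypto.SigToPub expects the recovery id as 0/1.
	sig = append([]byte{}, sig...) // work on a copy
	if sig[64] >= 27 {
		sig[64] -= 27
	}
	pubkey, err := crypto.SigToPub(crypto.Keccak256(data), sig)
	if err != nil {
		return common.Address{}, err
	}
	return crypto.PubkeyToAddress(*pubkey), nil
}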
// synchronise tries to sync up our local block chain with a remote peer.
// synchronise tries to sync up our local chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
// Short circuit if no peers are available
// Short circuit if the peer is nil.
if peer == nil {
return
}
// Make sure the peer's TD is higher than our own.
if !pm.needToSync(peer.headBlockInfo()) {
latest := pm.blockchain.CurrentHeader()
currentTd := rawdb.ReadTd(pm.chainDb, latest.Hash(), latest.Number.Uint64())
if currentTd != nil && peer.headBlockInfo().Td.Cmp(currentTd) < 0 {
return
}
// Recap the checkpoint.
//
// The light client may be connected to several different versions of the server.
// (1) Old version server which cannot provide a stable checkpoint in the handshake packet.
// => Use the hardcoded checkpoint or an empty checkpoint.
// (2) New version server, but simple checkpoint syncing is not enabled (e.g. mainnet, new testnet or private network).
// => Use the hardcoded checkpoint or an empty checkpoint.
// (3) New version server, but the provided stable checkpoint is even lower than the hardcoded one.
// => Use the hardcoded checkpoint.
// (4) New version server with a valid and higher stable checkpoint.
// => Use the provided checkpoint.
var checkpoint = &peer.checkpoint
var hardcoded bool
if pm.checkpoint != nil && pm.checkpoint.SectionIndex >= peer.checkpoint.SectionIndex {
checkpoint = pm.checkpoint // Use the hardcoded one.
hardcoded = true
}
// Determine whether we should run checkpoint syncing or normal light syncing.
//
// There are four situations in which checkpoint syncing is disabled:
//
// 1. The checkpoint is empty.
// 2. The latest head block of the local chain is above the checkpoint.
// 3. The checkpoint is hardcoded (recapped with the local hardcoded checkpoint).
// 4. For some networks checkpoint syncing is not activated.
mode := checkpointSync
switch {
case checkpoint.Empty():
mode = lightSync
log.Debug("Disable checkpoint syncing", "reason", "empty checkpoint")
case latest.Number.Uint64() >= (checkpoint.SectionIndex+1)*pm.iConfig.ChtSize-1:
mode = lightSync
log.Debug("Disable checkpoint syncing", "reason", "local chain beyond the checkpoint")
case hardcoded:
mode = legacyCheckpointSync
log.Debug("Disable checkpoint syncing", "reason", "checkpoint is hardcoded")
case pm.reg == nil || !pm.reg.isRunning():
mode = legacyCheckpointSync
log.Debug("Disable checkpoint syncing", "reason", "checkpoint syncing is not activated")
}
// Notify the testing framework once syncing has completed (for testing purposes only).
defer func() {
if pm.reg != nil && pm.reg.syncDoneHook != nil {
pm.reg.syncDoneHook()
}
}()
start := time.Now()
if mode == checkpointSync || mode == legacyCheckpointSync {
// Validate the advertised checkpoint
if mode == legacyCheckpointSync {
checkpoint = pm.checkpoint
} else if mode == checkpointSync {
if err := pm.validateCheckpoint(peer); err != nil {
log.Debug("Failed to validate checkpoint", "reason", err)
pm.removePeer(peer.id)
return
}
pm.blockchain.(*light.LightChain).AddTrustedCheckpoint(checkpoint)
}
log.Debug("Checkpoint syncing start", "peer", peer.id, "checkpoint", checkpoint.SectionIndex)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
pm.blockchain.(*light.LightChain).SyncCht(ctx)
pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync)
// Fetch the start point block header.
//
// For the ethash consensus engine, the start header is the block header
// of the checkpoint.
//
// For the clique consensus engine, the start header is the block header
// of the latest epoch covered by the checkpoint.
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
if !checkpoint.Empty() && !pm.blockchain.(*light.LightChain).SyncCheckpoint(ctx, checkpoint) {
log.Debug("Sync checkpoint failed")
pm.removePeer(peer.id)
return
}
}
// Fetch the remaining block headers based on the current chain header.
if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync); err != nil {
log.Debug("Synchronise failed", "reason", err)
return
}
log.Debug("Synchronise finished", "elapsed", common.PrettyDuration(time.Since(start)))
}

les/sync_test.go Normal file

@@ -0,0 +1,133 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"fmt"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/params"
)
// Test light syncing which will download all headers from genesis.
func TestLightSyncingLes2(t *testing.T) { testCheckpointSyncing(t, 2, 0) }
func TestLightSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 0) }
// Test legacy checkpoint syncing which will download tail headers
// based on a hardcoded checkpoint.
func TestLegacyCheckpointSyncingLes2(t *testing.T) { testCheckpointSyncing(t, 2, 1) }
func TestLegacyCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 1) }
// Test checkpoint syncing which will download tail headers based
// on a verified checkpoint.
func TestCheckpointSyncingLes2(t *testing.T) { testCheckpointSyncing(t, 2, 2) }
func TestCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 2) }
func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
config := light.TestServerIndexerConfig
waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
for {
cs, _, _ := cIndexer.Sections()
bts, _, _ := btIndexer.Sections()
if cs >= 1 && bts >= 1 {
break
}
time.Sleep(10 * time.Millisecond)
}
}
// Generate 512+4 blocks (1 CHT section in total)
server, client, tearDown := newClientServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false)
defer tearDown()
expected := config.ChtSize + config.ChtConfirms
// Checkpoint syncing or legacy checkpoint syncing.
if syncMode == 1 || syncMode == 2 {
// Assemble checkpoint 0
s, _, head := server.chtIndexer.Sections()
cp := &params.TrustedCheckpoint{
SectionIndex: 0,
SectionHead: head,
CHTRoot: light.GetChtRoot(server.db, s-1, head),
BloomRoot: light.GetBloomTrieRoot(server.db, s-1, head),
}
if syncMode == 1 {
// Register the assembled checkpoint as hardcoded one.
client.pm.checkpoint = cp
client.pm.blockchain.(*light.LightChain).AddTrustedCheckpoint(cp)
} else {
// Register the assembled checkpoint into oracle.
header := server.backend.Blockchain().CurrentHeader()
data := append([]byte{0x19, 0x00}, append(registrarAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...)
sig, _ := crypto.Sign(crypto.Keccak256(data), signerKey)
sig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
if _, err := server.pm.reg.contract.RegisterCheckpoint(signerKey, cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {
t.Error("register checkpoint failed", err)
}
server.backend.Commit()
// Wait for the checkpoint registration
for {
_, hash, _, err := server.pm.reg.contract.Contract().GetLatestCheckpoint(nil)
if err != nil || hash == [32]byte{} {
time.Sleep(100 * time.Millisecond)
continue
}
break
}
expected += 1
}
}
done := make(chan error)
client.pm.reg.syncDoneHook = func() {
header := client.pm.blockchain.CurrentHeader()
if header.Number.Uint64() == expected {
done <- nil
} else {
done <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expected, header.Number)
}
}
// Create connected peer pair.
peer, err1, lPeer, err2 := newTestPeerPair("peer", protocol, server.pm, client.pm)
select {
case <-time.After(time.Millisecond * 100):
case err := <-err1:
t.Fatalf("peer 1 handshake error: %v", err)
case err := <-err2:
t.Fatalf("peer 2 handshake error: %v", err)
}
server.rPeer, client.rPeer = peer, lPeer
select {
case err := <-done:
if err != nil {
t.Error("sync failed", err)
}
return
case <-time.NewTimer(10 * time.Second).C:
t.Error("checkpoint syncing timeout")
}
}
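The registration above builds the signing payload with nested appends; the helper below is a hypothetical restatement of that step, assuming the section index is encoded as an 8-byte big-endian integer (the test's eight zero bytes correspond to index 0). It is the signing counterpart of the recovery sketch shown earlier.

package sketch

import (
	"crypto/ecdsa"
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// signCheckpoint mirrors the signing step of the test above:
// payload = 0x19 0x00 || oracle address || section index (8 bytes) || checkpoint hash.
func signCheckpoint(oracle common.Address, index uint64, cpHash common.Hash, key *ecdsa.PrivateKey) ([]byte, error) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, index)
	data := append([]byte{0x19, 0x00}, append(oracle.Bytes(), append(buf, cpHash.Bytes()...)...)...)
	sig, err := crypto.Sign(crypto.Keccak256(data), key)
	if err != nil {
		return nil, err
	}
	sig[64] += 27 // transform V from 0/1 to 27/28, as the oracle contract expects
	return sig, nil
}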


@@ -30,7 +30,7 @@ type ltrInfo struct {
sentTo map[*peer]struct{}
}
type LesTxRelay struct {
type lesTxRelay struct {
txSent map[common.Hash]*ltrInfo
txPending map[common.Hash]struct{}
ps *peerSet
@@ -42,8 +42,8 @@ type LesTxRelay struct {
retriever *retrieveManager
}
func NewLesTxRelay(ps *peerSet, retriever *retrieveManager) *LesTxRelay {
r := &LesTxRelay{
func newLesTxRelay(ps *peerSet, retriever *retrieveManager) *lesTxRelay {
r := &lesTxRelay{
txSent: make(map[common.Hash]*ltrInfo),
txPending: make(map[common.Hash]struct{}),
ps: ps,
@@ -54,18 +54,18 @@ func NewLesTxRelay(ps *peerSet, retriever *retrieveManager) *LesTxRelay {
return r
}
func (self *LesTxRelay) Stop() {
func (self *lesTxRelay) Stop() {
close(self.stop)
}
func (self *LesTxRelay) registerPeer(p *peer) {
func (self *lesTxRelay) registerPeer(p *peer) {
self.lock.Lock()
defer self.lock.Unlock()
self.peerList = self.ps.AllPeers()
}
func (self *LesTxRelay) unregisterPeer(p *peer) {
func (self *lesTxRelay) unregisterPeer(p *peer) {
self.lock.Lock()
defer self.lock.Unlock()
@@ -74,7 +74,7 @@ func (self *LesTxRelay) unregisterPeer(p *peer) {
// send sends a list of transactions to at most a given number of peers at
// once, never resending any particular transaction to the same peer twice
func (self *LesTxRelay) send(txs types.Transactions, count int) {
func (self *lesTxRelay) send(txs types.Transactions, count int) {
sendTo := make(map[*peer]types.Transactions)
self.peerStartPos++ // rotate the starting position of the peer list
@@ -143,14 +143,14 @@ func (self *LesTxRelay) send(txs types.Transactions, count int) {
}
}
func (self *LesTxRelay) Send(txs types.Transactions) {
func (self *lesTxRelay) Send(txs types.Transactions) {
self.lock.Lock()
defer self.lock.Unlock()
self.send(txs, 3)
}
func (self *LesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
func (self *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
self.lock.Lock()
defer self.lock.Unlock()
@@ -173,7 +173,7 @@ func (self *LesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback
}
}
func (self *LesTxRelay) Discard(hashes []common.Hash) {
func (self *lesTxRelay) Discard(hashes []common.Hash) {
self.lock.Lock()
defer self.lock.Unlock()


@@ -26,7 +26,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
@@ -36,7 +35,7 @@ import (
)
func TestULCSyncWithOnePeer(t *testing.T) {
f := newFullPeerPair(t, 1, 4, testChainGen)
f := newFullPeerPair(t, 1, 4)
ulcConfig := &eth.ULCConfig{
MinTrustedFraction: 100,
TrustedServers: []string{f.Node.String()},
@@ -63,7 +62,7 @@ func TestULCSyncWithOnePeer(t *testing.T) {
}
func TestULCReceiveAnnounce(t *testing.T) {
f := newFullPeerPair(t, 1, 4, testChainGen)
f := newFullPeerPair(t, 1, 4)
ulcConfig := &eth.ULCConfig{
MinTrustedFraction: 100,
TrustedServers: []string{f.Node.String()},
@@ -100,8 +99,8 @@ func TestULCReceiveAnnounce(t *testing.T) {
}
func TestULCShouldNotSyncWithTwoPeersOneHaveEmptyChain(t *testing.T) {
f1 := newFullPeerPair(t, 1, 4, testChainGen)
f2 := newFullPeerPair(t, 2, 0, nil)
f1 := newFullPeerPair(t, 1, 4)
f2 := newFullPeerPair(t, 2, 0)
ulcConf := &ulc{minTrustedFraction: 100, trustedKeys: make(map[string]struct{})}
ulcConf.trustedKeys[f1.Node.ID().String()] = struct{}{}
ulcConf.trustedKeys[f2.Node.ID().String()] = struct{}{}
@@ -131,9 +130,9 @@ func TestULCShouldNotSyncWithTwoPeersOneHaveEmptyChain(t *testing.T) {
}
func TestULCShouldNotSyncWithThreePeersOneHaveEmptyChain(t *testing.T) {
f1 := newFullPeerPair(t, 1, 3, testChainGen)
f2 := newFullPeerPair(t, 2, 4, testChainGen)
f3 := newFullPeerPair(t, 3, 0, nil)
f1 := newFullPeerPair(t, 1, 3)
f2 := newFullPeerPair(t, 2, 4)
f3 := newFullPeerPair(t, 3, 0)
ulcConfig := &eth.ULCConfig{
MinTrustedFraction: 60,
@@ -211,10 +210,10 @@ func connectPeers(full, light pairPeer, version int) (*peer, *peer, error) {
}
// newFullPeerPair creates a node with full sync mode
func newFullPeerPair(t *testing.T, index int, numberOfblocks int, chainGen func(int, *core.BlockGen)) pairPeer {
func newFullPeerPair(t *testing.T, index int, numberOfblocks int) pairPeer {
db := rawdb.NewMemoryDatabase()
pmFull := newTestProtocolManagerMust(t, false, numberOfblocks, chainGen, nil, nil, db, nil)
pmFull, _ := newTestProtocolManagerMust(t, false, numberOfblocks, nil, nil, nil, db, nil)
peerPairFull := pairPeer{
Name: "full node",
@@ -238,7 +237,7 @@ func newLightPeer(t *testing.T, ulcConfig *eth.ULCConfig) pairPeer {
odr := NewLesOdr(ldb, light.DefaultClientIndexerConfig, rm)
pmLight := newTestProtocolManagerMust(t, true, 0, nil, odr, peers, ldb, ulcConfig)
pmLight, _ := newTestProtocolManagerMust(t, true, 0, odr, nil, peers, ldb, ulcConfig)
peerPairLight := pairPeer{
Name: "ulc node",
PM: pmLight,