les, light: LES/2 protocol version (#14970)

This PR implements the new LES protocol version extensions:

* a new, more efficient Merkle proof reply format: when replying to a
  request for multiple Merkle proofs, we send just a single set of
  trie nodes containing all the necessary nodes (see the proof-merging
  sketch after this list)
* BBT (BloomBitsTrie) works similarly to the existing CHT and contains
  the bloombits search data to speed up log searches
* GetTxStatusMsg returns the inclusion position or the
  pending/queued/unknown state of a transaction referenced by hash
  (see the status-decoding sketch after this list)
* an optional signature of the new block data (number/hash/td) can be
  included in AnnounceMsg, giving "very light clients"
  (mobile/embedded devices) the option to skip the expensive Ethash
  check and instead accept matching announcements signed by several
  somewhat trusted servers (still a lot better than trusting a single
  server completely and retrieving everything through RPC). The new
  client mode is not implemented in this PR, only the protocol
  extension (a heavily hedged signing sketch also follows the list).
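
To make the first bullet concrete, here is a minimal sketch (not from the PR) of the merged-node-set idea behind the V2 proof format: every trie node is keyed by its Keccak256 hash, so one flat node list can back any number of individual proofs. `light.NodeList`, `light.NewNodeSet` and `KeyCount` all appear in the test diff below; the `Put` call is assumed from the way `trie.Prove` writes into a `NodeSet` there, and the byte strings merely stand in for real RLP-encoded trie nodes.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/light"
)

func main() {
	// Pretend these trie nodes arrived in a single ProofsV2Msg reply.
	// In the V1 format, nodes shared between proofs are sent once per
	// proof; in V2 each distinct node appears exactly once.
	received := light.NodeList{
		[]byte("node shared by proof A and proof B"),
		[]byte("node only needed for proof A"),
		[]byte("node only needed for proof B"),
	}

	// Index the flat list by node hash; individual Merkle proofs can then
	// be resolved against this one shared set.
	set := light.NewNodeSet()
	for _, node := range received {
		set.Put(crypto.Keccak256(node), node)
	}
	fmt.Println("distinct trie nodes:", set.KeyCount())
}
```

The saving comes from deduplication: proofs for accounts in the same state trie share most of their path to the root, so the shared upper nodes are transferred once instead of once per requested key.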
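For GetTxStatusMsg, a hedged sketch of how a client might interpret one reply entry. The `core.TxStatusData` fields, the `core.TxStatus*` constants and the `core.TxLookupEntry` position payload are taken from the test below; the `describe` helper itself is invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/rlp"
)

// describe renders one status entry from a TxStatusMsg reply (sketch only).
func describe(status core.TxStatusData) string {
	switch status.Status {
	case core.TxStatusUnknown:
		return "unknown to the server"
	case core.TxStatusQueued:
		return "queued (e.g. nonce gap)"
	case core.TxStatusPending:
		return "pending in the transaction pool"
	case core.TxStatusIncluded:
		// For included transactions, Data carries the RLP-encoded lookup
		// entry with the inclusion position (as constructed in the test).
		var pos core.TxLookupEntry
		if err := rlp.DecodeBytes(status.Data, &pos); err != nil {
			return "included, but position undecodable: " + err.Error()
		}
		return fmt.Sprintf("included in block %d (%x), tx index %d",
			pos.BlockIndex, pos.BlockHash[:4], pos.Index)
	case core.TxStatusError:
		// For rejected transactions, Data carries the error text.
		return "rejected: " + string(status.Data)
	default:
		return "unrecognized status"
	}
}

func main() {
	fmt.Println(describe(core.TxStatusData{Status: core.TxStatusUnknown}))
}
```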
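For the signed announcement, a heavily hedged sketch of the general mechanism only: an ECDSA signature over a digest of the announced number/hash/td, which the client recovers and checks against its set of trusted server keys before skipping the Ethash check. The `announcedBlock` struct and the payload encoding are assumptions made up for this sketch; the authoritative wire format is whatever AnnounceMsg defines in the LES/2 code this PR adds.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// announcedBlock is a hypothetical helper struct for this sketch only.
type announcedBlock struct {
	Hash   common.Hash
	Number uint64
	Td     *big.Int
}

func main() {
	key, _ := crypto.GenerateKey()

	// Assumed payload: RLP of (hash, number, td), hashed with Keccak256.
	payload, _ := rlp.EncodeToBytes(announcedBlock{
		Hash:   common.HexToHash("0xdeadbeef"),
		Number: 100,
		Td:     big.NewInt(1000000),
	})
	digest := crypto.Keccak256(payload)

	// Server side: attach the signature to the announcement.
	sig, _ := crypto.Sign(digest, key)

	// Client side: recover the signer and compare it against the list of
	// somewhat trusted servers.
	pub, _ := crypto.SigToPub(digest, sig)
	fmt.Println("signed by:", crypto.PubkeyToAddress(*pub).Hex())
}
```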
Authored by Felföldi Zsolt on 2017-10-24 15:19:09 +02:00; committed by Felix Lange
parent 6d6a5a9337
commit ca376ead88
34 changed files with 2056 additions and 488 deletions

@@ -17,7 +17,10 @@
package les

import (
	"bytes"
	"math/big"
	"math/rand"
	"runtime"
	"testing"

	"github.com/ethereum/go-ethereum/common"
@@ -26,7 +29,9 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)
@@ -39,9 +44,29 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

// testCheckProof checks that the flat list of trie nodes received in a proof
// reply matches the expected node set exactly.
func testCheckProof(t *testing.T, exp *light.NodeSet, got light.NodeList) {
	if exp.KeyCount() > len(got) {
		t.Errorf("proof has fewer nodes than expected")
		return
	}
	if exp.KeyCount() < len(got) {
		t.Errorf("proof has more nodes than expected")
		return
	}
	for _, node := range got {
		n, _ := exp.Get(crypto.Keccak256(node))
		if !bytes.Equal(n, node) {
			t.Errorf("proof contents mismatch")
			return
		}
	}
}

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	db, _ := ethdb.NewMemDatabase()
	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil, nil, db)
@@ -171,6 +196,8 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }

func testGetBlockBodies(t *testing.T, protocol int) {
	db, _ := ethdb.NewMemDatabase()
	pm := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil, nil, nil, db)
@@ -247,6 +274,8 @@ func testGetBlockBodies(t *testing.T, protocol int) {
// Tests that the contract codes can be retrieved based on account addresses.
func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }

func testGetCode(t *testing.T, protocol int) {
	// Assemble the test environment
	db, _ := ethdb.NewMemDatabase()
@@ -280,6 +309,8 @@ func testGetCode(t *testing.T, protocol int) {
// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }

func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
	db, _ := ethdb.NewMemDatabase()
@@ -307,6 +338,8 @@ func testGetReceipt(t *testing.T, protocol int) {
// Tests that trie merkle proofs can be retrieved
func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }

func testGetProofs(t *testing.T, protocol int) {
	// Assemble the test environment
	db, _ := ethdb.NewMemDatabase()
@@ -315,8 +348,11 @@ func testGetProofs(t *testing.T, protocol int) {
	peer, _ := newTestPeer(t, "peer", protocol, pm, true)
	defer peer.close()
	var proofreqs []ProofReq
	var proofs [][]rlp.RawValue
	var (
		proofreqs []ProofReq
		proofsV1  [][]rlp.RawValue
	)
	proofsV2 := light.NewNodeSet()
	accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
@@ -331,14 +367,124 @@ func testGetProofs(t *testing.T, protocol int) {
			}
			proofreqs = append(proofreqs, req)
			proof := trie.Prove(crypto.Keccak256(acc[:]))
			proofs = append(proofs, proof)
			switch protocol {
			case 1:
				var proof light.NodeList
				trie.Prove(crypto.Keccak256(acc[:]), 0, &proof)
				proofsV1 = append(proofsV1, proof)
			case 2:
				trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
			}
		}
	}
	// Send the proof request and verify the response
	cost := peer.GetRequestCost(GetProofsMsg, len(proofreqs))
	sendRequest(peer.app, GetProofsMsg, 42, cost, proofreqs)
	if err := expectResponse(peer.app, ProofsMsg, 42, testBufLimit, proofs); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	switch protocol {
	case 1:
		cost := peer.GetRequestCost(GetProofsV1Msg, len(proofreqs))
		sendRequest(peer.app, GetProofsV1Msg, 42, cost, proofreqs)
		if err := expectResponse(peer.app, ProofsV1Msg, 42, testBufLimit, proofsV1); err != nil {
			t.Errorf("proofs mismatch: %v", err)
		}
	case 2:
		cost := peer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
		sendRequest(peer.app, GetProofsV2Msg, 42, cost, proofreqs)
		msg, err := peer.app.ReadMsg()
		if err != nil {
			t.Errorf("Message read error: %v", err)
		}
		var resp struct {
			ReqID, BV uint64
			Data      light.NodeList
		}
		if err := msg.Decode(&resp); err != nil {
			t.Errorf("reply decode error: %v", err)
		}
		if msg.Code != ProofsV2Msg {
			t.Errorf("Message code mismatch")
		}
		if resp.ReqID != 42 {
			t.Errorf("ReqID mismatch")
		}
		if resp.BV != testBufLimit {
			t.Errorf("BV mismatch")
		}
		testCheckProof(t, proofsV2, resp.Data)
	}
}

// Tests that transaction status requests are answered correctly, covering the
// error, unknown, pending, queued and included states as well as a reorg.
func TestTransactionStatusLes2(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	pm := newTestProtocolManagerMust(t, false, 0, nil, nil, nil, db)
	chain := pm.blockchain.(*core.BlockChain)
	txpool := core.NewTxPool(core.DefaultTxPoolConfig, params.TestChainConfig, chain)
	pm.txpool = txpool
	peer, _ := newTestPeer(t, "peer", 2, pm, true)
	defer peer.close()

	var reqID uint64

	test := func(tx *types.Transaction, send bool, expStatus core.TxStatusData) {
		reqID++
		if send {
			cost := peer.GetRequestCost(SendTxV2Msg, 1)
			sendRequest(peer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
		} else {
			cost := peer.GetRequestCost(GetTxStatusMsg, 1)
			sendRequest(peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
		}
		if err := expectResponse(peer.app, TxStatusMsg, reqID, testBufLimit, []core.TxStatusData{expStatus}); err != nil {
			t.Errorf("transaction status mismatch")
		}
	}

	signer := types.HomesteadSigner{}

	// test error status by sending an underpriced transaction
	tx0, _ := types.SignTx(types.NewTransaction(0, acc1Addr, big.NewInt(10000), bigTxGas, nil, nil), signer, testBankKey)
	test(tx0, true, core.TxStatusData{Status: core.TxStatusError, Data: []byte("transaction underpriced")})

	tx1, _ := types.SignTx(types.NewTransaction(0, acc1Addr, big.NewInt(10000), bigTxGas, big.NewInt(100000000000), nil), signer, testBankKey)
	test(tx1, false, core.TxStatusData{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	test(tx1, true, core.TxStatusData{Status: core.TxStatusPending})  // send valid processable tx, should return pending
	test(tx1, true, core.TxStatusData{Status: core.TxStatusPending})  // adding it again should not return an error

	tx2, _ := types.SignTx(types.NewTransaction(1, acc1Addr, big.NewInt(10000), bigTxGas, big.NewInt(100000000000), nil), signer, testBankKey)
	tx3, _ := types.SignTx(types.NewTransaction(2, acc1Addr, big.NewInt(10000), bigTxGas, big.NewInt(100000000000), nil), signer, testBankKey)
	// send transactions in the wrong order, tx3 should be queued
	test(tx3, true, core.TxStatusData{Status: core.TxStatusQueued})
	test(tx2, true, core.TxStatusData{Status: core.TxStatusPending})
	// query again, now tx3 should be pending too
	test(tx3, false, core.TxStatusData{Status: core.TxStatusPending})

	// generate and add a block with tx1 and tx2 included
	gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), db, 1, func(i int, block *core.BlockGen) {
		block.AddTx(tx1)
		block.AddTx(tx2)
	})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}

	// check if their status is included now
	block1hash := core.GetCanonicalHash(db, 1)
	tx1pos, _ := rlp.EncodeToBytes(core.TxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0})
	tx2pos, _ := rlp.EncodeToBytes(core.TxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1})
	test(tx1, false, core.TxStatusData{Status: core.TxStatusIncluded, Data: tx1pos})
	test(tx2, false, core.TxStatusData{Status: core.TxStatusIncluded, Data: tx2pos})

	// create a reorg that rolls them back
	gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), db, 2, func(i int, block *core.BlockGen) {})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the reorg
	for {
		if pending, _ := txpool.Stats(); pending == 3 {
			break
		}
		runtime.Gosched()
	}
	// check if their status is pending again
	test(tx1, false, core.TxStatusData{Status: core.TxStatusPending})
	test(tx2, false, core.TxStatusData{Status: core.TxStatusPending})
}