les, light: improve txstatus retrieval (#22349)
Transaction unindexing will be enabled by default as of 1.10, which would break transaction status retrieval without this change. This PR introduces a retry mechanism in TxStatus retrieval.
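The diff below only shows the server-side test changes; the retry itself lives in the client-side retrieval code. As a rough sketch of the idea — with `queryOneServer` and `txStatusRetryLimit` as hypothetical stand-ins for the PR's actual plumbing, not its real identifiers — a transaction that an unindexed server reports as unknown is re-requested instead of being reported as missing:

```go
package les

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/light"
)

// txStatusRetryLimit is an illustrative bound on how often an "unknown"
// answer is retried before it is accepted.
const txStatusRetryLimit = 3

// retrieveTxStatus is a sketch only: queryOneServer stands in for one
// round trip of the real LES retrieval machinery.
func retrieveTxStatus(ctx context.Context, hash common.Hash,
	queryOneServer func(context.Context, common.Hash) (light.TxStatus, error)) (light.TxStatus, error) {
	var status light.TxStatus
	for i := 0; i < txStatusRetryLimit; i++ {
		s, err := queryOneServer(ctx, hash)
		if err != nil {
			return status, err
		}
		// A server that has already unindexed the transaction answers
		// "unknown"; retry (ideally against another server) instead of
		// giving up on the first such answer.
		if s.Status != core.TxStatusUnknown {
			return s, nil
		}
		status = s
	}
	return status, nil
}
```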
@@ -52,9 +52,16 @@ func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }
 func TestGetBlockHeadersLes4(t *testing.T) { testGetBlockHeaders(t, 4) }
 
 func testGetBlockHeaders(t *testing.T, protocol int) {
-	server, tearDown := newServerEnv(t, downloader.MaxHeaderFetch+15, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    downloader.MaxHeaderFetch + 15,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
 	bc := server.handler.blockchain
 
 	// Create a "random" unknown hash for testing
@@ -169,8 +176,8 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
 		// Send the hash request and verify the response
 		reqID++
 
-		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, tt.query)
-		if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
+		sendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, tt.query)
+		if err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
 			t.Errorf("test %d: headers mismatch: %v", i, err)
 		}
 	}
@@ -182,9 +189,17 @@ func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }
 func TestGetBlockBodiesLes4(t *testing.T) { testGetBlockBodies(t, 4) }
 
 func testGetBlockBodies(t *testing.T, protocol int) {
-	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    downloader.MaxHeaderFetch + 15,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	// Create a batch of tests for various scenarios
@@ -247,8 +262,8 @@ func testGetBlockBodies(t *testing.T, protocol int) {
 		reqID++
 
 		// Send the hash request and verify the response
-		sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, hashes)
-		if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
+		sendRequest(rawPeer.app, GetBlockBodiesMsg, reqID, hashes)
+		if err := expectResponse(rawPeer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
 			t.Errorf("test %d: bodies mismatch: %v", i, err)
 		}
 	}
@@ -261,8 +276,17 @@ func TestGetCodeLes4(t *testing.T) { testGetCode(t, 4) }
 
 func testGetCode(t *testing.T, protocol int) {
 	// Assemble the test environment
-	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    4,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
+
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	var codereqs []*CodeReq
@@ -279,8 +303,8 @@ func testGetCode(t *testing.T, protocol int) {
 		}
 	}
 
-	sendRequest(server.peer.app, GetCodeMsg, 42, codereqs)
-	if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
+	sendRequest(rawPeer.app, GetCodeMsg, 42, codereqs)
+	if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
 		t.Errorf("codes mismatch: %v", err)
 	}
 }
@@ -291,8 +315,17 @@ func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }
 func TestGetStaleCodeLes4(t *testing.T) { testGetStaleCode(t, 4) }
 
 func testGetStaleCode(t *testing.T, protocol int) {
-	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    core.TriesInMemory + 4,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
+
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	check := func(number uint64, expected [][]byte) {
@@ -300,8 +333,8 @@ func testGetStaleCode(t *testing.T, protocol int) {
 			BHash:  bc.GetHeaderByNumber(number).Hash(),
 			AccKey: crypto.Keccak256(testContractAddr[:]),
 		}
-		sendRequest(server.peer.app, GetCodeMsg, 42, []*CodeReq{req})
-		if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
+		sendRequest(rawPeer.app, GetCodeMsg, 42, []*CodeReq{req})
+		if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
 			t.Errorf("codes mismatch: %v", err)
 		}
 	}
@@ -317,9 +350,17 @@ func TestGetReceiptLes4(t *testing.T) { testGetReceipt(t, 4) }
 
 func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
-	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    4,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	// Collect the hashes to request, and the response to expect
@@ -332,8 +373,8 @@ func testGetReceipt(t *testing.T, protocol int) {
 		receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
 	}
 	// Send the hash request and verify the response
-	sendRequest(server.peer.app, GetReceiptsMsg, 42, hashes)
-	if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
+	sendRequest(rawPeer.app, GetReceiptsMsg, 42, hashes)
+	if err := expectResponse(rawPeer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
 		t.Errorf("receipts mismatch: %v", err)
 	}
 }
@@ -345,9 +386,17 @@ func TestGetProofsLes4(t *testing.T) { testGetProofs(t, 4) }
 
 func testGetProofs(t *testing.T, protocol int) {
 	// Assemble the test environment
-	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    4,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	var proofreqs []ProofReq
@@ -368,8 +417,8 @@ func testGetProofs(t *testing.T, protocol int) {
 		}
 	}
 	// Send the proof request and verify the response
-	sendRequest(server.peer.app, GetProofsV2Msg, 42, proofreqs)
-	if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
+	sendRequest(rawPeer.app, GetProofsV2Msg, 42, proofreqs)
+	if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
 		t.Errorf("proofs mismatch: %v", err)
 	}
 }
@@ -380,8 +429,17 @@ func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }
 func TestGetStaleProofLes4(t *testing.T) { testGetStaleProof(t, 4) }
 
 func testGetStaleProof(t *testing.T, protocol int) {
-	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		blocks:    core.TriesInMemory + 4,
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
+
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	check := func(number uint64, wantOK bool) {
@@ -393,7 +451,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
 			BHash: header.Hash(),
 			Key:   account,
 		}
-		sendRequest(server.peer.app, GetProofsV2Msg, 42, []*ProofReq{req})
+		sendRequest(rawPeer.app, GetProofsV2Msg, 42, []*ProofReq{req})
 
 		var expected []rlp.RawValue
 		if wantOK {
@@ -402,7 +460,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
 			t.Prove(account, 0, proofsV2)
 			expected = proofsV2.NodeList()
 		}
-		if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
+		if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
 			t.Errorf("codes mismatch: %v", err)
 		}
 	}
@@ -417,20 +475,30 @@ func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }
 func TestGetCHTProofsLes4(t *testing.T) { testGetCHTProofs(t, 4) }
 
 func testGetCHTProofs(t *testing.T, protocol int) {
-	config := light.TestServerIndexerConfig
-
-	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
-		for {
-			cs, _, _ := cIndexer.Sections()
-			if cs >= 1 {
-				break
+	var (
+		config       = light.TestServerIndexerConfig
+		waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
+			for {
+				cs, _, _ := cIndexer.Sections()
+				if cs >= 1 {
+					break
+				}
+				time.Sleep(10 * time.Millisecond)
 			}
-			time.Sleep(10 * time.Millisecond)
 		}
-	}
-	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0)
+		netconfig = testnetConfig{
+			blocks:    int(config.ChtSize + config.ChtConfirms),
+			protocol:  protocol,
+			indexFn:   waitIndexers,
+			nopruning: true,
+		}
+	)
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	// Assemble the proofs from the different protocols
@@ -454,8 +522,8 @@ func testGetCHTProofs(t *testing.T, protocol int) {
 		AuxReq: htAuxHeader,
 	}}
 	// Send the proof request and verify the response
-	sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requestsV2)
-	if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
+	sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requestsV2)
+	if err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
 		t.Errorf("proofs mismatch: %v", err)
 	}
 }
@@ -466,20 +534,30 @@ func TestGetBloombitsProofsLes4(t *testing.T) { testGetBloombitsProofs(t, 4) }
 
 // Tests that bloombits proofs can be correctly retrieved.
 func testGetBloombitsProofs(t *testing.T, protocol int) {
-	config := light.TestServerIndexerConfig
-
-	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
-		for {
-			bts, _, _ := btIndexer.Sections()
-			if bts >= 1 {
-				break
+	var (
+		config       = light.TestServerIndexerConfig
+		waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
+			for {
+				bts, _, _ := btIndexer.Sections()
+				if bts >= 1 {
+					break
+				}
+				time.Sleep(10 * time.Millisecond)
 			}
-			time.Sleep(10 * time.Millisecond)
 		}
-	}
-	server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0)
+		netconfig = testnetConfig{
+			blocks:    int(config.BloomTrieSize + config.BloomTrieConfirms),
+			protocol:  protocol,
+			indexFn:   waitIndexers,
+			nopruning: true,
+		}
+	)
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	bc := server.handler.blockchain
 
 	// Request and verify each bit of the bloom bits proofs
@@ -503,20 +581,28 @@ func testGetBloombitsProofs(t *testing.T, protocol int) {
 		trie.Prove(key, 0, &proofs.Proofs)
 
 		// Send the proof request and verify the response
-		sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requests)
-		if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
+		sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requests)
+		if err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
 			t.Errorf("bit %d: proofs mismatch: %v", bit, err)
 		}
 	}
 }
 
-func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) }
-func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) }
-func TestTransactionStatusLes4(t *testing.T) { testTransactionStatus(t, 4) }
+func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, lpv2) }
+func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, lpv3) }
+func TestTransactionStatusLes4(t *testing.T) { testTransactionStatus(t, lpv4) }
 
 func testTransactionStatus(t *testing.T, protocol int) {
-	server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0)
+	netconfig := testnetConfig{
+		protocol:  protocol,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
+
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
+
 	server.handler.addTxsSync = true
 
 	chain := server.handler.blockchain
@@ -526,11 +612,11 @@ func testTransactionStatus(t *testing.T, protocol int) {
 	test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
 		reqID++
 		if send {
-			sendRequest(server.peer.app, SendTxV2Msg, reqID, types.Transactions{tx})
+			sendRequest(rawPeer.app, SendTxV2Msg, reqID, types.Transactions{tx})
 		} else {
-			sendRequest(server.peer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
+			sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
 		}
-		if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
+		if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
 			t.Errorf("transaction status mismatch")
 		}
 	}
@@ -572,7 +658,7 @@ func testTransactionStatus(t *testing.T, protocol int) {
 		t.Fatalf("pending count mismatch: have %d, want 1", pending)
 	}
 	// Discard new block announcement
-	msg, _ := server.peer.app.ReadMsg()
+	msg, _ := rawPeer.app.ReadMsg()
 	msg.Discard()
 
 	// check if their status is included now
@@ -597,7 +683,7 @@ func testTransactionStatus(t *testing.T, protocol int) {
 		t.Fatalf("pending count mismatch: have %d, want 3", pending)
 	}
 	// Discard new block announcement
-	msg, _ = server.peer.app.ReadMsg()
+	msg, _ = rawPeer.app.ReadMsg()
 	msg.Discard()
 
 	// check if their status is pending again
@@ -605,11 +691,23 @@ func testTransactionStatus(t *testing.T, protocol int) {
 	test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
 }
 
-func TestStopResumeLes3(t *testing.T) {
-	server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10)
+func TestStopResumeLES3(t *testing.T) { testStopResume(t, lpv3) }
+func TestStopResumeLES4(t *testing.T) { testStopResume(t, lpv4) }
+
+func testStopResume(t *testing.T, protocol int) {
+	netconfig := testnetConfig{
+		protocol:  protocol,
+		simClock:  true,
+		nopruning: true,
+	}
+	server, _, tearDown := newClientServerEnv(t, netconfig)
 	defer tearDown()
 
 	server.handler.server.costTracker.testing = true
+	server.handler.server.costTracker.testCostList = testCostList(testBufLimit / 10)
+
+	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
+	defer closePeer()
 
 	var (
 		reqID uint64
@@ -619,14 +717,14 @@ func TestStopResumeLes3(t *testing.T) {
 	header := server.handler.blockchain.CurrentHeader()
 	req := func() {
 		reqID++
-		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
+		sendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
 	}
 	for i := 1; i <= 5; i++ {
 		// send requests while we still have enough buffer and expect a response
 		for expBuf >= testCost {
 			req()
 			expBuf -= testCost
-			if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
+			if err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
 				t.Errorf("expected response and failed: %v", err)
 			}
 		}
@@ -636,7 +734,7 @@ func TestStopResumeLes3(t *testing.T) {
 			req()
 			c--
 		}
-		if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil {
+		if err := p2p.ExpectMsg(rawPeer.app, StopMsg, nil); err != nil {
 			t.Errorf("expected StopMsg and failed: %v", err)
 		}
 		// wait until the buffer is recharged by half of the limit
@@ -645,7 +743,7 @@ func TestStopResumeLes3(t *testing.T) {
 
 		// expect a ResumeMsg with the partially recharged buffer value
 		expBuf += testBufRecharge * wait
-		if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil {
+		if err := p2p.ExpectMsg(rawPeer.app, ResumeMsg, expBuf); err != nil {
 			t.Errorf("expected ResumeMsg and failed: %v", err)
 		}
 	}
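Every migrated test above follows the same setup shape: describe the simulated network with a testnetConfig, start it with newClientServerEnv, and drive the protocol through an explicitly created raw peer instead of the implicit server.peer that the old newServerEnv helper provided. Condensed into one illustrative sketch (testAnyRequest is not part of the diff; it reuses only helpers that appear above and lives in the same les test package):

```go
// Condensed shape of the migrated tests; the request/response pair is
// borrowed from testGetCode purely as an example.
func testAnyRequest(t *testing.T, protocol int) {
	// Describe the simulated network; nopruning keeps historical state
	// around so requests against older blocks still succeed.
	netconfig := testnetConfig{
		blocks:    4,
		protocol:  protocol,
		nopruning: true,
	}
	server, _, tearDown := newClientServerEnv(t, netconfig)
	defer tearDown()

	// Drive the protocol by hand over an explicit raw peer.
	rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
	defer closePeer()

	// The request/response exchange happens on the raw peer's app pipe.
	sendRequest(rawPeer.app, GetCodeMsg, 42, []*CodeReq{})
	if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, [][]byte{}); err != nil {
		t.Errorf("response mismatch: %v", err)
	}
}
```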