core, eth: split the db blocks into headers and bodies
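
Note: the handler changes below rely on the chain being able to serve headers and bodies independently of full blocks. A minimal sketch of that split, assuming a generic key/value database and hypothetical key prefixes and helper names (the real storage schema lives in the core package):

// Sketch only: hypothetical key prefixes and helper names illustrating
// the header/body split; not the actual schema used by go-ethereum.
package chaindb

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/rlp"
)

var (
    headerPrefix = []byte("header-") // assumed: header key = headerPrefix + hash
    bodyPrefix   = []byte("body-")   // assumed: body key = bodyPrefix + hash
)

// getHeader loads and decodes only the header belonging to a hash.
func getHeader(db ethdb.Database, hash common.Hash) *types.Header {
    data, _ := db.Get(append(headerPrefix, hash.Bytes()...))
    if len(data) == 0 {
        return nil
    }
    header := new(types.Header)
    if err := rlp.DecodeBytes(data, header); err != nil {
        return nil
    }
    return header
}

// getBodyRLP returns the body (transactions and uncles) still RLP encoded,
// so it can be forwarded to a peer without decoding it first.
func getBodyRLP(db ethdb.Database, hash common.Hash) []byte {
    data, _ := db.Get(append(bodyPrefix, hash.Bytes()...))
    return data
}

Keeping the body as raw RLP is what lets the GetBlockBodies handler further down skip the decode/re-encode round trip.
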
@@ -345,33 +345,33 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
         if err := msg.Decode(&query); err != nil {
             return errResp(ErrDecode, "%v: %v", msg, err)
         }
-        // Gather blocks until the fetch or network limits is reached
+        // Gather headers until the fetch or network limits is reached
         var (
             bytes   common.StorageSize
             headers []*types.Header
             unknown bool
         )
         for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
-            // Retrieve the next block satisfying the query
-            var origin *types.Block
+            // Retrieve the next header satisfying the query
+            var origin *types.Header
             if query.Origin.Hash != (common.Hash{}) {
-                origin = pm.chainman.GetBlock(query.Origin.Hash)
+                origin = pm.chainman.GetHeader(query.Origin.Hash)
             } else {
-                origin = pm.chainman.GetBlockByNumber(query.Origin.Number)
+                origin = pm.chainman.GetHeaderByNumber(query.Origin.Number)
             }
             if origin == nil {
                 break
             }
-            headers = append(headers, origin.Header())
-            bytes += origin.Size()
+            headers = append(headers, origin)
+            bytes += 500 // Approximate, should be good enough estimate

-            // Advance to the next block of the query
+            // Advance to the next header of the query
             switch {
             case query.Origin.Hash != (common.Hash{}) && query.Reverse:
                 // Hash based traversal towards the genesis block
                 for i := 0; i < int(query.Skip)+1; i++ {
-                    if block := pm.chainman.GetBlock(query.Origin.Hash); block != nil {
-                        query.Origin.Hash = block.ParentHash()
+                    if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil {
+                        query.Origin.Hash = header.ParentHash
                     } else {
                         unknown = true
                         break
@@ -379,9 +379,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
                 }
             case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
                 // Hash based traversal towards the leaf block
-                if block := pm.chainman.GetBlockByNumber(origin.NumberU64() + query.Skip + 1); block != nil {
-                    if pm.chainman.GetBlockHashesFromHash(block.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
-                        query.Origin.Hash = block.Hash()
+                if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
+                    if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
+                        query.Origin.Hash = header.Hash()
                     } else {
                         unknown = true
                     }
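
For reference, the query decoded above carries an origin (either a hash or a number), an amount, a skip distance and a direction; the two hash-based branches implement forward and reverse traversal with gaps between returned headers. A small sketch of such a request from the caller's side, with the concrete type name and field layout assumed rather than taken from this diff (only the field accesses Origin.Hash, Origin.Number, Amount, Skip and Reverse appear above):

// Sketch only: headersQuery is a hypothetical stand-in for the request
// type the handler decodes; the field names mirror the accesses in the diff.
package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

type headersQuery struct {
    Origin struct {
        Hash   common.Hash // start from a specific header hash ...
        Number uint64      // ... or from a block number
    }
    Amount  uint64 // maximum number of headers to return
    Skip    uint64 // headers to skip between consecutive entries
    Reverse bool   // walk towards the genesis block instead of the head
}

func main() {
    // Request up to 64 headers, every second one, walking backwards from a
    // known hash (the hash-based reverse traversal branch in the handler).
    var q headersQuery
    q.Origin.Hash = common.HexToHash("0xdeadbeef")
    q.Amount, q.Skip, q.Reverse = 64, 1, true
    fmt.Printf("%+v\n", q)
}
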
@@ -452,23 +452,24 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
         // Gather blocks until the fetch or network limits is reached
         var (
             hash   common.Hash
-            bytes  common.StorageSize
-            bodies []*blockBody
+            bytes  int
+            bodies []*blockBodyRLP
         )
         for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
-            //Retrieve the hash of the next block
+            // Retrieve the hash of the next block
             if err := msgStream.Decode(&hash); err == rlp.EOL {
                 break
             } else if err != nil {
                 return errResp(ErrDecode, "msg %v: %v", msg, err)
             }
-            // Retrieve the requested block, stopping if enough was found
-            if block := pm.chainman.GetBlock(hash); block != nil {
-                bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
-                bytes += block.Size()
+            // Retrieve the requested block body, stopping if enough was found
+            if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 {
+                body := blockBodyRLP(data)
+                bodies = append(bodies, &body)
+                bytes += len(body)
             }
         }
-        return p.SendBlockBodies(bodies)
+        return p.SendBlockBodiesRLP(bodies)

     case p.version >= eth63 && msg.Code == GetNodeDataMsg:
         // Decode the retrieval message
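
The body path now appends the stored RLP verbatim and hands it to SendBlockBodiesRLP instead of rebuilding blockBody structs from decoded blocks. A sketch of how such pre-encoded bodies could be framed into a single response message, assuming rlp.RawValue semantics (raw values are written as-is when encoding); the actual send path is not part of this diff:

// Sketch only: blockBodyRLP is taken from the handler above; framing the
// pre-encoded bodies via rlp.RawValue is an assumption for illustration.
package chaindb

import "github.com/ethereum/go-ethereum/rlp"

type blockBodyRLP []byte

// encodeBodiesMsg packs already-encoded bodies into one RLP list without
// decoding them first, which is the point of storing bodies as raw RLP.
func encodeBodiesMsg(bodies []*blockBodyRLP) ([]byte, error) {
    raw := make([]rlp.RawValue, 0, len(bodies))
    for _, body := range bodies {
        raw = append(raw, rlp.RawValue(*body))
    }
    return rlp.EncodeToBytes(raw)
}
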