rlp, trie: faster trie node encoding (#24126)
This change speeds up trie hashing and all other activities that require RLP encoding of trie nodes by approximately 20%. The speedup is achieved by avoiding reflection overhead during node encoding.

The interface type trie.node now has an encode method that works with rlp.EncoderBuffer. Management of EncoderBuffers is left to calling code. trie.hasher, which is pooled to avoid allocations, now maintains an EncoderBuffer, so the memory used for trie node encoding is tied to the hasher pool.

Co-authored-by: Felix Lange <fjl@twurst.com>
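As a minimal sketch of the pattern described above: the stand-in type sketchFullNode below is an assumption for illustration, not the real trie.rawFullNode, but it shows a node writing itself directly into an rlp.EncoderBuffer instead of going through reflection, and an EncodeRLP wrapper of the same shape as the change in the first hunk below.

// Hedged sketch only: sketchFullNode is an illustrative stand-in, not the
// actual go-ethereum trie node type. It shows a node encoding itself into an
// rlp.EncoderBuffer, bypassing the reflection path of generic rlp.Encode.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/rlp"
)

// sketchFullNode stands in for a 17-element branch node whose children are
// already reduced to raw byte references (hashes, or nil for empty slots).
type sketchFullNode struct {
	children [17][]byte
}

// encode appends the node to the EncoderBuffer as an RLP list of 17 items.
// Nil children come out as empty RLP strings.
func (n *sketchFullNode) encode(w rlp.EncoderBuffer) {
	offset := w.List()
	for _, child := range n.children {
		w.WriteBytes(child)
	}
	w.ListEnd(offset)
}

// EncodeRLP keeps the node usable with the generic rlp API by wrapping the
// buffer management around encode, mirroring the diff below.
func (n *sketchFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

func main() {
	var n sketchFullNode
	n.children[3] = []byte{0xca, 0xfe}

	// Management of the EncoderBuffer stays with the caller.
	var out bytes.Buffer
	eb := rlp.NewEncoderBuffer(&out)
	n.encode(eb)
	if err := eb.Flush(); err != nil {
		panic(err)
	}
	fmt.Printf("encoded node: %x\n", out.Bytes())
}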
@@ -113,16 +113,9 @@ func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
 func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }
 
 func (n rawFullNode) EncodeRLP(w io.Writer) error {
-	var nodes [17]node
-
-	for i, child := range n {
-		if child != nil {
-			nodes[i] = child
-		} else {
-			nodes[i] = nilValueNode
-		}
-	}
-	return rlp.Encode(w, nodes)
+	eb := rlp.NewEncoderBuffer(w)
+	n.encode(eb)
+	return eb.Flush()
 }
 
 // rawShortNode represents only the useful data content of a short node, with the
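The commit message also notes that trie.hasher, being pooled, now maintains an EncoderBuffer, so buffer memory is recycled together with the hasher. A rough sketch of that ownership pattern, with assumed names (sketchHasher, sketchHasherPool) rather than the real trie internals:

// Hedged sketch of a pooled hasher that owns its rlp.EncoderBuffer; the names
// here are illustrative, not the go-ethereum trie types.
package triesketch

import (
	"sync"

	"github.com/ethereum/go-ethereum/rlp"
)

type sketchHasher struct {
	encbuf rlp.EncoderBuffer
}

// Pooling the hasher also pools its encoder buffer, so the memory used for
// node encoding lives and dies with the hasher pool.
var sketchHasherPool = sync.Pool{
	New: func() interface{} {
		return &sketchHasher{encbuf: rlp.NewEncoderBuffer(nil)}
	},
}

func newSketchHasher() *sketchHasher     { return sketchHasherPool.Get().(*sketchHasher) }
func returnSketchHasher(h *sketchHasher) { sketchHasherPool.Put(h) }

// encodedBytes copies out whatever has been encoded so far and resets the
// buffer so the same hasher can encode the next node.
func (h *sketchHasher) encodedBytes() []byte {
	result := h.encbuf.ToBytes()
	h.encbuf.Reset(nil)
	return result
}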
@@ -164,11 +157,7 @@ func (n *cachedNode) rlp() []byte {
 	if node, ok := n.node.(rawNode); ok {
 		return node
 	}
-	blob, err := rlp.EncodeToBytes(n.node)
-	if err != nil {
-		panic(err)
-	}
-	return blob
+	return nodeToBytes(n.node)
 }
 
 // obj returns the decoded and expanded trie node, either directly from the cache,
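nodeToBytes itself is not shown in this diff view. Under the description above, one plausible shape for such a helper, sketched here with assumed names rather than taken from the actual source, is a thin wrapper that encodes into a writer-less EncoderBuffer and returns the collected bytes:

// Hedged sketch: sketchNodeToBytes illustrates one way a nodeToBytes-style
// helper could collect a node's encoding into a byte slice.
package triesketch

import "github.com/ethereum/go-ethereum/rlp"

// encoder is the minimal behaviour the sketch assumes of a trie node.
type encoder interface {
	encode(w rlp.EncoderBuffer)
}

func sketchNodeToBytes(n encoder) []byte {
	// No output writer: the buffer just accumulates bytes in memory.
	w := rlp.NewEncoderBuffer(nil)
	n.encode(w)
	return w.ToBytes()
}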