rlp, trie: faster trie node encoding (#24126)
This change speeds up trie hashing and all other activities that require RLP encoding of trie nodes by approximately 20%. The speedup is achieved by avoiding reflection overhead during node encoding.

The interface type trie.node now contains a method 'encode' that works with rlp.EncoderBuffer. Management of EncoderBuffers is left to the calling code: trie.hasher, which is pooled to avoid allocations, now maintains an EncoderBuffer. This means memory resources related to trie node encoding are tied to the hasher pool.

Co-authored-by: Felix Lange <fjl@twurst.com>
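To make the new calling convention concrete, here is a minimal, self-contained sketch of the pattern described above: a node type implementing encode against a caller-managed rlp.EncoderBuffer. This is not the actual trie package code; toyNode and its fields are made up for illustration, and only the rlp.EncoderBuffer calls visible in the diff below (NewEncoderBuffer, AppendToBytes, Reset) plus its list/bytes writers are assumed.

// Illustrative only: toyNode stands in for a real trie node type.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

type toyNode struct {
	key, val []byte
}

// encode writes the node as a two-element RLP list into the caller-supplied
// buffer. Encoding through a concrete receiver, rather than via reflection,
// is what the change relies on for speed.
func (n *toyNode) encode(w rlp.EncoderBuffer) {
	offset := w.List()
	w.WriteBytes(n.key)
	w.WriteBytes(n.val)
	w.ListEnd(offset)
}

func main() {
	// The caller owns the EncoderBuffer and reuses it across encodings,
	// mirroring how the pooled hasher holds on to its encbuf.
	buf := rlp.NewEncoderBuffer(nil)
	tmp := make([]byte, 0, 64)

	n := &toyNode{key: []byte{0x01, 0x02}, val: []byte("value")}
	n.encode(buf)

	tmp = buf.AppendToBytes(tmp[:0]) // collect the encoding into tmp
	buf.Reset(nil)                   // ready for the next node

	fmt.Printf("encoded: %x\n", tmp)
}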
@@ -24,22 +24,12 @@ import (
 	"golang.org/x/crypto/sha3"
 )
 
-type sliceBuffer []byte
-
-func (b *sliceBuffer) Write(data []byte) (n int, err error) {
-	*b = append(*b, data...)
-	return len(data), nil
-}
-
-func (b *sliceBuffer) Reset() {
-	*b = (*b)[:0]
-}
-
 // hasher is a type used for the trie Hash operation. A hasher has some
 // internal preallocated temp space
 type hasher struct {
 	sha      crypto.KeccakState
-	tmp      sliceBuffer
+	tmp      []byte
+	encbuf   rlp.EncoderBuffer
 	parallel bool // Whether to use paralallel threads when hashing
 }
 
@@ -47,8 +37,9 @@ type hasher struct {
 var hasherPool = sync.Pool{
 	New: func() interface{} {
 		return &hasher{
-			tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
-			sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+			tmp:    make([]byte, 0, 550), // cap is as large as a full fullNode.
+			sha:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
+			encbuf: rlp.NewEncoderBuffer(nil),
 		}
 	},
 }
@@ -153,30 +144,41 @@ func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached
 // into compact form for RLP encoding.
 // If the rlp data is smaller than 32 bytes, `nil` is returned.
 func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
-	h.tmp.Reset()
-	if err := rlp.Encode(&h.tmp, n); err != nil {
-		panic("encode error: " + err.Error())
-	}
+	n.encode(h.encbuf)
+	enc := h.encodedBytes()
 
-	if len(h.tmp) < 32 && !force {
+	if len(enc) < 32 && !force {
 		return n // Nodes smaller than 32 bytes are stored inside their parent
 	}
-	return h.hashData(h.tmp)
+	return h.hashData(enc)
 }
 
 // shortnodeToHash is used to creates a hashNode from a set of hashNodes, (which
 // may contain nil values)
 func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
-	h.tmp.Reset()
-	// Generate the RLP encoding of the node
-	if err := n.EncodeRLP(&h.tmp); err != nil {
-		panic("encode error: " + err.Error())
-	}
+	n.encode(h.encbuf)
+	enc := h.encodedBytes()
 
-	if len(h.tmp) < 32 && !force {
+	if len(enc) < 32 && !force {
 		return n // Nodes smaller than 32 bytes are stored inside their parent
 	}
-	return h.hashData(h.tmp)
+	return h.hashData(enc)
 }
 
+// encodedBytes returns the result of the last encoding operation on h.encbuf.
+// This also resets the encoder buffer.
+//
+// All node encoding must be done like this:
+//
+//	node.encode(h.encbuf)
+//	enc := h.encodedBytes()
+//
+// This convention exists because node.encode can only be inlined/escape-analyzed when
+// called on a concrete receiver type.
+func (h *hasher) encodedBytes() []byte {
+	h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
+	h.encbuf.Reset(nil)
+	return h.tmp
+}
+
 // hashData hashes the provided data
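The encodedBytes convention shown above (encode into h.encbuf, drain it with AppendToBytes, then Reset) works together with the hasher pool so that encoding buffers are recycled rather than reallocated per node. Below is a minimal, hypothetical sketch of that lifecycle using only the public rlp.EncoderBuffer calls that appear in the diff; the pool and helper names (pooledEncoder, encoderPool, encodeListOfBytes) are made up for illustration and are not go-ethereum APIs.

package main

import (
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/rlp"
)

// pooledEncoder plays the role of the hasher: it owns a reusable output
// slice and a reusable EncoderBuffer.
type pooledEncoder struct {
	tmp    []byte
	encbuf rlp.EncoderBuffer
}

var encoderPool = sync.Pool{
	New: func() interface{} {
		return &pooledEncoder{
			tmp:    make([]byte, 0, 550),
			encbuf: rlp.NewEncoderBuffer(nil),
		}
	},
}

// encodeListOfBytes RLP-encodes the items as a list using a pooled buffer and
// returns a copy of the encoding, so the pooled memory can be reused safely.
func encodeListOfBytes(items [][]byte) []byte {
	e := encoderPool.Get().(*pooledEncoder)
	defer encoderPool.Put(e)

	offset := e.encbuf.List()
	for _, it := range items {
		e.encbuf.WriteBytes(it)
	}
	e.encbuf.ListEnd(offset)

	// Same convention as hasher.encodedBytes: drain into tmp, then reset.
	e.tmp = e.encbuf.AppendToBytes(e.tmp[:0])
	e.encbuf.Reset(nil)

	out := make([]byte, len(e.tmp))
	copy(out, e.tmp)
	return out
}

func main() {
	fmt.Printf("%x\n", encodeListOfBytes([][]byte{[]byte("a"), []byte("b")}))
}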