core/rawdb: freezer batch write (#23462)
This change is a rewrite of the freezer code. When writing ancient chain data to the freezer, the previous version first encoded each individual item to a temporary buffer, then wrote the buffer. For small item sizes (for example, in the block hash freezer table), this strategy causes a lot of system calls for writing tiny chunks of data. It also allocated a lot of temporary []byte buffers. In the new version, we instead encode multiple items into a reusable batch buffer, which is then written to the file all at once. This avoids performing a system call for every inserted item. To make the internal batching work, the ancient database API had to be changed. While integrating this new API in BlockChain.InsertReceiptChain, additional optimizations were also added there. Co-authored-by: Felix Lange <fjl@twurst.com>
This commit is contained in:
committed by
GitHub
parent
9a0df80bbc
commit
794c6133ef
@ -29,6 +29,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"golang.org/x/crypto/sha3"
|
||||
@ -438,7 +439,7 @@ func TestAncientStorage(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer dir: %v", err)
|
||||
}
|
||||
defer os.Remove(frdir)
|
||||
defer os.RemoveAll(frdir)
|
||||
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
||||
if err != nil {
|
||||
@ -467,8 +468,10 @@ func TestAncientStorage(t *testing.T) {
|
||||
if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
|
||||
t.Fatalf("non existent td returned")
|
||||
}
|
||||
|
||||
// Write and verify the header in the database
|
||||
WriteAncientBlock(db, block, nil, big.NewInt(100))
|
||||
WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100))
|
||||
|
||||
if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
|
||||
t.Fatalf("no header returned")
|
||||
}
|
||||
@ -481,6 +484,7 @@ func TestAncientStorage(t *testing.T) {
|
||||
if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
|
||||
t.Fatalf("no td returned")
|
||||
}
|
||||
|
||||
// Use a fake hash for data retrieval, nothing should be returned.
|
||||
fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
|
||||
if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
|
||||
@ -528,3 +532,141 @@ func TestCanonicalHashIteration(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashesInRange(t *testing.T) {
|
||||
mkHeader := func(number, seq int) *types.Header {
|
||||
h := types.Header{
|
||||
Difficulty: new(big.Int),
|
||||
Number: big.NewInt(int64(number)),
|
||||
GasLimit: uint64(seq),
|
||||
}
|
||||
return &h
|
||||
}
|
||||
db := NewMemoryDatabase()
|
||||
// For each number, write N versions of that particular number
|
||||
total := 0
|
||||
for i := 0; i < 15; i++ {
|
||||
for ii := 0; ii < i; ii++ {
|
||||
WriteHeader(db, mkHeader(i, ii))
|
||||
total++
|
||||
}
|
||||
}
|
||||
if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
|
||||
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
||||
}
|
||||
}
|
||||
|
||||
// This measures the write speed of the WriteAncientBlocks operation.
|
||||
func BenchmarkWriteAncientBlocks(b *testing.B) {
|
||||
// Open freezer database.
|
||||
frdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create temp freezer dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(frdir)
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
||||
// Create the data to insert. The blocks must have consecutive numbers, so we create
|
||||
// all of them ahead of time. However, there is no need to create receipts
|
||||
// individually for each block, just make one batch here and reuse it for all writes.
|
||||
const batchSize = 128
|
||||
const blockTxs = 20
|
||||
allBlocks := makeTestBlocks(b.N, blockTxs)
|
||||
batchReceipts := makeTestReceipts(batchSize, blockTxs)
|
||||
b.ResetTimer()
|
||||
|
||||
// The benchmark loop writes batches of blocks, but note that the total block count is
|
||||
// b.N. This means the resulting ns/op measurement is the time it takes to write a
|
||||
// single block and its associated data.
|
||||
var td = big.NewInt(55)
|
||||
var totalSize int64
|
||||
for i := 0; i < b.N; i += batchSize {
|
||||
length := batchSize
|
||||
if i+batchSize > b.N {
|
||||
length = b.N - i
|
||||
}
|
||||
|
||||
blocks := allBlocks[i : i+length]
|
||||
receipts := batchReceipts[:length]
|
||||
writeSize, err := WriteAncientBlocks(db, blocks, receipts, td)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
totalSize += writeSize
|
||||
}
|
||||
|
||||
// Enable MB/s reporting.
|
||||
b.SetBytes(totalSize / int64(b.N))
|
||||
}
|
||||
|
||||
// makeTestBlocks creates fake blocks for the ancient write benchmark.
|
||||
func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
|
||||
key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
signer := types.LatestSignerForChainID(big.NewInt(8))
|
||||
|
||||
// Create transactions.
|
||||
txs := make([]*types.Transaction, txsPerBlock)
|
||||
for i := 0; i < len(txs); i++ {
|
||||
var err error
|
||||
to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
|
||||
txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
|
||||
Nonce: 2,
|
||||
GasPrice: big.NewInt(30000),
|
||||
Gas: 0x45454545,
|
||||
To: &to,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the blocks.
|
||||
blocks := make([]*types.Block, nblock)
|
||||
for i := 0; i < nblock; i++ {
|
||||
header := &types.Header{
|
||||
Number: big.NewInt(int64(i)),
|
||||
Extra: []byte("test block"),
|
||||
}
|
||||
blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
|
||||
blocks[i].Hash() // pre-cache the block hash
|
||||
}
|
||||
return blocks
|
||||
}
|
||||
|
||||
// makeTestReceipts creates fake receipts for the ancient write benchmark.
|
||||
func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
|
||||
receipts := make([]*types.Receipt, nPerBlock)
|
||||
for i := 0; i < len(receipts); i++ {
|
||||
receipts[i] = &types.Receipt{
|
||||
Status: types.ReceiptStatusSuccessful,
|
||||
CumulativeGasUsed: 0x888888888,
|
||||
Logs: make([]*types.Log, 5),
|
||||
}
|
||||
}
|
||||
allReceipts := make([]types.Receipts, n)
|
||||
for i := 0; i < n; i++ {
|
||||
allReceipts[i] = receipts
|
||||
}
|
||||
return allReceipts
|
||||
}
|
||||
|
Reference in New Issue
Block a user