les: historical data garbage collection (#19570)

This change introduces garbage collection for the light client. Historical
chain data is deleted periodically. If you want to disable the GC, use
the --light.nopruning flag.
gary rong authored on 2020-07-13 17:02:54 +08:00, committed by GitHub
parent b8dd0890b3, commit 6eef141aef
45 changed files with 841 additions and 213 deletions
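
Before diving into the diff, here is a minimal sketch of how the new pruning switch is threaded into the CHT and bloom-trie indexer constructors whose signatures change below. It assumes an in-memory database, a nil ODR backend (the server-side case), and illustrative section/confirmation sizes rather than the real les defaults:

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/light"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// disablePruning mirrors --light.nopruning: when true, historical CHT and
	// bloom trie data is kept instead of being garbage collected.
	disablePruning := false

	// Section sizes and confirmation counts here are illustrative only.
	var odr light.OdrBackend // nil ODR backend: the indexers run in server mode
	cht := light.NewChtIndexer(db, odr, 32768, 256, disablePruning)
	bloomTrie := light.NewBloomTrieIndexer(db, odr, 4096, 32768, disablePruning)
	_, _ = cht, bloomTrie
}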


@ -112,7 +112,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
if header := bc.GetHeaderByHash(hash); header != nil {
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
bc.SetHead(header.Number.Uint64() - 1)
log.Error("Chain rewind was successful, resuming normal operation")
log.Info("Chain rewind was successful, resuming normal operation")
}
}
return bc, nil
@ -155,7 +155,11 @@ func (lc *LightChain) loadLastState() error {
// Corrupt or empty database, init from scratch
lc.Reset()
} else {
if header := lc.GetHeaderByHash(head); header != nil {
header := lc.GetHeaderByHash(head)
if header == nil {
// Corrupt or empty database, init from scratch
lc.Reset()
} else {
lc.hc.SetCurrentHeader(header)
}
}
@ -163,7 +167,6 @@ func (lc *LightChain) loadLastState() error {
header := lc.hc.CurrentHeader()
headerTd := lc.GetTd(header.Hash(), header.Number.Uint64())
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(header.Time), 0)))
return nil
}
@ -431,6 +434,17 @@ func (lc *LightChain) GetTdByHash(hash common.Hash) *big.Int {
return lc.hc.GetTdByHash(hash)
}
// GetTdOdr retrieves the total difficulty from the database or network
// by hash and number, caching it (associated with its hash) if found.
func (lc *LightChain) GetTdOdr(ctx context.Context, hash common.Hash, number uint64) *big.Int {
td := lc.GetTd(hash, number)
if td != nil {
return td
}
td, _ = GetTd(ctx, lc.odr, hash, number)
return td
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (lc *LightChain) GetHeader(hash common.Hash, number uint64) *types.Header {


@ -82,7 +82,6 @@ func StorageTrieID(state *TrieID, addrHash, root common.Hash) *TrieID {
// TrieRequest is the ODR request type for state/storage trie entries
type TrieRequest struct {
OdrRequest
Id *TrieID
Key []byte
Proof *NodeSet
@ -95,7 +94,6 @@ func (req *TrieRequest) StoreResult(db ethdb.Database) {
// CodeRequest is the ODR request type for retrieving contract code
type CodeRequest struct {
OdrRequest
Id *TrieID // references storage trie of the account
Hash common.Hash
Data []byte
@ -108,9 +106,9 @@ func (req *CodeRequest) StoreResult(db ethdb.Database) {
// BlockRequest is the ODR request type for retrieving block bodies
type BlockRequest struct {
OdrRequest
Hash common.Hash
Number uint64
Header *types.Header
Rlp []byte
}
@ -119,9 +117,8 @@ func (req *BlockRequest) StoreResult(db ethdb.Database) {
rawdb.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp)
}
// ReceiptsRequest is the ODR request type for retrieving block bodies
// ReceiptsRequest is the ODR request type for retrieving receipts.
type ReceiptsRequest struct {
OdrRequest
Untrusted bool // Indicator whether the result retrieved is trusted or not
Hash common.Hash
Number uint64
@ -138,7 +135,6 @@ func (req *ReceiptsRequest) StoreResult(db ethdb.Database) {
// ChtRequest is the ODR request type for state/storage trie entries
type ChtRequest struct {
OdrRequest
Untrusted bool // Indicator whether the result retrieved is trusted or not
PeerId string // The specified peer id from which to retrieve data.
Config *IndexerConfig
@ -193,7 +189,6 @@ type TxStatus struct {
// TxStatusRequest is the ODR request type for retrieving transaction status
type TxStatusRequest struct {
OdrRequest
Hashes []common.Hash
Status []TxStatus
}


@ -19,6 +19,7 @@ package light
import (
"bytes"
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@ -30,65 +31,83 @@ import (
var sha3Nil = crypto.Keccak256Hash(nil)
// GetHeaderByNumber retrieves the canonical block header corresponding to the
// given number.
func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*types.Header, error) {
// Try to find it in the local database first.
db := odr.Database()
hash := rawdb.ReadCanonicalHash(db, number)
if (hash != common.Hash{}) {
// if there is a canonical hash, there is a header too
header := rawdb.ReadHeader(db, hash, number)
if header == nil {
panic("Canonical hash present but header not found")
}
return header, nil
}
var (
chtCount, sectionHeadNum uint64
sectionHead common.Hash
)
if odr.ChtIndexer() != nil {
chtCount, sectionHeadNum, sectionHead = odr.ChtIndexer().Sections()
canonicalHash := rawdb.ReadCanonicalHash(db, sectionHeadNum)
// if the CHT was injected as a trusted checkpoint, we have no canonical hash yet so we accept zero hash too
for chtCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
chtCount--
if chtCount > 0 {
sectionHeadNum = chtCount*odr.IndexerConfig().ChtSize - 1
sectionHead = odr.ChtIndexer().SectionHead(chtCount - 1)
canonicalHash = rawdb.ReadCanonicalHash(db, sectionHeadNum)
}
// If there is a canonical hash, there should be a header too.
// However, if it has been pruned, re-fetch it from the network.
if (hash != common.Hash{}) {
if header := rawdb.ReadHeader(db, hash, number); header != nil {
return header, nil
}
}
if number >= chtCount*odr.IndexerConfig().ChtSize {
// Retrieve the header via ODR, ensure the requested header is covered
// by local trusted CHT.
chts, _, chtHead := odr.ChtIndexer().Sections()
if number >= chts*odr.IndexerConfig().ChtSize {
return nil, errNoTrustedCht
}
r := &ChtRequest{ChtRoot: GetChtRoot(db, chtCount-1, sectionHead), ChtNum: chtCount - 1, BlockNum: number, Config: odr.IndexerConfig()}
r := &ChtRequest{
ChtRoot: GetChtRoot(db, chts-1, chtHead),
ChtNum: chts - 1,
BlockNum: number,
Config: odr.IndexerConfig(),
}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
}
return r.Header, nil
}
// GetUntrustedHeaderByNumber fetches specified block header without correctness checking.
// Note this function should only be used in light client checkpoint syncing.
// GetUntrustedHeaderByNumber retrieves specified block header without
// correctness checking. Note this function should only be used in light
// client checkpoint syncing.
func GetUntrustedHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64, peerId string) (*types.Header, error) {
r := &ChtRequest{BlockNum: number, ChtNum: number / odr.IndexerConfig().ChtSize, Untrusted: true, PeerId: peerId, Config: odr.IndexerConfig()}
// TODO(rjl493456442) it's a hack to retrieve headers which are not covered
// by the CHT. Fix it in LES4.
r := &ChtRequest{
BlockNum: number,
ChtNum: number / odr.IndexerConfig().ChtSize,
Untrusted: true,
PeerId: peerId,
Config: odr.IndexerConfig(),
}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
}
return r.Header, nil
}
// GetCanonicalHash retrieves the canonical block hash corresponding to the number.
func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) {
hash := rawdb.ReadCanonicalHash(odr.Database(), number)
if (hash != common.Hash{}) {
if hash != (common.Hash{}) {
return hash, nil
}
header, err := GetHeaderByNumber(ctx, odr, number)
if header != nil {
return header.Hash(), nil
if err != nil {
return common.Hash{}, err
}
return common.Hash{}, err
// The number -> canonical hash mapping has already been stored in the db; return it.
return header.Hash(), nil
}
// GetTd retrieves the total difficulty corresponding to the number and hash.
func GetTd(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*big.Int, error) {
td := rawdb.ReadTd(odr.Database(), hash, number)
if td != nil {
return td, nil
}
_, err := GetHeaderByNumber(ctx, odr, number)
if err != nil {
return nil, err
}
// The <hash, number> -> td mapping has already been stored in the db; read it.
return rawdb.ReadTd(odr.Database(), hash, number), nil
}
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
@ -96,15 +115,19 @@ func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number ui
if data := rawdb.ReadBodyRLP(odr.Database(), hash, number); data != nil {
return data, nil
}
r := &BlockRequest{Hash: hash, Number: number}
// Retrieve the block header first and pass it for verification.
header, err := GetHeaderByNumber(ctx, odr, number)
if err != nil {
return nil, errNoHeader
}
r := &BlockRequest{Hash: hash, Number: number, Header: header}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {
return r.Rlp, nil
}
return r.Rlp, nil
}
// GetBody retrieves the block body (transactons, uncles) corresponding to the
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash.
func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Body, error) {
data, err := GetBodyRLP(ctx, odr, hash, number)
@ -122,8 +145,8 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6
// back from the stored header and body.
func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) {
// Retrieve the block header and body contents
header := rawdb.ReadHeader(odr.Database(), hash, number)
if header == nil {
header, err := GetHeaderByNumber(ctx, odr, number)
if err != nil {
return nil, errNoHeader
}
body, err := GetBody(ctx, odr, hash, number)
@ -140,7 +163,11 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num
// Assume receipts are already stored locally and attempt to retrieve.
receipts := rawdb.ReadRawReceipts(odr.Database(), hash, number)
if receipts == nil {
r := &ReceiptsRequest{Hash: hash, Number: number}
header, err := GetHeaderByNumber(ctx, odr, number)
if err != nil {
return nil, errNoHeader
}
r := &ReceiptsRequest{Hash: hash, Number: number, Header: header}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
}
@ -171,7 +198,6 @@ func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number
if err != nil {
return nil, err
}
// Return the logs without deriving any computed fields on the receipts
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
@ -203,64 +229,51 @@ func GetUntrustedBlockLogs(ctx context.Context, odr OdrBackend, header *types.He
return logs, nil
}
// GetBloomBits retrieves a batch of compressed bloomBits vectors belonging to the given bit index and section indexes
func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxList []uint64) ([][]byte, error) {
// GetBloomBits retrieves a batch of compressed bloomBits vectors belonging to
// the given bit index and section indexes.
func GetBloomBits(ctx context.Context, odr OdrBackend, bit uint, sections []uint64) ([][]byte, error) {
var (
db = odr.Database()
result = make([][]byte, len(sectionIdxList))
reqList []uint64
reqIdx []int
reqIndex []int
reqSections []uint64
db = odr.Database()
result = make([][]byte, len(sections))
)
var (
bloomTrieCount, sectionHeadNum uint64
sectionHead common.Hash
)
if odr.BloomTrieIndexer() != nil {
bloomTrieCount, sectionHeadNum, sectionHead = odr.BloomTrieIndexer().Sections()
canonicalHash := rawdb.ReadCanonicalHash(db, sectionHeadNum)
// if the BloomTrie was injected as a trusted checkpoint, we have no canonical hash yet so we accept zero hash too
for bloomTrieCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
bloomTrieCount--
if bloomTrieCount > 0 {
sectionHeadNum = bloomTrieCount*odr.IndexerConfig().BloomTrieSize - 1
sectionHead = odr.BloomTrieIndexer().SectionHead(bloomTrieCount - 1)
canonicalHash = rawdb.ReadCanonicalHash(db, sectionHeadNum)
}
}
}
for i, sectionIdx := range sectionIdxList {
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*odr.IndexerConfig().BloomSize-1)
// if we don't have the canonical hash stored for this section head number, we'll still look for
// an entry with a zero sectionHead (we store it with zero section head too if we don't know it
// at the time of the retrieval)
bloomBits, err := rawdb.ReadBloomBits(db, bitIdx, sectionIdx, sectionHead)
if err == nil {
blooms, _, sectionHead := odr.BloomTrieIndexer().Sections()
for i, section := range sections {
sectionHead := rawdb.ReadCanonicalHash(db, (section+1)*odr.IndexerConfig().BloomSize-1)
// If we don't have the canonical hash stored for this section head number,
// we'll still look for an entry with a zero sectionHead (we store it with
// zero section head too if we don't know it at the time of the retrieval)
if bloomBits, _ := rawdb.ReadBloomBits(db, bit, section, sectionHead); len(bloomBits) != 0 {
result[i] = bloomBits
} else {
// TODO(rjl493456442) Convert sectionIndex to BloomTrie relative index
if sectionIdx >= bloomTrieCount {
return nil, errNoTrustedBloomTrie
}
reqList = append(reqList, sectionIdx)
reqIdx = append(reqIdx, i)
continue
}
// TODO(rjl493456442) Convert sectionIndex to BloomTrie relative index
if section >= blooms {
return nil, errNoTrustedBloomTrie
}
reqSections = append(reqSections, section)
reqIndex = append(reqIndex, i)
}
if reqList == nil {
// All bloombits were found in the database, nothing to query via odr; return.
if reqSections == nil {
return result, nil
}
r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1,
BitIdx: bitIdx, SectionIndexList: reqList, Config: odr.IndexerConfig()}
// Send odr request to retrieve missing bloombits.
r := &BloomRequest{
BloomTrieRoot: GetBloomTrieRoot(db, blooms-1, sectionHead),
BloomTrieNum: blooms - 1,
BitIdx: bit,
SectionIndexList: reqSections,
Config: odr.IndexerConfig(),
}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {
for i, idx := range reqIdx {
result[idx] = r.BloomBits[i]
}
return result, nil
}
for i, idx := range reqIndex {
result[idx] = r.BloomBits[i]
}
return result, nil
}
// GetTransaction retrieves a canonical transaction by hash and also returns its position in the chain
@ -268,17 +281,16 @@ func GetTransaction(ctx context.Context, odr OdrBackend, txHash common.Hash) (*t
r := &TxStatusRequest{Hashes: []common.Hash{txHash}}
if err := odr.Retrieve(ctx, r); err != nil || r.Status[0].Status != core.TxStatusIncluded {
return nil, common.Hash{}, 0, 0, err
} else {
pos := r.Status[0].Lookup
// first ensure that we have the header, otherwise block body retrieval will fail
// also verify if this is a canonical block by getting the header by number and checking its hash
if header, err := GetHeaderByNumber(ctx, odr, pos.BlockIndex); err != nil || header.Hash() != pos.BlockHash {
return nil, common.Hash{}, 0, 0, err
}
if body, err := GetBody(ctx, odr, pos.BlockHash, pos.BlockIndex); err != nil || uint64(len(body.Transactions)) <= pos.Index || body.Transactions[pos.Index].Hash() != txHash {
return nil, common.Hash{}, 0, 0, err
} else {
return body.Transactions[pos.Index], pos.BlockHash, pos.BlockIndex, pos.Index, nil
}
}
pos := r.Status[0].Lookup
// first ensure that we have the header, otherwise block body retrieval will fail
// also verify if this is a canonical block by getting the header by number and checking its hash
if header, err := GetHeaderByNumber(ctx, odr, pos.BlockIndex); err != nil || header.Hash() != pos.BlockHash {
return nil, common.Hash{}, 0, 0, err
}
body, err := GetBody(ctx, odr, pos.BlockHash, pos.BlockIndex)
if err != nil || uint64(len(body.Transactions)) <= pos.Index || body.Transactions[pos.Index].Hash() != txHash {
return nil, common.Hash{}, 0, 0, err
}
return body.Transactions[pos.Index], pos.BlockHash, pos.BlockIndex, pos.Index, nil
}


@ -17,6 +17,7 @@
package light
import (
"bytes"
"context"
"encoding/binary"
"errors"
@ -24,6 +25,7 @@ import (
"math/big"
"time"
"github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
@ -128,23 +130,27 @@ func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common
// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
disablePruning bool
diskdb, trieTable ethdb.Database
odr OdrBackend
triedb *trie.Database
trieset mapset.Set
section, sectionSize uint64
lastHash common.Hash
trie *trie.Trie
}
// NewChtIndexer creates a Cht chain indexer
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *core.ChainIndexer {
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, disablePruning bool) *core.ChainIndexer {
trieTable := rawdb.NewTable(db, ChtTablePrefix)
backend := &ChtIndexerBackend{
diskdb: db,
odr: odr,
trieTable: trieTable,
triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
sectionSize: size,
diskdb: db,
odr: odr,
trieTable: trieTable,
triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
trieset: mapset.NewSet(),
sectionSize: size,
disablePruning: disablePruning,
}
return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
}
@ -189,7 +195,6 @@ func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSecti
c.trie, err = trie.New(root, c.triedb)
}
}
c.section = section
return err
}
@ -216,13 +221,83 @@ func (c *ChtIndexerBackend) Commit() error {
if err != nil {
return err
}
c.triedb.Commit(root, false)
// Prune historical trie nodes if necessary.
if !c.disablePruning {
// Flush the triedb and track the latest trie nodes.
c.trieset.Clear()
c.triedb.Commit(root, false, func(hash common.Hash) { c.trieset.Add(hash) })
it := c.trieTable.NewIterator(nil, nil)
defer it.Release()
var (
deleted int
remaining int
t = time.Now()
)
for it.Next() {
trimmed := bytes.TrimPrefix(it.Key(), []byte(ChtTablePrefix))
if !c.trieset.Contains(common.BytesToHash(trimmed)) {
c.trieTable.Delete(trimmed)
deleted += 1
} else {
remaining += 1
}
}
log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
} else {
c.triedb.Commit(root, false, nil)
}
log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
return nil
}
// Prune implements core.ChainIndexerBackend which deletes all chain data
// (except hash<->number mappings) older than the specified threshold.
func (c *ChtIndexerBackend) Prune(threshold uint64) error {
// Short circuit if the light pruning is disabled.
if c.disablePruning {
return nil
}
t := time.Now()
// Always keep genesis header in database.
start, end := uint64(1), (threshold+1)*c.sectionSize
var batch = c.diskdb.NewBatch()
for {
numbers, hashes := rawdb.ReadAllCanonicalHashes(c.diskdb, start, end, 10240)
if len(numbers) == 0 {
break
}
for i := 0; i < len(numbers); i++ {
// Keep the hash<->number mapping in the database, otherwise the hash-based
// APIs (e.g. GetReceipt, GetLogs) will be broken.
//
// Storage-wise, each mapping is ~41 bytes, so a full section (32768
// headers) amounts to roughly 1.3MB, which is acceptable.
//
// In order to get rid of this index entirely, we need an additional flag
// specifying how much historical data the light client can serve.
rawdb.DeleteCanonicalHash(batch, numbers[i])
rawdb.DeleteBlockWithoutNumber(batch, hashes[i], numbers[i])
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
start = numbers[len(numbers)-1] + 1
}
if err := batch.Write(); err != nil {
return err
}
log.Debug("Prune history headers", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(t)))
return nil
}
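
The Prune hook above deletes everything in sections up to the given threshold except the genesis header and the hash<->number mappings. The sketch below only illustrates the hook's contract; the retention policy and every name other than Prune are invented for the example:

package main

import "log"

// prunableBackend captures just the hook this change adds to
// core.ChainIndexerBackend.
type prunableBackend interface {
	Prune(threshold uint64) error
}

// pruneHistory is an invented driver: once `finished` sections have been
// committed, it keeps the newest `retain` sections and asks the backend to
// drop the rest.
func pruneHistory(b prunableBackend, finished, retain uint64) {
	if finished <= retain {
		return // not enough history accumulated to prune anything
	}
	// Sections 0 .. finished-retain-1 fall outside the retention window.
	if err := b.Prune(finished - retain - 1); err != nil {
		log.Printf("light pruning failed: %v", err)
	}
}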
var (
bloomTriePrefix = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
BloomTrieTablePrefix = "blt-"
@ -245,8 +320,10 @@ func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root
// BloomTrieIndexerBackend implements core.ChainIndexerBackend
type BloomTrieIndexerBackend struct {
disablePruning bool
diskdb, trieTable ethdb.Database
triedb *trie.Database
trieset mapset.Set
odr OdrBackend
section uint64
parentSize uint64
@ -257,15 +334,17 @@ type BloomTrieIndexerBackend struct {
}
// NewBloomTrieIndexer creates a BloomTrie chain indexer
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64) *core.ChainIndexer {
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64, disablePruning bool) *core.ChainIndexer {
trieTable := rawdb.NewTable(db, BloomTrieTablePrefix)
backend := &BloomTrieIndexerBackend{
diskdb: db,
odr: odr,
trieTable: trieTable,
triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
parentSize: parentSize,
size: size,
diskdb: db,
odr: odr,
trieTable: trieTable,
triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
trieset: mapset.NewSet(),
parentSize: parentSize,
size: size,
disablePruning: disablePruning,
}
backend.bloomTrieRatio = size / parentSize
backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
@ -303,7 +382,6 @@ func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section
}
}()
}
for i := uint(0); i < types.BloomBitLength; i++ {
indexCh <- i
}
@ -380,10 +458,51 @@ func (b *BloomTrieIndexerBackend) Commit() error {
if err != nil {
return err
}
b.triedb.Commit(root, false)
// Prune historical trie nodes if necessary.
if !b.disablePruning {
// Flush the triedb and track the latest trie nodes.
b.trieset.Clear()
b.triedb.Commit(root, false, func(hash common.Hash) { b.trieset.Add(hash) })
it := b.trieTable.NewIterator(nil, nil)
defer it.Release()
var (
deleted int
remaining int
t = time.Now()
)
for it.Next() {
trimmed := bytes.TrimPrefix(it.Key(), []byte(BloomTrieTablePrefix))
if !b.trieset.Contains(common.BytesToHash(trimmed)) {
b.trieTable.Delete(trimmed)
deleted += 1
} else {
remaining += 1
}
}
log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
} else {
b.triedb.Commit(root, false, nil)
}
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
return nil
}
// Prune implements core.ChainIndexerBackend which deletes all
// bloombits older than the specified threshold.
func (b *BloomTrieIndexerBackend) Prune(threshold uint64) error {
// Short circuit if the light pruning is disabled.
if b.disablePruning {
return nil
}
start := time.Now()
for i := uint(0); i < types.BloomBitLength; i++ {
rawdb.DeleteBloombits(b.diskdb, i, 0, threshold*b.bloomTrieRatio+b.bloomTrieRatio)
}
log.Debug("Prune history bloombits", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
}