Merge pull request #14631 from zsfelfoldi/bloombits2
core/bloombits, eth/filter: transformed bloom bitmap based log search
@@ -759,12 +759,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 				log.Crit("Failed to write block receipts", "err", err)
 				return
 			}
-			if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
-				errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
-				atomic.AddInt32(&failed, 1)
-				log.Crit("Failed to write log blooms", "err", err)
-				return
-			}
 			if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
 				errs[index] = fmt.Errorf("failed to write lookup metadata: %v", err)
 				atomic.AddInt32(&failed, 1)
@@ -1017,10 +1011,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 		if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
 			return i, err
 		}
-		// Write map map bloom filters
-		if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
-			return i, err
-		}
 		// Write hash preimages
 		if err := WritePreimages(bc.chainDb, block.NumberU64(), state.Preimages()); err != nil {
 			return i, err
@@ -1178,11 +1168,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
 			return err
 		}
-		// Write map map bloom filters
-		receipts := GetBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64())
-		if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
-			return err
-		}
 		addedTxs = append(addedTxs, block.Transactions()...)
 	}
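These hunks drop the per-block mipmap bloom writes: the new core/bloombits package replaces them with per-section rotated bloom bitmaps. As a reviewer's aid, a minimal, self-contained Go sketch of that rotation idea; the demo section size, bit layout and names are ours, not the geth API (the real layout follows in generator.go below).

package main

import "fmt"

const sectionSize = 8 // blocks per section; geth uses 4096

// rotate transposes per-block 2048-bit blooms so that bit i of every block in
// the section lands in one contiguous vector: checking one bloom bit across
// the whole section becomes a single sequential read.
func rotate(blooms [sectionSize][2048 / 8]byte) [2048][]byte {
	var bits [2048][]byte
	for i := range bits {
		bits[i] = make([]byte, sectionSize/8)
	}
	for block := 0; block < sectionSize; block++ {
		for i := 0; i < 2048; i++ {
			if blooms[block][i/8]&(1<<byte(7-i%8)) != 0 {
				bits[i][block/8] |= 1 << byte(7-block%8)
			}
		}
	}
	return bits
}

func main() {
	var blooms [sectionSize][256]byte
	blooms[3][0] = 0x80 // bloom bit 0 set in block 3 only (demo bit layout)
	fmt.Printf("%08b\n", rotate(blooms)[0][0]) // 00010000 -> block 3 is a candidate
}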
core/bloombits/doc.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bloombits implements bloom filtering on batches of data.
package bloombits
core/bloombits/generator.go (new file, 87 lines)
@@ -0,0 +1,87 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
)

// errSectionOutOfBounds is returned if the user tries to add more bloom filters
// to the batch than there is space for, or tries to retrieve beyond the capacity.
var errSectionOutOfBounds = errors.New("section out of bounds")

// Generator takes a number of bloom filters and generates the rotated bloom bits
// to be used for batched filtering.
type Generator struct {
	blooms   [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
	sections uint                         // Number of sections to batch together
	nextBit  uint                         // Next bit to set when adding a bloom
}

// NewGenerator creates a rotated bloom generator that can iteratively fill a
// batched bloom filter's bits.
func NewGenerator(sections uint) (*Generator, error) {
	if sections%8 != 0 {
		return nil, errors.New("section count not multiple of 8")
	}
	b := &Generator{sections: sections}
	for i := 0; i < types.BloomBitLength; i++ {
		b.blooms[i] = make([]byte, sections/8)
	}
	return b, nil
}

// AddBloom takes a single bloom filter and sets the corresponding bit column
// in memory accordingly.
func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
	// Make sure we're not adding more bloom filters than our capacity
	if b.nextBit >= b.sections {
		return errSectionOutOfBounds
	}
	if b.nextBit != index {
		return errors.New("bloom filter with unexpected index")
	}
	// Rotate the bloom and insert it into our collection
	byteIndex := b.nextBit / 8
	bitMask := byte(1) << byte(7-b.nextBit%8)

	for i := 0; i < types.BloomBitLength; i++ {
		bloomByteIndex := types.BloomByteLength - 1 - i/8
		bloomBitMask := byte(1) << byte(i%8)

		if (bloom[bloomByteIndex] & bloomBitMask) != 0 {
			b.blooms[i][byteIndex] |= bitMask
		}
	}
	b.nextBit++

	return nil
}

// Bitset returns the bit vector belonging to the given bit index after all
// blooms have been added.
func (b *Generator) Bitset(idx uint) ([]byte, error) {
	if b.nextBit != b.sections {
		return nil, errors.New("bloom not fully generated yet")
	}
	if idx >= b.sections {
		return nil, errSectionOutOfBounds
	}
	return b.blooms[idx], nil
}
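As a quick aid, a hedged usage sketch of the Generator API above: blooms must be added strictly in index order, and bitsets are only readable once the whole batch has been filled. The all-zero placeholder bloom stands in for real header blooms.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	const sectionSize = 4096 // must be a multiple of 8

	gen, err := bloombits.NewGenerator(sectionSize)
	if err != nil {
		log.Fatal(err)
	}
	// Blooms must be added strictly in index order.
	for i := uint(0); i < sectionSize; i++ {
		var bloom types.Bloom // all-zero placeholder; normally header.Bloom
		if err := gen.AddBloom(i, bloom); err != nil {
			log.Fatal(err)
		}
	}
	bits, err := gen.Bitset(0) // vector of bloom bit 0 across the section
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(bits)) // sectionSize/8 == 512 bytes
}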
core/bloombits/generator_test.go (new file, 60 lines)
@@ -0,0 +1,60 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"bytes"
	"math/rand"
	"testing"

	"github.com/ethereum/go-ethereum/core/types"
)

// Tests that batched bloom bits are correctly rotated from the input bloom
// filters.
func TestGenerator(t *testing.T) {
	// Generate the input and the rotated output
	var input, output [types.BloomBitLength][types.BloomByteLength]byte

	for i := 0; i < types.BloomBitLength; i++ {
		for j := 0; j < types.BloomBitLength; j++ {
			bit := byte(rand.Int() % 2)

			input[i][j/8] |= bit << byte(7-j%8)
			output[types.BloomBitLength-1-j][i/8] |= bit << byte(7-i%8)
		}
	}
	// Crunch the input through the generator and verify the result
	gen, err := NewGenerator(types.BloomBitLength)
	if err != nil {
		t.Fatalf("failed to create bloombit generator: %v", err)
	}
	for i, bloom := range input {
		if err := gen.AddBloom(uint(i), bloom); err != nil {
			t.Fatalf("bloom %d: failed to add: %v", i, err)
		}
	}
	for i, want := range output {
		have, err := gen.Bitset(uint(i))
		if err != nil {
			t.Fatalf("output %d: failed to retrieve bits: %v", i, err)
		}
		if !bytes.Equal(have, want[:]) {
			t.Errorf("output %d: bit vector mismatch have %x, want %x", i, have, want)
		}
	}
}
core/bloombits/matcher.go (new file, 615 lines)
@@ -0,0 +1,615 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"bytes"
	"errors"
	"math"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/crypto"
)

// bloomIndexes represents the bit indexes inside the bloom filter that belong
// to some key.
type bloomIndexes [3]uint

// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
func calcBloomIndexes(b []byte) bloomIndexes {
	b = crypto.Keccak256(b)

	var idxs bloomIndexes
	for i := 0; i < len(idxs); i++ {
		idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
	}
	return idxs
}

// partialMatches with a non-nil vector represents a section in which some sub-
// matchers have already found potential matches. Subsequent sub-matchers will
// binary AND their matches with this vector. If vector is nil, it represents a
// section to be processed by the first sub-matcher.
type partialMatches struct {
	section uint64
	bitset  []byte
}

// Retrieval represents a request for retrieval task assignments for a given
// bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct.
type Retrieval struct {
	Bit      uint
	Sections []uint64
	Bitsets  [][]byte
}

// Matcher is a pipelined system of schedulers and logic matchers which perform
// binary AND/OR operations on the bit-streams, creating a stream of potential
// blocks to inspect for data content.
type Matcher struct {
	sectionSize uint64 // Size of the data batches to filter on

	filters    [][]bloomIndexes    // Filters the system is matching for
	schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits

	retrievers chan chan uint       // Retriever processes waiting for bit allocations
	counters   chan chan uint       // Retriever processes waiting for task count reports
	retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
	deliveries chan *Retrieval      // Retriever processes waiting for task response deliveries

	running uint32 // Atomic flag whether a session is live or not
}

// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
// address and topic filtering on them.
func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
	// Create the matcher instance
	m := &Matcher{
		sectionSize: sectionSize,
		schedulers:  make(map[uint]*scheduler),
		retrievers:  make(chan chan uint),
		counters:    make(chan chan uint),
		retrievals:  make(chan chan *Retrieval),
		deliveries:  make(chan *Retrieval),
	}
	// Calculate the bloom bit indexes for the groups we're interested in
	m.filters = nil

	for _, filter := range filters {
		bloomBits := make([]bloomIndexes, len(filter))
		for i, clause := range filter {
			bloomBits[i] = calcBloomIndexes(clause)
		}
		m.filters = append(m.filters, bloomBits)
	}
	// For every bit, create a scheduler to load/download the bit vectors
	for _, bloomIndexLists := range m.filters {
		for _, bloomIndexList := range bloomIndexLists {
			for _, bloomIndex := range bloomIndexList {
				m.addScheduler(bloomIndex)
			}
		}
	}
	return m
}

// addScheduler adds a bit stream retrieval scheduler for the given bit index if
// one does not exist yet. If the bit is already selected for filtering, the
// existing scheduler is reused.
func (m *Matcher) addScheduler(idx uint) {
	if _, ok := m.schedulers[idx]; ok {
		return
	}
	m.schedulers[idx] = newScheduler(idx)
}

// Start starts the matching process and returns a stream of bloom matches in
// a given range of blocks. If there are no more matches in the range, the result
// channel is closed.
func (m *Matcher) Start(begin, end uint64, results chan uint64) (*MatcherSession, error) {
	// Make sure we're not creating concurrent sessions
	if atomic.SwapUint32(&m.running, 1) == 1 {
		return nil, errors.New("matcher already running")
	}
	defer atomic.StoreUint32(&m.running, 0)

	// Initiate a new matching round
	session := &MatcherSession{
		matcher: m,
		quit:    make(chan struct{}),
		kill:    make(chan struct{}),
	}
	for _, scheduler := range m.schedulers {
		scheduler.reset()
	}
	sink := m.run(begin, end, cap(results), session)

	// Read the output from the result sink and deliver it to the user
	session.pend.Add(1)
	go func() {
		defer session.pend.Done()
		defer close(results)

		for {
			select {
			case <-session.quit:
				return

			case res, ok := <-sink:
				// New match result found
				if !ok {
					return
				}
				// Calculate the first and last blocks of the section
				sectionStart := res.section * m.sectionSize

				first := sectionStart
				if begin > first {
					first = begin
				}
				last := sectionStart + m.sectionSize - 1
				if end < last {
					last = end
				}
				// Iterate over all the blocks in the section and return the matching ones
				for i := first; i <= last; i++ {
					// Skip the entire byte if no matches are found inside
					next := res.bitset[(i-sectionStart)/8]
					if next == 0 {
						i += 7
						continue
					}
					// Some bit is set, do the actual submatching
					if bit := 7 - i%8; next&(1<<bit) != 0 {
						select {
						case <-session.quit:
							return
						case results <- i:
						}
					}
				}
			}
		}
	}()
	return session, nil
}

// run creates a daisy-chain of sub-matchers, one for the address set and one
// for each topic set, each sub-matcher receiving a section only if the previous
// ones have all found a potential match in one of the blocks of the section,
// then binary AND-ing its own matches and forwarding the result to the next one.
//
// The method starts feeding the section indexes into the first sub-matcher on a
// new goroutine and returns a sink channel receiving the results.
func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) chan *partialMatches {
	// Create the source channel and feed section indexes into it
	source := make(chan *partialMatches, buffer)

	session.pend.Add(1)
	go func() {
		defer session.pend.Done()
		defer close(source)

		for i := begin / m.sectionSize; i <= end/m.sectionSize; i++ {
			select {
			case <-session.quit:
				return
			case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
			}
		}
	}()
	// Assemble the daisy-chained filtering pipeline
	next := source
	dist := make(chan *request, buffer)

	for _, bloom := range m.filters {
		next = m.subMatch(next, dist, bloom, session)
	}
	// Start the request distribution
	session.pend.Add(1)
	go m.distributor(dist, session)

	return next
}

// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
// binary AND-s the result to the daisy-chain input (source) and forwards it to the daisy-chain output.
// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
// that address/topic, and binary AND-ing those vectors together.
func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloom []bloomIndexes, session *MatcherSession) chan *partialMatches {
	// Start the concurrent schedulers for each bit required by the bloom filter
	sectionSources := make([][3]chan uint64, len(bloom))
	sectionSinks := make([][3]chan []byte, len(bloom))
	for i, bits := range bloom {
		for j, bit := range bits {
			sectionSources[i][j] = make(chan uint64, cap(source))
			sectionSinks[i][j] = make(chan []byte, cap(source))

			m.schedulers[bit].run(sectionSources[i][j], dist, sectionSinks[i][j], session.quit, &session.pend)
		}
	}

	process := make(chan *partialMatches, cap(source)) // entries from source are forwarded here after fetches have been initiated
	results := make(chan *partialMatches, cap(source))

	session.pend.Add(2)
	go func() {
		// Tear down the goroutine and terminate all source channels
		defer session.pend.Done()
		defer close(process)

		defer func() {
			for _, bloomSources := range sectionSources {
				for _, bitSource := range bloomSources {
					close(bitSource)
				}
			}
		}()
		// Read sections from the source channel and multiplex into all bit-schedulers
		for {
			select {
			case <-session.quit:
				return

			case subres, ok := <-source:
				// New subresult from previous link
				if !ok {
					return
				}
				// Multiplex the section index to all bit-schedulers
				for _, bloomSources := range sectionSources {
					for _, bitSource := range bloomSources {
						select {
						case <-session.quit:
							return
						case bitSource <- subres.section:
						}
					}
				}
				// Notify the processor that this section will become available
				select {
				case <-session.quit:
					return
				case process <- subres:
				}
			}
		}
	}()

	go func() {
		// Tear down the goroutine and terminate the final sink channel
		defer session.pend.Done()
		defer close(results)

		// Read the source notifications and collect the delivered results
		for {
			select {
			case <-session.quit:
				return

			case subres, ok := <-process:
				// Notified of a section being retrieved
				if !ok {
					return
				}
				// Gather all the sub-results and merge them together
				var orVector []byte
				for _, bloomSinks := range sectionSinks {
					var andVector []byte
					for _, bitSink := range bloomSinks {
						var data []byte
						select {
						case <-session.quit:
							return
						case data = <-bitSink:
						}
						if andVector == nil {
							andVector = make([]byte, int(m.sectionSize/8))
							copy(andVector, data)
						} else {
							bitutil.ANDBytes(andVector, andVector, data)
						}
					}
					if orVector == nil {
						orVector = andVector
					} else {
						bitutil.ORBytes(orVector, orVector, andVector)
					}
				}

				if orVector == nil {
					orVector = make([]byte, int(m.sectionSize/8))
				}
				if subres.bitset != nil {
					bitutil.ANDBytes(orVector, orVector, subres.bitset)
				}
				if bitutil.TestBytes(orVector) {
					select {
					case <-session.quit:
						return
					case results <- &partialMatches{subres.section, orVector}:
					}
				}
			}
		}
	}()
	return results
}

// distributor receives requests from the schedulers and queues them into a set
// of pending requests, which are assigned to retrievers wanting to fulfil them.
func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
	defer session.pend.Done()

	var (
		requests   = make(map[uint][]uint64) // Per-bit list of section requests, ordered by section number
		unallocs   = make(map[uint]struct{}) // Bits with pending requests but not allocated to any retriever
		retrievers chan chan uint            // Waiting retrievers (toggled to nil if unallocs is empty)
	)
	var (
		allocs   int            // Number of active allocations to handle graceful shutdown requests
		shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
	)

	// assign is a helper to try to assign a pending bit to an actively listening
	// servicer, or schedule it up for later when one arrives.
	assign := func(bit uint) {
		select {
		case fetcher := <-m.retrievers:
			allocs++
			fetcher <- bit
		default:
			// No retrievers active, start listening for new ones
			retrievers = m.retrievers
			unallocs[bit] = struct{}{}
		}
	}

	for {
		select {
		case <-shutdown:
			// Graceful shutdown requested, wait until all pending requests are honoured
			if allocs == 0 {
				return
			}
			shutdown = nil

		case <-session.kill:
			// Pending requests not honoured in time, hard terminate
			return

		case req := <-dist:
			// New retrieval request arrived to be distributed to some fetcher process
			queue := requests[req.bit]
			index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
			requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)

			// If it's a new bit and we have waiting fetchers, allocate to them
			if len(queue) == 0 {
				assign(req.bit)
			}

		case fetcher := <-retrievers:
			// New retriever arrived, find the lowest section-ed bit to assign
			bit, best := uint(0), uint64(math.MaxUint64)
			for idx := range unallocs {
				if requests[idx][0] < best {
					bit, best = idx, requests[idx][0]
				}
			}
			// Stop tracking this bit (and alloc notifications if no more work is available)
			delete(unallocs, bit)
			if len(unallocs) == 0 {
				retrievers = nil
			}
			allocs++
			fetcher <- bit

		case fetcher := <-m.counters:
			// New task count request arrived, return the number of items
			fetcher <- uint(len(requests[<-fetcher]))

		case fetcher := <-m.retrievals:
			// New fetcher waiting for tasks to retrieve, assign
			task := <-fetcher
			if want := len(task.Sections); want >= len(requests[task.Bit]) {
				task.Sections = requests[task.Bit]
				delete(requests, task.Bit)
			} else {
				task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...)
				requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...)
			}
			fetcher <- task

			// If anything was left unallocated, try to assign it to someone else
			if len(requests[task.Bit]) > 0 {
				assign(task.Bit)
			}

		case result := <-m.deliveries:
			// New retrieval task response from fetcher, split out missing sections and
			// deliver complete ones
			var (
				sections = make([]uint64, 0, len(result.Sections))
				bitsets  = make([][]byte, 0, len(result.Bitsets))
				missing  = make([]uint64, 0, len(result.Sections))
			)
			for i, bitset := range result.Bitsets {
				if len(bitset) == 0 {
					missing = append(missing, result.Sections[i])
					continue
				}
				sections = append(sections, result.Sections[i])
				bitsets = append(bitsets, bitset)
			}
			m.schedulers[result.Bit].deliver(sections, bitsets)
			allocs--

			// Reschedule missing sections and allocate the bit if newly available
			if len(missing) > 0 {
				queue := requests[result.Bit]
				for _, section := range missing {
					index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
					queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
				}
				requests[result.Bit] = queue

				if len(queue) == len(missing) {
					assign(result.Bit)
				}
			}
			// If we're in the process of shutting down, terminate
			if allocs == 0 && shutdown == nil {
				return
			}
		}
	}
}

// MatcherSession is returned by a started matcher to be used as a terminator
// for the actively running matching operation.
type MatcherSession struct {
	matcher *Matcher

	quit chan struct{} // Quit channel to request pipeline termination
	kill chan struct{} // Term channel to signal non-graceful forced shutdown
	pend sync.WaitGroup
}

// Close stops the matching process and waits for all subprocesses to terminate
// before returning. The timeout may be used for graceful shutdown, allowing the
// currently running retrievals to complete before this time.
func (s *MatcherSession) Close(timeout time.Duration) {
	// Bail out if the matcher is not running
	select {
	case <-s.quit:
		return
	default:
	}
	// Signal termination and wait for all goroutines to tear down
	close(s.quit)
	time.AfterFunc(timeout, func() { close(s.kill) })
	s.pend.Wait()
}

// AllocateRetrieval assigns a bloom bit index to a client process that can either
// immediately request and fetch the section contents assigned to this bit or wait
// a little while for more sections to be requested.
func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
	fetcher := make(chan uint)

	select {
	case <-s.quit:
		return 0, false
	case s.matcher.retrievers <- fetcher:
		bit, ok := <-fetcher
		return bit, ok
	}
}

// PendingSections returns the number of pending section retrievals belonging to
// the given bloom bit index.
func (s *MatcherSession) PendingSections(bit uint) int {
	fetcher := make(chan uint)

	select {
	case <-s.quit:
		return 0
	case s.matcher.counters <- fetcher:
		fetcher <- bit
		return int(<-fetcher)
	}
}

// AllocateSections assigns all or part of an already allocated bit-task queue
// to the requesting process.
func (s *MatcherSession) AllocateSections(bit uint, count int) []uint64 {
	fetcher := make(chan *Retrieval)

	select {
	case <-s.quit:
		return nil
	case s.matcher.retrievals <- fetcher:
		task := &Retrieval{
			Bit:      bit,
			Sections: make([]uint64, count),
		}
		fetcher <- task
		return (<-fetcher).Sections
	}
}

// DeliverSections delivers a batch of section bit-vectors for a specific bloom
// bit index to be injected into the processing pipeline.
func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets [][]byte) {
	select {
	case <-s.kill:
		return
	case s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}:
	}
}

// Multiplex polls the matcher session for retrieval tasks and multiplexes them into
// the requested retrieval queue to be serviced together with other sessions.
//
// This method will block for the lifetime of the session. Even after termination
// of the session, any in-flight requests need to be responded to! Empty responses
// are fine in that case though.
func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
	for {
		// Allocate a new bloom bit index to retrieve data for, stopping when done
		bit, ok := s.AllocateRetrieval()
		if !ok {
			return
		}
		// Bit allocated, throttle a bit if we're below our batch limit
		if s.PendingSections(bit) < batch {
			select {
			case <-s.quit:
				// Session terminating, we can't meaningfully service, abort
				s.AllocateSections(bit, 0)
				s.DeliverSections(bit, []uint64{}, [][]byte{})
				return

			case <-time.After(wait):
				// Throttling up, fetch whatever is available
			}
		}
		// Allocate as much as we can handle and request servicing
		sections := s.AllocateSections(bit, batch)
		request := make(chan *Retrieval)

		select {
		case <-s.quit:
			// Session terminating, we can't meaningfully service, abort
			s.DeliverSections(bit, sections, make([][]byte, len(sections)))
			return

		case mux <- request:
			// Retrieval accepted, something must arrive before we're aborting
			request <- &Retrieval{Bit: bit, Sections: sections}

			result := <-request
			s.DeliverSections(result.Bit, result.Sections, result.Bitsets)
		}
	}
}
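Putting the pieces above together, a hedged end-to-end sketch: build a Matcher from filter groups, start a session over a block range, and service it through Multiplex. The bitsFor helper is hypothetical; in geth the bit vectors come from the database or the network.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/core/bloombits"
)

const sectionSize = 4096

// bitsFor is a hypothetical stand-in for a real bit-vector store.
// All-zero vectors mean "no block in this section can match".
func bitsFor(bit uint, section uint64) []byte {
	return make([]byte, sectionSize/8)
}

func main() {
	// One filter group with one clause: a clause is just the raw bytes
	// (an address or a topic) that get hashed into three bloom bit indexes.
	clause := make([]byte, 20)
	matcher := bloombits.NewMatcher(sectionSize, [][][]byte{{clause}})

	results := make(chan uint64, 64)
	session, err := matcher.Start(0, 10*sectionSize-1, results)
	if err != nil {
		panic(err)
	}
	// Service the session: Multiplex blocks until the session ends, handing
	// batched Retrieval tasks to whoever reads the mux channel.
	mux := make(chan chan *bloombits.Retrieval)
	go session.Multiplex(16, 100*time.Microsecond, mux)
	go func() {
		for request := range mux {
			task := <-request // the task to service
			task.Bitsets = make([][]byte, len(task.Sections))
			for i, section := range task.Sections {
				task.Bitsets[i] = bitsFor(task.Bit, section)
			}
			request <- task // respond; an empty bitset would mean "retry later"
		}
	}()
	// Drain candidate block numbers; the channel closes when the range is done.
	for number := range results {
		fmt.Println("potential match in block", number)
	}
	session.Close(time.Second)
}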
core/bloombits/matcher_test.go (new file, 242 lines)
@@ -0,0 +1,242 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"math/rand"
	"sync/atomic"
	"testing"
	"time"
)

const testSectionSize = 4096

// Tests the matcher pipeline on a single continuous workflow without interrupts.
func TestMatcherContinuous(t *testing.T) {
	testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 100000, false, 75)
	testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 100000, false, 81)
	testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 10000, false, 36)
}

// Tests the matcher pipeline on a constantly interrupted and resumed work pattern
// with the aim of ensuring data items are requested only once.
func TestMatcherIntermittent(t *testing.T) {
	testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 100000, true, 75)
	testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 100000, true, 81)
	testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 10000, true, 36)
}

// Tests the matcher pipeline on random input to hopefully catch anomalies.
func TestMatcherRandom(t *testing.T) {
	for i := 0; i < 10; i++ {
		testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 10000, 0)
		testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 10000, 0)
		testMatcherBothModes(t, makeRandomIndexes([]int{2, 2, 2}, 20), 10000, 0)
		testMatcherBothModes(t, makeRandomIndexes([]int{5, 5, 5}, 50), 10000, 0)
		testMatcherBothModes(t, makeRandomIndexes([]int{4, 4, 4}, 20), 10000, 0)
	}
}

// Tests that matching on everything doesn't crash (special case internally).
func TestWildcardMatcher(t *testing.T) {
	testMatcherBothModes(t, nil, 10000, 0)
}

// makeRandomIndexes generates a random filter system, composed of multiple filter
// criteria, each having one bloom list component for the address and arbitrarily
// many topic bloom list components.
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
	res := make([][]bloomIndexes, len(lengths))
	for i, topics := range lengths {
		res[i] = make([]bloomIndexes, topics)
		for j := 0; j < topics; j++ {
			for k := 0; k < len(res[i][j]); k++ {
				res[i][j][k] = uint(rand.Intn(max-1) + 2)
			}
		}
	}
	return res
}

// testMatcherDiffBatches runs the given matcher test in single-delivery and also
// in batched delivery mode, verifying that all kinds of deliveries are handled
// correctly within.
func testMatcherDiffBatches(t *testing.T, filter [][]bloomIndexes, blocks uint64, intermittent bool, retrievals uint32) {
	singleton := testMatcher(t, filter, blocks, intermittent, retrievals, 1)
	batched := testMatcher(t, filter, blocks, intermittent, retrievals, 16)

	if singleton != batched {
		t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, %v in singleton vs. %v in batched mode", filter, blocks, intermittent, singleton, batched)
	}
}

// testMatcherBothModes runs the given matcher test in both continuous as well as
// in intermittent mode, verifying that the request counts match each other.
func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, blocks uint64, retrievals uint32) {
	continuous := testMatcher(t, filter, blocks, false, retrievals, 16)
	intermittent := testMatcher(t, filter, blocks, true, retrievals, 16)

	if continuous != intermittent {
		t.Errorf("filter = %v blocks = %v: request count mismatch, %v in continuous vs. %v in intermittent mode", filter, blocks, continuous, intermittent)
	}
}

// testMatcher is a generic tester to run the given matcher test and return the
// number of requests made for cross validation between different modes.
func testMatcher(t *testing.T, filter [][]bloomIndexes, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 {
	// Create a new matcher and simulate our explicit random bitsets
	matcher := NewMatcher(testSectionSize, nil)
	matcher.filters = filter

	for _, rule := range filter {
		for _, topic := range rule {
			for _, bit := range topic {
				matcher.addScheduler(bit)
			}
		}
	}
	// Track the number of retrieval requests made
	var requested uint32

	// Start the matching session for the filter and the retriever goroutines
	quit := make(chan struct{})
	matches := make(chan uint64, 16)

	session, err := matcher.Start(0, blocks-1, matches)
	if err != nil {
		t.Fatalf("failed to start matcher session: %v", err)
	}
	startRetrievers(session, quit, &requested, maxReqCount)

	// Iterate over all the blocks and verify that the pipeline produces the correct matches
	for i := uint64(0); i < blocks; i++ {
		if expMatch3(filter, i) {
			match, ok := <-matches
			if !ok {
				t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, results channel closed", filter, blocks, intermittent, i)
				return 0
			}
			if match != i {
				t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, got #%v", filter, blocks, intermittent, i, match)
			}
			// If we're testing intermittent mode, abort and restart the pipeline
			if intermittent {
				session.Close(time.Second)
				close(quit)

				quit = make(chan struct{})
				matches = make(chan uint64, 16)

				session, err = matcher.Start(i+1, blocks-1, matches)
				if err != nil {
					t.Fatalf("failed to start matcher session: %v", err)
				}
				startRetrievers(session, quit, &requested, maxReqCount)
			}
		}
	}
	// Ensure the result channel is torn down after the last block
	match, ok := <-matches
	if ok {
		t.Errorf("filter = %v blocks = %v intermittent = %v: expected closed channel, got #%v", filter, blocks, intermittent, match)
	}
	// Clean up the session and ensure we match the expected retrieval count
	session.Close(time.Second)
	close(quit)

	if retrievals != 0 && requested != retrievals {
		t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested, retrievals)
	}
	return requested
}

// startRetrievers starts a batch of goroutines listening for section requests
// and serving them.
func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) {
	requests := make(chan chan *Retrieval)

	for i := 0; i < 10; i++ {
		// Start a multiplexer to test multiple threaded execution
		go session.Multiplex(batch, 100*time.Microsecond, requests)

		// Start a servicer to match the above multiplexer
		go func() {
			for {
				// Wait for a service request or a shutdown
				select {
				case <-quit:
					return

				case request := <-requests:
					task := <-request

					task.Bitsets = make([][]byte, len(task.Sections))
					for i, section := range task.Sections {
						if rand.Int()%4 != 0 { // Handle occasional missing deliveries
							task.Bitsets[i] = generateBitset(task.Bit, section)
							atomic.AddUint32(retrievals, 1)
						}
					}
					request <- task
				}
			}
		}()
	}
}

// generateBitset generates the rotated bitset for the given bloom bit and section
// numbers.
func generateBitset(bit uint, section uint64) []byte {
	bitset := make([]byte, testSectionSize/8)
	for i := 0; i < len(bitset); i++ {
		for b := 0; b < 8; b++ {
			blockIdx := section*testSectionSize + uint64(i*8+b)
			bitset[i] += bitset[i]
			if (blockIdx % uint64(bit)) == 0 {
				bitset[i]++
			}
		}
	}
	return bitset
}

func expMatch1(filter bloomIndexes, i uint64) bool {
	for _, ii := range filter {
		if (i % uint64(ii)) != 0 {
			return false
		}
	}
	return true
}

func expMatch2(filter []bloomIndexes, i uint64) bool {
	for _, ii := range filter {
		if expMatch1(ii, i) {
			return true
		}
	}
	return false
}

func expMatch3(filter [][]bloomIndexes, i uint64) bool {
	for _, ii := range filter {
		if !expMatch2(ii, i) {
			return false
		}
	}
	return true
}
core/bloombits/scheduler.go (new file, 181 lines)
@@ -0,0 +1,181 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"sync"
)

// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}

// response represents the state of a requested bit-vector through a scheduler.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}

// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Besides scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}

// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	return &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
}

// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Create a forwarder channel between requests and responses of the same size as
	// the distribution channel (since that will block the pipeline anyway).
	pend := make(chan uint64, cap(dist))

	// Start the pipeline schedulers to forward between user -> distributor -> user
	wg.Add(2)
	go s.scheduleRequests(sections, dist, pend, quit, wg)
	go s.scheduleDeliveries(pend, done, quit, wg)
}

// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state can
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	for section, res := range s.responses {
		if res.cached == nil {
			delete(s.responses, section)
		}
	}
}

// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to expect this section
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}

// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}

// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for i, section := range sections {
		if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
			res.cached = data[i]
			close(res.done)
		}
	}
}
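A minimal sketch of driving one scheduler by hand, the way the Matcher does internally. Since the type is unexported this would have to live inside the bloombits package (say, in a test); fmt and sync imports are assumed, and the 512-byte vector is an arbitrary stand-in for a real section bit-vector.

func demoScheduler() {
	s := newScheduler(0) // scheduler for bloom bit 0

	var (
		wg   sync.WaitGroup
		in   = make(chan uint64, 16)   // section numbers to fetch
		dist = make(chan *request, 16) // deduplicated retrieval tasks
		out  = make(chan []byte, 16)   // bit-vectors, in request order
	)
	s.run(in, dist, out, make(chan struct{}), &wg)

	// Serve the retrieval tasks; duplicate sections never reach this loop.
	go func() {
		for req := range dist {
			s.deliver([]uint64{req.section}, [][]byte{make([]byte, 512)})
		}
	}()
	in <- 7
	in <- 7 // duplicate: answered from the response cache, no second fetch
	fmt.Println(len(<-out), len(<-out)) // 512 512: both requests still answered

	close(in) // winds down the pipeline
	wg.Wait()
	close(dist) // stops the serving goroutine
}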
core/bloombits/scheduler_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"bytes"
	"math/big"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// Tests that the scheduler can deduplicate and forward retrieval requests to
// underlying fetchers and serve responses back, regardless of the concurrency
// of the requesting clients or serving data fetchers.
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
func TestSchedulerSingleClientMultiFetcher(t *testing.T)  { testScheduler(t, 1, 10, 5000) }
func TestSchedulerMultiClientSingleFetcher(t *testing.T)  { testScheduler(t, 10, 1, 5000) }
func TestSchedulerMultiClientMultiFetcher(t *testing.T)   { testScheduler(t, 10, 10, 5000) }

func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
	f := newScheduler(0)

	// Create a batch of handler goroutines that respond to bloom bit requests and
	// deliver them to the scheduler.
	var fetchPend sync.WaitGroup
	fetchPend.Add(fetchers)
	defer fetchPend.Wait()

	fetch := make(chan *request, 16)
	defer close(fetch)

	var delivered uint32
	for i := 0; i < fetchers; i++ {
		go func() {
			defer fetchPend.Done()

			for req := range fetch {
				time.Sleep(time.Duration(rand.Intn(int(100 * time.Microsecond))))
				atomic.AddUint32(&delivered, 1)

				f.deliver([]uint64{
					req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
					req.section,                    // Requested data
					req.section,                    // Duplicated data (ensure it doesn't double close anything)
				}, [][]byte{
					[]byte{},
					new(big.Int).SetUint64(req.section).Bytes(),
					new(big.Int).SetUint64(req.section).Bytes(),
				})
			}
		}()
	}
	// Start a batch of goroutines to concurrently run scheduling tasks
	quit := make(chan struct{})

	var pend sync.WaitGroup
	pend.Add(clients)

	for i := 0; i < clients; i++ {
		go func() {
			defer pend.Done()

			in := make(chan uint64, 16)
			out := make(chan []byte, 16)

			f.run(in, fetch, out, quit, &pend)

			go func() {
				for j := 0; j < requests; j++ {
					in <- uint64(j)
				}
				close(in)
			}()

			for j := 0; j < requests; j++ {
				bits := <-out
				if want := new(big.Int).SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
					t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
				}
			}
		}()
	}
	pend.Wait()

	if have := atomic.LoadUint32(&delivered); int(have) != requests {
		t.Errorf("request count mismatch: have %v, want %v", have, requests)
	}
}
core/chain_indexer.go
@@ -42,9 +42,8 @@ type ChainIndexerBackend interface {
 	// will ensure a sequential order of headers.
 	Process(header *types.Header)
 
-	// Commit finalizes the section metadata and stores it into the database. This
-	// interface will usually be a batch writer.
-	Commit(db ethdb.Database) error
+	// Commit finalizes the section metadata and stores it into the database.
+	Commit() error
 }
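The interface change above means a backend now captures its database handle at construction time instead of receiving it on every commit. A hedged sketch of what an implementor's migration could look like (myBackend and its fields are illustrative, not geth code):

// Before:
//   func (b *myBackend) Commit(db ethdb.Database) error { /* write batch to db */ }

// After: the backend owns a handle and batches internally.
type myBackend struct {
	db      ethdb.Database
	pending map[string][]byte // section data accumulated by Process
}

func (b *myBackend) Commit() error {
	batch := b.db.NewBatch()
	for key, blob := range b.pending {
		if err := batch.Put([]byte(key), blob); err != nil {
			return err
		}
	}
	b.pending = make(map[string][]byte) // reset for the next section
	return batch.Write()
}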
|
||||
|
||||
// ChainIndexer does a post-processing job for equally sized sections of the
|
||||
@ -102,9 +101,10 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
|
||||
}
|
||||
|
||||
// Start creates a goroutine to feed chain head events into the indexer for
|
||||
// cascading background processing.
|
||||
func (c *ChainIndexer) Start(currentHeader *types.Header, eventMux *event.TypeMux) {
|
||||
go c.eventLoop(currentHeader, eventMux)
|
||||
// cascading background processing. Children do not need to be started, they
|
||||
// are notified about new events by their parents.
|
||||
func (c *ChainIndexer) Start(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
|
||||
go c.eventLoop(currentHeader, chainEventer)
|
||||
}
|
||||
|
||||
// Close tears down all goroutines belonging to the indexer and returns any error
|
||||
@ -125,6 +125,12 @@ func (c *ChainIndexer) Close() error {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
// Close all children
|
||||
for _, child := range c.children {
|
||||
if err := child.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
// Return any failures
|
||||
switch {
|
||||
case len(errs) == 0:
|
||||
@ -141,12 +147,12 @@ func (c *ChainIndexer) Close() error {
|
||||
// eventLoop is a secondary - optional - event loop of the indexer which is only
|
||||
// started for the outermost indexer to push chain head events into a processing
|
||||
// queue.
|
||||
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.TypeMux) {
|
||||
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
|
||||
// Mark the chain indexer as active, requiring an additional teardown
|
||||
atomic.StoreUint32(&c.active, 1)
|
||||
|
||||
// Subscribe to chain head events
|
||||
sub := eventMux.Subscribe(ChainEvent{})
|
||||
events := make(chan ChainEvent, 10)
|
||||
sub := chainEventer(events)
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
// Fire the initial new head event to start any outstanding processing
|
||||
@ -163,14 +169,14 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.Ty
|
||||
errc <- nil
|
||||
return
|
||||
|
||||
case ev, ok := <-sub.Chan():
|
||||
case ev, ok := <-events:
|
||||
// Received a new event, ensure it's not nil (closing) and update
|
||||
if !ok {
|
||||
errc := <-c.quit
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
header := ev.Data.(ChainEvent).Block.Header()
|
||||
header := ev.Block.Header()
|
||||
if header.ParentHash != prevHash {
|
||||
c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true)
|
||||
}
|
||||
@ -226,8 +232,10 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
var updated time.Time

var (
updating bool
updated time.Time
)
for {
select {
case errc := <-c.quit:
@ -242,6 +250,7 @@ func (c *ChainIndexer) updateLoop() {
// Periodically print an upgrade log message to the user
if time.Since(updated) > 8*time.Second {
if c.knownSections > c.storedSections+1 {
updating = true
c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
}
updated = time.Now()
@ -255,12 +264,19 @@ func (c *ChainIndexer) updateLoop() {
// Process the newly defined section in the background
c.lock.Unlock()
newHead, err := c.processSection(section, oldHead)
if err != nil {
c.log.Error("Section processing failed", "error", err)
}
c.lock.Lock()

// If processing succeeded and no reorgs occurred, mark the section completed
if err == nil && oldHead == c.sectionHead(section-1) {
c.setSectionHead(section, newHead)
c.setValidSections(section + 1)
if c.storedSections == c.knownSections && updating {
updating = false
c.log.Info("Finished upgrading chain index")
}

c.cascadedHead = c.storedSections*c.sectionSize - 1
for _, child := range c.children {
@ -311,7 +327,8 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c.backend.Process(header)
lastHead = header.Hash()
}
if err := c.backend.Commit(c.chainDb); err != nil {
if err := c.backend.Commit(); err != nil {
c.log.Error("Section commit failed", "error", err)
return common.Hash{}, err
}
return lastHead, nil
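Commit losing its database argument means a backend now binds its write target at construction time instead of receiving one per section. A sketch of what a conforming backend might look like (kvWriter, memDB, and sectionBackend are illustrative names, not go-ethereum types):

package main

import "fmt"

// kvWriter is a stand-in for the narrow DatabaseWriter interface the
// backend keeps from construction instead of receiving at Commit time.
type kvWriter interface {
	Put(key, value []byte) error
}

type memDB map[string][]byte

func (m memDB) Put(key, value []byte) error { m[string(key)] = value; return nil }

// sectionBackend accumulates headers for one section, then flushes in Commit.
type sectionBackend struct {
	db      kvWriter
	pending [][]byte
}

func (b *sectionBackend) Process(headerHash []byte) {
	b.pending = append(b.pending, headerHash)
}

// Commit no longer takes a database: the backend writes to the store it
// was constructed with, matching the Commit(db) -> Commit() change above.
func (b *sectionBackend) Commit() error {
	for i, h := range b.pending {
		if err := b.db.Put([]byte(fmt.Sprintf("sec-%d", i)), h); err != nil {
			return err
		}
	}
	b.pending = nil
	return nil
}

func main() {
	db := memDB{}
	b := &sectionBackend{db: db}
	b.Process([]byte{0x01})
	b.Process([]byte{0x02})
	if err := b.Commit(); err != nil {
		fmt.Println("commit failed:", err)
		return
	}
	fmt.Println("entries written:", len(db))
}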
@ -58,7 +58,6 @@ func testChainIndexer(t *testing.T, count int) {
)
backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
backends[i].indexer = NewChainIndexer(db, ethdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))
defer backends[i].indexer.Close()

if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
@ -67,6 +66,7 @@ func testChainIndexer(t *testing.T, count int) {
backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
}
}
defer backends[0].indexer.Close() // parent indexer shuts down children
// notify pings the root indexer about a new head or reorg, then expects
// processed blocks if a section is processable
notify := func(headNum, failNum uint64, reorg bool) {
@ -226,7 +226,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
}
}

func (b *testChainIndexBackend) Commit(db ethdb.Database) error {
func (b *testChainIndexBackend) Commit() error {
if b.headerCnt != b.indexer.sectionSize {
b.t.Error("Not enough headers processed")
}
@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"math/big"
"sync"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@ -34,24 +33,41 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)

// DatabaseReader wraps the Get method of a backing data store.
type DatabaseReader interface {
Get(key []byte) (value []byte, err error)
}

// DatabaseWriter wraps the Put method of a backing data store.
type DatabaseWriter interface {
Put(key, value []byte) error
}

// DatabaseDeleter wraps the Delete method of a backing data store.
type DatabaseDeleter interface {
Delete(key []byte) error
}

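The point of these three narrow interfaces is that each accessor declares only the capability it uses, so a full ethdb.Database, a write-only batch, or a prefixed table can all be passed wherever they fit. A compact illustration (memDB and the two helpers are hypothetical, not part of the library):

package main

import "fmt"

// DatabaseReader and DatabaseWriter mirror the narrow interfaces above;
// memDB is a toy store standing in for ethdb.Database, which satisfies
// both (as an ethdb batch would satisfy the writer side alone).
type DatabaseReader interface {
	Get(key []byte) (value []byte, err error)
}

type DatabaseWriter interface {
	Put(key, value []byte) error
}

type memDB map[string][]byte

func (m memDB) Get(key []byte) ([]byte, error) {
	v, ok := m[string(key)]
	if !ok {
		return nil, fmt.Errorf("not found: %x", key)
	}
	return v, nil
}

func (m memDB) Put(key, value []byte) error { m[string(key)] = value; return nil }

// writeHash needs only Put, readHash only Get, so callers can hand in a
// write-only batch or a read-only snapshot without faking a full DB.
func writeHash(db DatabaseWriter, key, hash []byte) error { return db.Put(key, hash) }

func readHash(db DatabaseReader, key []byte) ([]byte, error) { return db.Get(key) }

func main() {
	db := memDB{}
	_ = writeHash(db, []byte("LastHeader"), []byte{0xab})
	h, _ := readHash(db, []byte("LastHeader"))
	fmt.Printf("%x\n", h)
}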
var (
headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock")
headFastKey = []byte("LastFast")

headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits

mipmapPre = []byte("mipmap-log-bloom-")
MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db

configPrefix = []byte("ethereum-config-") // config prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress

// used by old db, now only used for conversion
oldReceiptsPrefix = []byte("receipts-")
@ -59,8 +75,6 @@ var (

ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error

mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms

preimageCounter = metrics.NewCounter("db/preimage/total")
preimageHitCounter = metrics.NewCounter("db/preimage/hits")
)
@ -81,7 +95,7 @@ func encodeBlockNumber(number uint64) []byte {
}

// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
func GetCanonicalHash(db DatabaseReader, number uint64) common.Hash {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
if len(data) == 0 {
return common.Hash{}
@ -95,7 +109,7 @@ const missingNumber = uint64(0xffffffffffffffff)

// GetBlockNumber returns the block number assigned to a block hash
// if the corresponding header is present in the database
func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
func GetBlockNumber(db DatabaseReader, hash common.Hash) uint64 {
data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...))
if len(data) != 8 {
return missingNumber
@ -108,7 +122,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
// last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the
// light synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash {
func GetHeadHeaderHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headHeaderKey)
if len(data) == 0 {
return common.Hash{}
@ -117,7 +131,7 @@ func GetHeadHeaderHash(db ethdb.Database) common.Hash {
}

// GetHeadBlockHash retrieves the hash of the current canonical head block.
func GetHeadBlockHash(db ethdb.Database) common.Hash {
func GetHeadBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headBlockKey)
if len(data) == 0 {
return common.Hash{}
@ -129,7 +143,7 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headFastKey)
if len(data) == 0 {
return common.Hash{}
@ -139,14 +153,14 @@ func GetHeadFastBlockHash(db ethdb.Database) common.Hash {

// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
return data
}

// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header {
func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header {
data := GetHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
@ -160,14 +174,14 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
}

// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
return data
}

// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
data := GetBodyRLP(db, hash, number)
if len(data) == 0 {
return nil
@ -182,7 +196,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {

// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
func GetTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...))
if len(data) == 0 {
return nil
@ -201,7 +215,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block {
func GetBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block {
// Retrieve the block header and body contents
header := GetHeader(db, hash, number)
if header == nil {
@ -217,7 +231,7 @@ func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block {

// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.Receipts {
func GetBlockReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
if len(data) == 0 {
return nil
@ -236,7 +250,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.

// GetTxLookupEntry retrieves the positional metadata associated with a transaction
// hash to allow retrieving the transaction or receipt by hash.
func GetTxLookupEntry(db ethdb.Database, hash common.Hash) (common.Hash, uint64, uint64) {
func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
// Load the positional metadata from disk and bail if it fails
data, _ := db.Get(append(lookupPrefix, hash.Bytes()...))
if len(data) == 0 {
@ -253,7 +267,7 @@ func GetTxLookupEntry(db ethdb.Database, hash common.Hash) (common.Hash, uint64,

// GetTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
// Retrieve the lookup metadata and resolve the transaction from the body
blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash)

@ -288,7 +302,7 @@ func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, co

// GetReceipt retrieves a specific transaction receipt from the database, along with
// its added positional metadata.
func GetReceipt(db ethdb.Database, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
// Retrieve the lookup metadata and resolve the receipt from the receipts
blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash)

@ -313,8 +327,20 @@ func GetReceipt(db ethdb.Database, hash common.Hash) (*types.Receipt, common.Has
return (*types.Receipt)(&receipt), common.Hash{}, 0, 0
}

// GetBloomBits retrieves the compressed bloom bit vector belonging to the given
// section and bit index from the database.
func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) []byte {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)

binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)

bits, _ := db.Get(key)
return bits
}

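The bloom bits key packs four fixed-width fields into 43 bytes: the 1-byte "B" prefix, the bit index as a big-endian uint16, the section number as a big-endian uint64, and the 32-byte section head hash. Big-endian ordering keeps all sections of one bit index adjacent in the key space. A standalone sketch of the construction (the zero hash is a placeholder):

package main

import (
	"encoding/binary"
	"fmt"
)

// bloomBitsKey mirrors the layout used above:
// "B" (1 byte) + bit (uint16 BE) + section (uint64 BE) + head hash (32 bytes).
func bloomBitsKey(bit uint16, section uint64, head [32]byte) []byte {
	key := append(append([]byte("B"), make([]byte, 10)...), head[:]...)
	binary.BigEndian.PutUint16(key[1:], bit)
	binary.BigEndian.PutUint64(key[3:], section)
	return key
}

func main() {
	var head [32]byte // placeholder section head hash
	key := bloomBitsKey(7, 3, head)
	fmt.Println(len(key)) // 43 = 1 + 2 + 8 + 32
	fmt.Printf("%x\n", key[:11])
}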
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) error {
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil {
log.Crit("Failed to store number to hash mapping", "err", err)
@ -323,7 +349,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
}

// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last header's hash", "err", err)
}
@ -331,7 +357,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err)
}
@ -339,7 +365,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
}

// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last fast block's hash", "err", err)
}
@ -347,7 +373,7 @@ func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
}

// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
func WriteHeader(db DatabaseWriter, header *types.Header) error {
data, err := rlp.EncodeToBytes(header)
if err != nil {
return err
@ -367,7 +393,7 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
}

// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.Body) error {
func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
@ -376,7 +402,7 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
}

// WriteBodyRLP writes a serialized body of a block into the database.
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil {
log.Crit("Failed to store block body", "err", err)
@ -385,7 +411,7 @@ func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.Ra
}

// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) error {
func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return err
@ -398,7 +424,7 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error {
func WriteBlock(db DatabaseWriter, block *types.Block) error {
// Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
@ -413,7 +439,7 @@ func WriteBlock(db ethdb.Database, block *types.Block) error {
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, receipts types.Receipts) error {
func WriteBlockReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
@ -458,29 +484,42 @@ func WriteTxLookupEntries(db ethdb.Database, block *types.Block) error {
return nil
}

// WriteBloomBits writes the compressed bloom bits vector belonging to the given
// section and bit index.
func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)

binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)

if err := db.Put(key, bits); err != nil {
log.Crit("Failed to store bloom bits", "err", err)
}
}

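What makes these per-bit vectors worth storing is the transposition behind them: instead of one 2048-bit bloom per block, the index stores, for each of the 2048 bit positions, one bit per block of the section. A filter needing k bloom bits then reads k short vectors and ANDs them, rather than touching every block's bloom. A toy sketch of the transposition (the sectionSize of 8 and the MSB-first bit order are assumptions for illustration; real sections are far larger):

package main

import "fmt"

const sectionSize = 8 // blocks per section (toy value; an assumption here)

// transpose turns sectionSize per-block bloom filters into per-bit vectors:
// out[bit] has one bit per block, so a query matching on k bloom bits reads
// k vectors and ANDs them instead of scanning every block's bloom.
func transpose(blooms [sectionSize][256]byte) [2048][]byte {
	var out [2048][]byte
	for bit := 0; bit < 2048; bit++ {
		vec := make([]byte, sectionSize/8)
		for blk := 0; blk < sectionSize; blk++ {
			if blooms[blk][bit/8]&(1<<(7-uint(bit%8))) != 0 {
				vec[blk/8] |= 1 << (7 - uint(blk%8))
			}
		}
		out[bit] = vec
	}
	return out
}

func main() {
	var blooms [sectionSize][256]byte
	blooms[2][0] = 0x80 // block 2 has bloom bit 0 set
	vectors := transpose(blooms)
	fmt.Printf("%08b\n", vectors[0][0]) // 00100000: the bit for block 2
}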
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
}

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(blockHashPrefix, hash.Bytes()...))
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...))
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) {
DeleteBlockReceipts(db, hash, number)
DeleteHeader(db, hash, number)
DeleteBody(db, hash, number)
@ -488,57 +527,15 @@ func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) {
}

// DeleteBlockReceipts removes all receipt data associated with a block hash.
func DeleteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteBlockReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}

// DeleteTxLookupEntry removes all transaction data associated with a hash.
func DeleteTxLookupEntry(db ethdb.Database, hash common.Hash) {
func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) {
db.Delete(append(lookupPrefix, hash.Bytes()...))
}

// returns a formatted MIP mapped key by adding prefix, canonical number and level
//
// ex. fn(98, 1000) = (prefix || 1000 || 0)
func mipmapKey(num, level uint64) []byte {
lkey := make([]byte, 8)
binary.BigEndian.PutUint64(lkey, level)
key := new(big.Int).SetUint64(num / level * level)

return append(mipmapPre, append(lkey, key.Bytes()...)...)
}

// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
mipmapBloomMu.Lock()
defer mipmapBloomMu.Unlock()

batch := db.NewBatch()
for _, level := range MIPMapLevels {
key := mipmapKey(number, level)
bloomDat, _ := db.Get(key)
bloom := types.BytesToBloom(bloomDat)
for _, receipt := range receipts {
for _, log := range receipt.Logs {
bloom.Add(log.Address.Big())
}
}
batch.Put(key, bloom.Bytes())
}
if err := batch.Write(); err != nil {
return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
}
return nil
}

// GetMipmapBloom returns a bloom filter using the number and level as input
// parameters. For available levels see MIPMapLevels.
func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom {
bloomDat, _ := db.Get(mipmapKey(number, level))
return types.BytesToBloom(bloomDat)
}

// PreimageTable returns a Database instance with the key prefix for preimage entries.
func PreimageTable(db ethdb.Database) ethdb.Database {
return ethdb.NewTable(db, preimagePrefix)
@ -567,7 +564,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
}

// GetBlockChainVersion reads the version number from db.
func GetBlockChainVersion(db ethdb.Database) int {
func GetBlockChainVersion(db DatabaseReader) int {
var vsn uint
enc, _ := db.Get([]byte("BlockchainVersion"))
rlp.DecodeBytes(enc, &vsn)
@ -575,13 +572,13 @@ func GetBlockChainVersion(db ethdb.Database) int {
}

// WriteBlockChainVersion writes vsn as the version number to db.
func WriteBlockChainVersion(db ethdb.Database, vsn int) {
func WriteBlockChainVersion(db DatabaseWriter, vsn int) {
enc, _ := rlp.EncodeToBytes(uint(vsn))
db.Put([]byte("BlockchainVersion"), enc)
}

// WriteChainConfig writes the chain config settings to the database.
func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConfig) error {
func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConfig) error {
// short circuit and ignore if nil config. GetChainConfig
// will return a default.
if cfg == nil {
@ -597,7 +594,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
}

// GetChainConfig will fetch the network settings based on the given hash.
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) {
func GetChainConfig(db DatabaseReader, hash common.Hash) (*params.ChainConfig, error) {
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
if len(jsonChainConfig) == 0 {
return nil, ErrChainConfigNotFound
@ -612,7 +609,7 @@ func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, e
}

// FindCommonAncestor returns the last common ancestor of two block headers
func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header {
func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header {
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
if a == nil {
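The diff cuts FindCommonAncestor short; the full walk is the classic two-pointer descent: bring the deeper header back to the shallower one's height, then step both back in lockstep until the hashes agree. A self-contained sketch against a toy header map standing in for GetHeader lookups (all names illustrative):

package main

import "fmt"

// header is a toy stand-in for *types.Header; parent links live in a map
// standing in for database lookups.
type header struct {
	number uint64
	hash   string
	parent string
}

// findCommonAncestor lowers the higher header first, then descends both
// chains together until the hashes meet (nil if a lookup ever fails).
func findCommonAncestor(byHash map[string]*header, a, b *header) *header {
	for a.number > b.number {
		if a = byHash[a.parent]; a == nil {
			return nil
		}
	}
	for b.number > a.number {
		if b = byHash[b.parent]; b == nil {
			return nil
		}
	}
	for a.hash != b.hash {
		if a = byHash[a.parent]; a == nil {
			return nil
		}
		if b = byHash[b.parent]; b == nil {
			return nil
		}
	}
	return a
}

func main() {
	// g <- h1 <- h2a (chain A), g <- h1 <- h2b <- h3b (chain B)
	g := &header{0, "g", ""}
	h1 := &header{1, "h1", "g"}
	h2a := &header{2, "h2a", "h1"}
	h2b := &header{2, "h2b", "h1"}
	h3b := &header{3, "h3b", "h2b"}
	byHash := map[string]*header{"g": g, "h1": h1, "h2a": h2a, "h2b": h2b, "h3b": h3b}
	fmt.Println(findCommonAncestor(byHash, h2a, h3b).hash) // h1
}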
@ -18,17 +18,13 @@ package core

import (
"bytes"
"io/ioutil"
"math/big"
"os"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)

@ -390,107 +386,3 @@ func TestBlockReceiptStorage(t *testing.T) {
t.Fatalf("deleted receipts returned: %v", rs)
}
}

func TestMipmapBloom(t *testing.T) {
db, _ := ethdb.NewMemDatabase()

receipt1 := new(types.Receipt)
receipt1.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test"))},
{Address: common.BytesToAddress([]byte("address"))},
}
receipt2 := new(types.Receipt)
receipt2.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test"))},
{Address: common.BytesToAddress([]byte("address1"))},
}

WriteMipmapBloom(db, 1, types.Receipts{receipt1})
WriteMipmapBloom(db, 2, types.Receipts{receipt2})

for _, level := range MIPMapLevels {
bloom := GetMipmapBloom(db, 2, level)
if !bloom.Test(new(big.Int).SetBytes([]byte("address1"))) {
t.Error("expected test to be included on level:", level)
}
}

// reset
db, _ = ethdb.NewMemDatabase()
receipt := new(types.Receipt)
receipt.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test"))},
}
WriteMipmapBloom(db, 999, types.Receipts{receipt1})

receipt = new(types.Receipt)
receipt.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test 1"))},
}
WriteMipmapBloom(db, 1000, types.Receipts{receipt})

bloom := GetMipmapBloom(db, 1000, 1000)
if bloom.TestBytes([]byte("test")) {
t.Error("test should not have been included")
}
}

func TestMipmapChain(t *testing.T) {
dir, err := ioutil.TempDir("", "mipmap")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)

var (
db, _ = ethdb.NewLDBDatabase(dir, 0, 0)
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = common.BytesToAddress([]byte("jeff"))

hash1 = common.BytesToHash([]byte("topic1"))
)
defer db.Close()

gspec := &Genesis{
Config: params.TestChainConfig,
Alloc: GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
}
genesis := gspec.MustCommit(db)
chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
var receipts types.Receipts
switch i {
case 1:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{{Address: addr, Topics: []common.Hash{hash1}}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 1000:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{{Address: addr2}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}

}

// store the receipts
WriteMipmapBloom(db, uint64(i+1), receipts)
})
for i, block := range chain {
WriteBlock(db, block)
if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}

bloom := GetMipmapBloom(db, 0, 1000)
if bloom.TestBytes(addr2[:]) {
t.Error("address was included in bloom and should not have")
}
}
@ -28,10 +28,16 @@ type bytesBacked interface {
Bytes() []byte
}

const bloomLength = 256
const (
// BloomByteLength represents the number of bytes used in a header log bloom.
BloomByteLength = 256

// Bloom represents a 256 bit bloom filter.
type Bloom [bloomLength]byte
// BloomBitLength represents the number of bits used in a header log bloom.
BloomBitLength = 8 * BloomByteLength
)

// Bloom represents a 2048 bit bloom filter.
type Bloom [BloomByteLength]byte

// BytesToBloom converts a byte slice to a bloom filter.
// It panics if b is not of suitable size.
@ -47,7 +53,7 @@ func (b *Bloom) SetBytes(d []byte) {
if len(b) < len(d) {
panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d)))
}
copy(b[bloomLength-len(d):], d)
copy(b[BloomByteLength-len(d):], d)
}

// Add adds d to the filter. Future calls of Test(d) will return true.