swarm: initial instrumentation (#15969)

* swarm: initial instrumentation with go-metrics

* swarm: initialise metrics collection and add ResettingTimer to HTTP requests

* swarm: update metrics flags names. remove redundant Timer.

* swarm: rename method for periodically updating gauges

* swarm: finalise metrics after feedback

* swarm/network: always init kad metrics containers

* swarm/network: off-by-one index in metrics containers

* swarm, metrics: resolved conflicts
This commit is contained in:
Anton Evangelatov
2018-02-23 14:19:59 +01:00
committed by Balint Gabor
parent b677a07d36
commit dcca613a0b
16 changed files with 381 additions and 10 deletions

View File

@ -23,6 +23,8 @@ import (
"io"
"sync"
"time"
"github.com/ethereum/go-ethereum/metrics"
)
/*
@ -63,6 +65,11 @@ var (
errOperationTimedOut = errors.New("operation timed out")
)
// Metrics variables registered with the go-metrics default registry.
var (
	// newChunkCounter counts chunks emitted on chunkC during tree chunking.
	// Note: it is incremented even when the local node already has the chunk
	// (see the NOTE at the increment site in hashChunk).
	newChunkCounter = metrics.NewRegisteredCounter("storage.chunks.new", nil)
)
type TreeChunker struct {
branches int64
hashFunc SwarmHasher
@ -298,6 +305,13 @@ func (self *TreeChunker) hashChunk(hasher SwarmHash, job *hashJob, chunkC chan *
job.parentWg.Done()
if chunkC != nil {
//NOTE: this increments the chunk count even if the local node already has
//the chunk; on file upload the counter grows even when the same file has
//been uploaded before. It should therefore be evaluated whether this
//counter is worth keeping, or whether it would be better to count chunks
//when they are Put to the local database — for chunk tracking it may be
//worth distinguishing a newly created chunk from one being put to the local DB.
newChunkCounter.Inc(1)
chunkC <- newChunk
}
}

View File

@ -33,11 +33,18 @@ import (
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
)
// Metrics variables registered with the go-metrics default registry.
var (
	// gcCounter counts entries removed by collectGarbage (value <= cutoff).
	gcCounter = metrics.NewRegisteredCounter("storage.db.dbstore.gc.count", nil)
	// dbStoreDeleteCounter counts individual deletions performed by
	// DbStore.delete (each one a leveldb batch removing index + data keys).
	dbStoreDeleteCounter = metrics.NewRegisteredCounter("storage.db.dbstore.rm.count", nil)
)
const (
defaultDbCapacity = 5000000
defaultRadius = 0 // not yet used
@ -255,6 +262,7 @@ func (s *DbStore) collectGarbage(ratio float32) {
// actual gc
for i := 0; i < gcnt; i++ {
if s.gcArray[i].value <= cutval {
gcCounter.Inc(1)
s.delete(s.gcArray[i].idx, s.gcArray[i].idxKey)
}
}
@ -383,6 +391,7 @@ func (s *DbStore) delete(idx uint64, idxKey []byte) {
batch := new(leveldb.Batch)
batch.Delete(idxKey)
batch.Delete(getDataKey(idx))
dbStoreDeleteCounter.Inc(1)
s.entryCnt--
batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
s.db.Write(batch)

View File

@ -18,6 +18,13 @@ package storage
import (
"encoding/binary"
"github.com/ethereum/go-ethereum/metrics"
)
// Metrics variables registered with the go-metrics default registry.
var (
	// dbStorePutCounter counts chunks handed to DbStore.Put via
	// LocalStore.Put's background goroutine.
	dbStorePutCounter = metrics.NewRegisteredCounter("storage.db.dbstore.put.count", nil)
)
// LocalStore is a combination of inmemory db over a disk persisted db
@ -39,6 +46,14 @@ func NewLocalStore(hash SwarmHasher, params *StoreParams) (*LocalStore, error) {
}, nil
}
// CacheCounter reports the number of entries currently held in the
// in-memory cache layer (the MemStore) of this LocalStore.
func (self *LocalStore) CacheCounter() uint64 {
	mem := self.memStore.(*MemStore)
	return uint64(mem.Counter())
}
// DbCounter reports the number of entries currently held in the
// disk-persisted layer (the DbStore) of this LocalStore.
func (self *LocalStore) DbCounter() uint64 {
	db := self.DbStore.(*DbStore)
	return db.Counter()
}
// LocalStore is itself a chunk store
// unsafe, in that the data is not integrity checked
func (self *LocalStore) Put(chunk *Chunk) {
@ -48,6 +63,7 @@ func (self *LocalStore) Put(chunk *Chunk) {
chunk.wg.Add(1)
}
go func() {
dbStorePutCounter.Inc(1)
self.DbStore.Put(chunk)
if chunk.wg != nil {
chunk.wg.Done()

View File

@ -23,6 +23,13 @@ import (
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
// Metrics variables registered with the go-metrics default registry.
var (
	// memstorePutCounter counts entries inserted via MemStore.Put.
	memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil)
	// memstoreRemoveCounter counts entries evicted by MemStore.removeOldest.
	memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil)
)
const (
@ -130,6 +137,10 @@ func (s *MemStore) setCapacity(c uint) {
s.capacity = c
}
// Counter returns the current number of entries stored in the MemStore.
func (s *MemStore) Counter() uint {
	count := s.entryCnt
	return count
}
// entry (not its copy) is going to be in MemStore
func (s *MemStore) Put(entry *Chunk) {
if s.capacity == 0 {
@ -145,6 +156,8 @@ func (s *MemStore) Put(entry *Chunk) {
s.accessCnt++
memstorePutCounter.Inc(1)
node := s.memtree
bitpos := uint(0)
for node.entry == nil {
@ -289,6 +302,7 @@ func (s *MemStore) removeOldest() {
}
if node.entry.SData != nil {
memstoreRemoveCounter.Inc(1)
node.entry = nil
s.entryCnt--
}