dashboard: send current block to the dashboard client (#19762)

This adds all dashboard changes from the last couple of months.
We're about to remove the dashboard, but decided to land the recent
work first in case anyone wants to pick up this project later on.

* cmd, dashboard, eth, p2p: send peer info to the dashboard
* dashboard: update npm packages, improve UI, rebase
* dashboard, p2p: remove println, change doc
* cmd, dashboard, eth, p2p: cleanup after review
* dashboard: send current block to the dashboard client
Author: Kurkó Mihály
Date: 2019-11-13 13:13:13 +02:00
Committed by: Felix Lange
Parent: 6f1a600f6c
Commit: 4ea9b62b5c
20 changed files with 11929 additions and 10886 deletions

dashboard/dashboard.go

@@ -27,14 +27,16 @@ package dashboard
 import (
 	"fmt"
+	"io"
 	"net"
 	"net/http"
 	"sync"
 	"sync/atomic"
 	"time"
-	"io"

+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/les"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
@@ -44,7 +46,8 @@ import (
 )

 const (
-	sampleLimit = 200 // Maximum number of data samples
+	sampleLimit        = 200 // Maximum number of data samples
+	dataCollectorCount = 4
 )

 // Dashboard contains the dashboard internals.
@@ -57,16 +60,23 @@ type Dashboard struct {
 	history *Message // Stored historical data

-	lock     sync.Mutex   // Lock protecting the dashboard's internals
-	sysLock  sync.RWMutex // Lock protecting the stored system data
-	peerLock sync.RWMutex // Lock protecting the stored peer data
-	logLock  sync.RWMutex // Lock protecting the stored log data
+	lock      sync.Mutex   // Lock protecting the dashboard's internals
+	chainLock sync.RWMutex // Lock protecting the stored blockchain data
+	sysLock   sync.RWMutex // Lock protecting the stored system data
+	peerLock  sync.RWMutex // Lock protecting the stored peer data
+	logLock   sync.RWMutex // Lock protecting the stored log data

 	geodb  *geoDB          // geoip database instance for IP to geographical information conversions
 	logdir string          // Directory containing the log files
 	quit   chan chan error // Channel used for graceful exit
 	wg     sync.WaitGroup  // Wait group used to close the data collector threads

+	peerCh  chan p2p.MeteredPeerEvent // Peer event channel.
+	subPeer event.Subscription        // Peer event subscription.
+	ethServ *eth.Ethereum             // Ethereum object serving internals.
+	lesServ *les.LightEthereum        // LightEthereum object serving internals.
 }
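
Each stored data category in the struct above now owns a dedicated RWMutex (chainLock being the new one), so a collector updating one category never contends with readers or writers of another. A minimal, self-contained sketch of that layout; the store type and its fields are invented stand-ins, not the dashboard's own:

package main

import (
	"fmt"
	"sync"
)

// Per-category locking: updating chain data never blocks system data.
type store struct {
	chainLock sync.RWMutex
	chain     []string

	sysLock sync.RWMutex
	sys     []string
}

func main() {
	var (
		s  store
		wg sync.WaitGroup
	)
	wg.Add(2)
	go func() { // stand-in for the chain collector
		defer wg.Done()
		s.chainLock.Lock()
		s.chain = append(s.chain, "block 1")
		s.chainLock.Unlock()
	}()
	go func() { // stand-in for the system collector, independent lock
		defer wg.Done()
		s.sysLock.Lock()
		s.sys = append(s.sys, "cpu sample")
		s.sysLock.Unlock()
	}()
	wg.Wait()
	fmt.Println(s.chain, s.sys)
}
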
 // client represents active websocket connection with a remote browser.
@@ -77,12 +87,23 @@ type client struct {
 }

 // New creates a new dashboard instance with the given configuration.
-func New(config *Config, commit string, logdir string) *Dashboard {
-	now := time.Now()
+func New(config *Config, ethServ *eth.Ethereum, lesServ *les.LightEthereum, commit string, logdir string) *Dashboard {
+	// There is a data race between the network layer and the dashboard, which
+	// can cause some lost peer events, therefore some peers might not appear
+	// on the dashboard.
+	// In order to solve this problem, the peer event subscription is registered
+	// here, before the network layer starts.
+	peerCh := make(chan p2p.MeteredPeerEvent, p2p.MeteredPeerLimit)
 	versionMeta := ""
 	if len(params.VersionMeta) > 0 {
 		versionMeta = fmt.Sprintf(" (%s)", params.VersionMeta)
 	}
+	var genesis common.Hash
+	if ethServ != nil {
+		genesis = ethServ.BlockChain().Genesis().Hash()
+	} else if lesServ != nil {
+		genesis = lesServ.BlockChain().Genesis().Hash()
+	}
 	return &Dashboard{
 		conns:  make(map[uint32]*client),
 		config: config,
@@ -91,24 +112,29 @@ func New(config *Config, commit string, logdir string) *Dashboard {
 			General: &GeneralMessage{
 				Commit:  commit,
 				Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
+				Genesis: genesis,
 			},
 			System: &SystemMessage{
-				ActiveMemory:   emptyChartEntries(now, sampleLimit),
-				VirtualMemory:  emptyChartEntries(now, sampleLimit),
-				NetworkIngress: emptyChartEntries(now, sampleLimit),
-				NetworkEgress:  emptyChartEntries(now, sampleLimit),
-				ProcessCPU:     emptyChartEntries(now, sampleLimit),
-				SystemCPU:      emptyChartEntries(now, sampleLimit),
-				DiskRead:       emptyChartEntries(now, sampleLimit),
-				DiskWrite:      emptyChartEntries(now, sampleLimit),
+				ActiveMemory:   emptyChartEntries(sampleLimit),
+				VirtualMemory:  emptyChartEntries(sampleLimit),
+				NetworkIngress: emptyChartEntries(sampleLimit),
+				NetworkEgress:  emptyChartEntries(sampleLimit),
+				ProcessCPU:     emptyChartEntries(sampleLimit),
+				SystemCPU:      emptyChartEntries(sampleLimit),
+				DiskRead:       emptyChartEntries(sampleLimit),
+				DiskWrite:      emptyChartEntries(sampleLimit),
 			},
 		},
-		logdir: logdir,
+		logdir:  logdir,
+		peerCh:  peerCh,
+		subPeer: p2p.SubscribeMeteredPeerEvent(peerCh),
+		ethServ: ethServ,
+		lesServ: lesServ,
 	}
 }

 // emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
-func emptyChartEntries(t time.Time, limit int) ChartEntries {
+func emptyChartEntries(limit int) ChartEntries {
 	ce := make(ChartEntries, limit)
 	for i := 0; i < limit; i++ {
 		ce[i] = new(ChartEntry)
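
The comment added to New documents the ordering constraint: events published to a feed before anyone subscribes are simply dropped, so the peer event subscription must exist before the network layer starts emitting. A minimal sketch of that ordering using go-ethereum's event.Feed, the mechanism underneath p2p.SubscribeMeteredPeerEvent; the producer goroutine here is a stand-in for the network layer:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.Feed
	ch := make(chan int, 8) // buffered, like peerCh with p2p.MeteredPeerLimit

	// Subscribe first: everything sent afterwards lands in ch.
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// The producer starts only after the subscription is registered,
	// so none of its events can be lost.
	go func() {
		for i := 0; i < 3; i++ {
			feed.Send(i)
		}
	}()

	for i := 0; i < 3; i++ {
		fmt.Println(<-ch) // prints 0, 1, 2
	}
}
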
@@ -127,7 +153,8 @@ func (db *Dashboard) APIs() []rpc.API { return nil }
 func (db *Dashboard) Start(server *p2p.Server) error {
 	log.Info("Starting dashboard", "url", fmt.Sprintf("http://%s:%d", db.config.Host, db.config.Port))

-	db.wg.Add(3)
+	db.wg.Add(dataCollectorCount)

+	go db.collectChainData()
 	go db.collectSystemData()
 	go db.streamLogs()
 	go db.collectPeerData()
@@ -141,7 +168,11 @@ func (db *Dashboard) Start(server *p2p.Server) error {
 	}
 	db.listener = listener

-	go http.Serve(listener, nil)
+	go func() {
+		if err := http.Serve(listener, nil); err != http.ErrServerClosed {
+			log.Warn("Could not accept incoming HTTP connections", "err", err)
+		}
+	}()

 	return nil
 }
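
The old one-liner discarded http.Serve's return value; the new version runs it in a goroutine and logs any exit error other than http.ErrServerClosed, the sentinel for a graceful server shutdown. A standalone sketch of the same pattern; the address and the log call are placeholders:

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	// Placeholder address; the dashboard builds it from config.Host/config.Port.
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Report any exit error except the graceful-shutdown sentinel.
		if err := http.Serve(listener, nil); err != http.ErrServerClosed {
			log.Printf("could not accept incoming HTTP connections: err=%v", err)
		}
	}()

	// Closing the listener (as Dashboard.Stop does) forces http.Serve to
	// return with a "use of closed network connection" error.
	listener.Close()
	<-done
}
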
@@ -155,8 +186,8 @@ func (db *Dashboard) Stop() error {
 		errs = append(errs, err)
 	}
 	// Close the collectors.
-	errc := make(chan error, 1)
-	for i := 0; i < 3; i++ {
+	errc := make(chan error, dataCollectorCount)
+	for i := 0; i < dataCollectorCount; i++ {
 		db.quit <- errc
 		if err := <-errc; err != nil {
 			errs = append(errs, err)
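
Sizing errc by dataCollectorCount matches the shutdown handshake: each collector blocks on the shared quit channel, receives the error channel, and replies with its exit status, one reply per collector. A self-contained sketch of the pattern; the collector bodies and the simulated failure are invented for illustration:

package main

import (
	"errors"
	"fmt"
	"sync"
)

const dataCollectorCount = 4

func main() {
	quit := make(chan chan error)
	var wg sync.WaitGroup

	wg.Add(dataCollectorCount)
	for i := 0; i < dataCollectorCount; i++ {
		go func(id int) { // stand-in for a collector goroutine
			defer wg.Done()
			errc := <-quit // block until Stop signals
			if id == 2 {
				errc <- errors.New("collector 2: simulated failure")
				return
			}
			errc <- nil
		}(i)
	}

	// Mirror of Dashboard.Stop: one signal and one status read per collector.
	errc := make(chan error, dataCollectorCount)
	for i := 0; i < dataCollectorCount; i++ {
		quit <- errc
		if err := <-errc; err != nil {
			fmt.Println("collector error:", err)
		}
	}
	wg.Wait()
}
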
@@ -230,20 +261,21 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
 	}()

 	// Send the past data.
+	db.chainLock.RLock()
 	db.sysLock.RLock()
 	db.peerLock.RLock()
 	db.logLock.RLock()

 	h := deepcopy.Copy(db.history).(*Message)

+	db.chainLock.RUnlock()
 	db.sysLock.RUnlock()
 	db.peerLock.RUnlock()
 	db.logLock.RUnlock()

-	client.msg <- h
-
 	// Start tracking the connection and drop at connection loss.
 	db.lock.Lock()
+	client.msg <- h
 	db.conns[id] = client
 	db.lock.Unlock()
 	defer func() {
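
The handler holds the read locks only while deep-copying the history, then releases them before pushing the snapshot to the client's channel, so a slow websocket consumer never stalls the collectors. A reduced sketch of that copy-then-unlock pattern with the same github.com/mohae/deepcopy package the dashboard uses; the history type is an invented stand-in for *Message:

package main

import (
	"fmt"
	"sync"

	"github.com/mohae/deepcopy"
)

type history struct {
	Blocks []uint64
}

func main() {
	var (
		chainLock sync.RWMutex
		shared    = &history{Blocks: []uint64{1, 2, 3}}
	)

	// Hold the lock only for the duration of the copy.
	chainLock.RLock()
	snapshot := deepcopy.Copy(shared).(*history)
	chainLock.RUnlock()

	// The snapshot is independent: mutating it leaves the shared state intact,
	// so it can be sent to a client without any lock held.
	snapshot.Blocks[0] = 99
	fmt.Println(shared.Blocks, snapshot.Blocks) // [1 2 3] [99 2 3]
}
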