dashboard, p2p, vendor: visualize peers (#19247)
* dashboard, p2p: visualize peers
* dashboard: change scale to green to red
Committed by: Péter Szilágyi
Parent: 1591b63306
Commit: 1a29bf0ee2
79308  dashboard/assets.go (file diff suppressed because one or more lines are too long)
3  dashboard/assets/.eslintignore (new file)
@@ -0,0 +1,3 @@
node_modules/* #ignored by default
flow-typed/*
bundle.js
@@ -16,71 +16,66 @@
|
||||
|
||||
// React syntax style mostly according to https://github.com/airbnb/javascript/tree/master/react
|
||||
{
|
||||
'env': {
|
||||
'browser': true,
|
||||
'node': true,
|
||||
'es6': true,
|
||||
"env": {
|
||||
"browser": true,
|
||||
"node": true,
|
||||
"es6": true
|
||||
},
|
||||
'parser': 'babel-eslint',
|
||||
'parserOptions': {
|
||||
'sourceType': 'module',
|
||||
'ecmaVersion': 6,
|
||||
'ecmaFeatures': {
|
||||
'jsx': true,
|
||||
"parser": "babel-eslint",
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": 6,
|
||||
"ecmaFeatures": {
|
||||
"jsx": true
|
||||
}
|
||||
},
|
||||
'extends': 'airbnb',
|
||||
'plugins': [
|
||||
'flowtype',
|
||||
'react',
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"airbnb",
|
||||
"plugin:flowtype/recommended",
|
||||
"plugin:react/recommended"
|
||||
],
|
||||
'rules': {
|
||||
'no-tabs': 'off',
|
||||
'indent': ['error', 'tab'],
|
||||
'react/jsx-indent': ['error', 'tab'],
|
||||
'react/jsx-indent-props': ['error', 'tab'],
|
||||
'react/prefer-stateless-function': 'off',
|
||||
'jsx-quotes': ['error', 'prefer-single'],
|
||||
'no-plusplus': 'off',
|
||||
'no-console': ['error', { allow: ['error'] }],
|
||||
|
||||
"plugins": [
|
||||
"flowtype",
|
||||
"react"
|
||||
],
|
||||
"rules": {
|
||||
"no-tabs": "off",
|
||||
"indent": ["error", "tab"],
|
||||
"react/jsx-indent": ["error", "tab"],
|
||||
"react/jsx-indent-props": ["error", "tab"],
|
||||
"react/prefer-stateless-function": "off",
|
||||
"react/destructuring-assignment": ["error", "always", {"ignoreClassFields": true}],
|
||||
"jsx-quotes": ["error", "prefer-single"],
|
||||
"no-plusplus": "off",
|
||||
"no-console": ["error", { "allow": ["error"] }],
|
||||
// Specifies the maximum length of a line.
|
||||
'max-len': ['warn', 120, 2, {
|
||||
'ignoreUrls': true,
|
||||
'ignoreComments': false,
|
||||
'ignoreRegExpLiterals': true,
|
||||
'ignoreStrings': true,
|
||||
'ignoreTemplateLiterals': true,
|
||||
"max-len": ["warn", 120, 2, {
|
||||
"ignoreUrls": true,
|
||||
"ignoreComments": false,
|
||||
"ignoreRegExpLiterals": true,
|
||||
"ignoreStrings": true,
|
||||
"ignoreTemplateLiterals": true
|
||||
}],
|
||||
// Enforces consistent spacing between keys and values in object literal properties.
|
||||
'key-spacing': ['error', {'align': {
|
||||
'beforeColon': false,
|
||||
'afterColon': true,
|
||||
'on': 'value'
|
||||
"key-spacing": ["error", {"align": {
|
||||
"beforeColon": false,
|
||||
"afterColon": true,
|
||||
"on": "value"
|
||||
}}],
|
||||
// Prohibits padding inside curly braces.
|
||||
'object-curly-spacing': ['error', 'never'],
|
||||
'no-use-before-define': 'off', // messageAPI
|
||||
'default-case': 'off',
|
||||
|
||||
'flowtype/boolean-style': ['error', 'boolean'],
|
||||
'flowtype/define-flow-type': 'warn',
|
||||
'flowtype/generic-spacing': ['error', 'never'],
|
||||
'flowtype/no-primitive-constructor-types': 'error',
|
||||
'flowtype/no-weak-types': 'error',
|
||||
'flowtype/object-type-delimiter': ['error', 'comma'],
|
||||
'flowtype/require-valid-file-annotation': 'error',
|
||||
'flowtype/semi': ['error', 'always'],
|
||||
'flowtype/space-after-type-colon': ['error', 'always'],
|
||||
'flowtype/space-before-generic-bracket': ['error', 'never'],
|
||||
'flowtype/space-before-type-colon': ['error', 'never'],
|
||||
'flowtype/union-intersection-spacing': ['error', 'always'],
|
||||
'flowtype/use-flow-type': 'warn',
|
||||
'flowtype/valid-syntax': 'warn',
|
||||
"object-curly-spacing": ["error", "never"],
|
||||
"no-use-before-define": "off", // message types
|
||||
"default-case": "off"
|
||||
},
|
||||
'settings': {
|
||||
'flowtype': {
|
||||
'onlyFilesWithFlowAnnotation': true,
|
||||
"settings": {
|
||||
"import/resolver": {
|
||||
"node": {
|
||||
"paths": ["components"] // import './components/Component' -> import 'Component'
|
||||
}
|
||||
},
|
||||
"flowtype": {
|
||||
"onlyFilesWithFlowAnnotation": true
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@@ -7,3 +7,5 @@ node_modules/jss/flow-typed

[options]
include_warnings=true
module.system.node.resolve_dirname=node_modules
module.system.node.resolve_dirname=components
@@ -16,43 +16,46 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import {faHome, faLink, faGlobeEurope, faTachometerAlt, faList} from '@fortawesome/free-solid-svg-icons';
|
||||
import {faCreditCard} from '@fortawesome/free-regular-svg-icons';
|
||||
|
||||
type ProvidedMenuProp = {|title: string, icon: string|};
|
||||
const menuSkeletons: Array<{|id: string, menu: ProvidedMenuProp|}> = [
|
||||
{
|
||||
id: 'home',
|
||||
menu: {
|
||||
title: 'Home',
|
||||
icon: 'home',
|
||||
icon: faHome,
|
||||
},
|
||||
}, {
|
||||
id: 'chain',
|
||||
menu: {
|
||||
title: 'Chain',
|
||||
icon: 'link',
|
||||
icon: faLink,
|
||||
},
|
||||
}, {
|
||||
id: 'txpool',
|
||||
menu: {
|
||||
title: 'TxPool',
|
||||
icon: 'credit-card',
|
||||
icon: faCreditCard,
|
||||
},
|
||||
}, {
|
||||
id: 'network',
|
||||
menu: {
|
||||
title: 'Network',
|
||||
icon: 'globe',
|
||||
icon: faGlobeEurope,
|
||||
},
|
||||
}, {
|
||||
id: 'system',
|
||||
menu: {
|
||||
title: 'System',
|
||||
icon: 'tachometer',
|
||||
icon: faTachometerAlt,
|
||||
},
|
||||
}, {
|
||||
id: 'logs',
|
||||
menu: {
|
||||
title: 'Logs',
|
||||
icon: 'list',
|
||||
icon: faList,
|
||||
},
|
||||
},
|
||||
];
|
||||
@@ -64,8 +67,26 @@ export const MENU: Map<string, {...MenuProp}> = new Map(menuSkeletons.map(({id,
|
||||
|
||||
export const DURATION = 200;
|
||||
|
||||
export const chartStrokeWidth = 0.2;
|
||||
|
||||
export const styles = {
|
||||
light: {
|
||||
color: 'rgba(255, 255, 255, 0.54)',
|
||||
},
|
||||
};
|
||||
|
||||
// unit contains the units for the bytePlotter.
|
||||
export const unit = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'];
|
||||
|
||||
// simplifyBytes returns the simplified version of the given value followed by the unit.
|
||||
export const simplifyBytes = (x: number) => {
|
||||
let i = 0;
|
||||
for (; x > 1024 && i < 8; i++) {
|
||||
x /= 1024;
|
||||
}
|
||||
return x.toFixed(2).toString().concat(' ', unit[i], 'B');
|
||||
};
|
||||
|
||||
// hues contains predefined colors for gradient stop colors.
|
||||
export const hues = ['#00FF00', '#FFFF00', '#FF7F00', '#FF0000'];
|
||||
export const hueScale = [0, 2048, 102400, 2097152];
|
||||
|
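The hues / hueScale pair added to common.jsx is what the commit message's "change scale to green to red" refers to: each predefined hex color is anchored to a per-sample byte threshold, the peer charts interpolate between them, and simplifyBytes formats the tooltip values. A small standalone sketch of how the two exports relate (plain JavaScript; the sample values are invented for illustration):

    const unit = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'];

    // simplifyBytes, as exported above: divide by 1024 until the value fits.
    const simplifyBytes = (x) => {
        let i = 0;
        for (; x > 1024 && i < 8; i++) {
            x /= 1024;
        }
        return x.toFixed(2).toString().concat(' ', unit[i], 'B');
    };

    // Gradient stop colors and the byte thresholds they are anchored to.
    const hues = ['#00FF00', '#FFFF00', '#FF7F00', '#FF0000']; // green, yellow, orange, red
    const hueScale = [0, 2048, 102400, 2097152];               // 0 B, 2 KiB, 100 KiB, 2 MiB

    console.log(simplifyBytes(1536));       // '1.50 KiB'
    console.log(simplifyBytes(123 * 1024)); // '123.00 KiB' (between hueScale[2] and hueScale[3])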
@@ -19,7 +19,7 @@
import React, {Component} from 'react';
import type {ChildrenArray} from 'react';

import Grid from 'material-ui/Grid';
import Grid from '@material-ui/core/Grid';

// styles contains the constant styles of the component.
const styles = {
@@ -33,7 +33,7 @@ const styles = {
flex: 1,
padding: 0,
},
}
};

export type Props = {
children: ChildrenArray<React$Element<any>>,
@@ -18,8 +18,8 @@

import React, {Component} from 'react';

import Typography from 'material-ui/Typography';
import {styles} from '../common';
import Typography from '@material-ui/core/Typography';
import {styles, simplifyBytes} from '../common';

// multiplier multiplies a number by another.
export const multiplier = <T>(by: number = 1) => (x: number) => x * by;
@@ -37,18 +37,6 @@ export const percentPlotter = <T>(text: string, mapper: (T => T) = multiplier(1)
);
};

// unit contains the units for the bytePlotter.
const unit = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'];

// simplifyBytes returns the simplified version of the given value followed by the unit.
const simplifyBytes = (x: number) => {
let i = 0;
for (; x > 1024 && i < 8; i++) {
x /= 1024;
}
return x.toFixed(2).toString().concat(' ', unit[i], 'B');
};

// bytePlotter renders a tooltip, which displays the payload as a byte value.
export const bytePlotter = <T>(text: string, mapper: (T => T) = multiplier(1)) => (payload: T) => {
const p = mapper(payload);
@@ -70,7 +58,8 @@ export const bytePerSecPlotter = <T>(text: string, mapper: (T => T) = multiplier
}
return (
<Typography type='caption' color='inherit'>
<span style={styles.light}>{text}</span> {simplifyBytes(p)}/s
<span style={styles.light}>{text}</span>
{simplifyBytes(p)}/s
</Typography>
);
};
@@ -17,14 +17,16 @@
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
import {hot} from 'react-hot-loader';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
|
||||
import Header from './Header';
|
||||
import Body from './Body';
|
||||
import Header from 'Header';
|
||||
import Body from 'Body';
|
||||
import {inserter as logInserter, SAME} from 'Logs';
|
||||
import {inserter as peerInserter} from 'Network';
|
||||
import {MENU} from '../common';
|
||||
import type {Content} from '../types/content';
|
||||
import {inserter as logInserter} from './Logs';
|
||||
|
||||
// deepUpdate updates an object corresponding to the given update data, which has
|
||||
// the shape of the same structure as the original object. updater also has the same
|
||||
@@ -37,7 +39,6 @@ import {inserter as logInserter} from './Logs';
|
||||
// of the update.
|
||||
const deepUpdate = (updater: Object, update: Object, prev: Object): $Shape<Content> => {
|
||||
if (typeof update === 'undefined') {
|
||||
// TODO (kurkomisi): originally this was deep copy, investigate it.
|
||||
return prev;
|
||||
}
|
||||
if (typeof updater === 'function') {
|
||||
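deepUpdate, as described above, walks the updaters tree, the incoming message and the previous content in parallel, and applies an updater function wherever one is defined (appender for the system charts, logInserter and now peerInserter for logs and peers). A rough sketch of the idea only, simplified and not the actual implementation, with invented sample data:

    // Simplified sketch: recurse until an updater function is found, then let it
    // fold the update into the previous value.
    const deepUpdateSketch = (updater, update, prev) => {
        if (typeof update === 'undefined') {
            return prev;
        }
        if (typeof updater === 'function') {
            return updater(update, prev);
        }
        const updated = {};
        Object.keys(updater).forEach((key) => {
            updated[key] = deepUpdateSketch(updater[key], update[key], prev[key]);
        });
        return updated;
    };

    // A leaf updater in the style of appender(limit): keep the newest `limit` samples.
    const appender = (limit) => (update, prev) => [...prev, ...update].slice(-limit);

    const prev = {system: {activeMemory: [1, 2, 3]}};
    const msg  = {system: {activeMemory: [4]}};
    const next = deepUpdateSketch({system: {activeMemory: appender(3)}}, msg, prev);
    // next.system.activeMemory -> [2, 3, 4]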
@@ -88,8 +89,13 @@ const defaultContent: () => Content = () => ({
|
||||
home: {},
|
||||
chain: {},
|
||||
txpool: {},
|
||||
network: {},
|
||||
system: {
|
||||
network: {
|
||||
peers: {
|
||||
bundles: {},
|
||||
},
|
||||
diff: [],
|
||||
},
|
||||
system: {
|
||||
activeMemory: [],
|
||||
virtualMemory: [],
|
||||
networkIngress: [],
|
||||
@@ -103,8 +109,8 @@ const defaultContent: () => Content = () => ({
|
||||
chunks: [],
|
||||
endTop: false,
|
||||
endBottom: true,
|
||||
topChanged: 0,
|
||||
bottomChanged: 0,
|
||||
topChanged: SAME,
|
||||
bottomChanged: SAME,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -119,7 +125,7 @@ const updaters = {
|
||||
home: null,
|
||||
chain: null,
|
||||
txpool: null,
|
||||
network: null,
|
||||
network: peerInserter(200),
|
||||
system: {
|
||||
activeMemory: appender(200),
|
||||
virtualMemory: appender(200),
|
||||
@@ -186,8 +192,8 @@ class Dashboard extends Component<Props, State> {
|
||||
// reconnect establishes a websocket connection with the server, listens for incoming messages
|
||||
// and tries to reconnect on connection loss.
|
||||
reconnect = () => {
|
||||
// PROD is defined by webpack.
|
||||
const server = new WebSocket(`${((window.location.protocol === 'https:') ? 'wss://' : 'ws://')}${PROD ? window.location.host : 'localhost:8080'}/api`);
|
||||
const host = process.env.NODE_ENV === 'production' ? window.location.host : 'localhost:8080';
|
||||
const server = new WebSocket(`${((window.location.protocol === 'https:') ? 'wss://' : 'ws://')}${host}/api`);
|
||||
server.onopen = () => {
|
||||
this.setState({content: defaultContent(), shouldUpdate: {}, server});
|
||||
};
|
||||
@@ -249,4 +255,4 @@ class Dashboard extends Component<Props, State> {
|
||||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(Dashboard);
|
||||
export default hot(module)(withStyles(themeStyles)(Dashboard));
|
||||
|
@@ -18,14 +18,19 @@
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import Typography from 'material-ui/Typography';
|
||||
import Grid from 'material-ui/Grid';
|
||||
import {ResponsiveContainer, AreaChart, Area, Tooltip} from 'recharts';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import Grid from '@material-ui/core/Grid';
|
||||
import ResponsiveContainer from 'recharts/es6/component/ResponsiveContainer';
|
||||
import AreaChart from 'recharts/es6/chart/AreaChart';
|
||||
import Area from 'recharts/es6/cartesian/Area';
|
||||
import ReferenceLine from 'recharts/es6/cartesian/ReferenceLine';
|
||||
import Label from 'recharts/es6/component/Label';
|
||||
import Tooltip from 'recharts/es6/component/Tooltip';
|
||||
|
||||
import ChartRow from './ChartRow';
|
||||
import CustomTooltip, {bytePlotter, bytePerSecPlotter, percentPlotter, multiplier} from './CustomTooltip';
|
||||
import {styles as commonStyles} from '../common';
|
||||
import ChartRow from 'ChartRow';
|
||||
import CustomTooltip, {bytePlotter, bytePerSecPlotter, percentPlotter, multiplier} from 'CustomTooltip';
|
||||
import {chartStrokeWidth, styles as commonStyles} from '../common';
|
||||
import type {General, System} from '../types/content';
|
||||
|
||||
const FOOTER_SYNC_ID = 'footerSyncId';
|
||||
@@ -38,6 +43,15 @@ const TRAFFIC = 'traffic';
|
||||
const TOP = 'Top';
|
||||
const BOTTOM = 'Bottom';
|
||||
|
||||
const cpuLabelTop = 'Process load';
|
||||
const cpuLabelBottom = 'System load';
|
||||
const memoryLabelTop = 'Active memory';
|
||||
const memoryLabelBottom = 'Virtual memory';
|
||||
const diskLabelTop = 'Disk read';
|
||||
const diskLabelBottom = 'Disk write';
|
||||
const trafficLabelTop = 'Download';
|
||||
const trafficLabelBottom = 'Upload';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
footer: {
|
||||
@@ -53,6 +67,10 @@ const styles = {
|
||||
height: '100%',
|
||||
width: '99%',
|
||||
},
|
||||
link: {
|
||||
color: 'inherit',
|
||||
textDecoration: 'none',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
@@ -73,18 +91,23 @@ export type Props = {
|
||||
shouldUpdate: Object,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Footer renders the footer of the dashboard.
|
||||
class Footer extends Component<Props> {
|
||||
shouldComponentUpdate(nextProps) {
|
||||
class Footer extends Component<Props, State> {
|
||||
shouldComponentUpdate(nextProps: Readonly<Props>, nextState: Readonly<State>, nextContext: any) {
|
||||
return typeof nextProps.shouldUpdate.general !== 'undefined' || typeof nextProps.shouldUpdate.system !== 'undefined';
|
||||
}
|
||||
|
||||
// halfHeightChart renders an area chart with half of the height of its parent.
|
||||
halfHeightChart = (chartProps, tooltip, areaProps) => (
|
||||
halfHeightChart = (chartProps, tooltip, areaProps, label, position) => (
|
||||
<ResponsiveContainer width='100%' height='50%'>
|
||||
<AreaChart {...chartProps} >
|
||||
<AreaChart {...chartProps}>
|
||||
{!tooltip || (<Tooltip cursor={false} content={<CustomTooltip tooltip={tooltip} />} />)}
|
||||
<Area isAnimationActive={false} type='monotone' {...areaProps} />
|
||||
<Area isAnimationActive={false} strokeWidth={chartStrokeWidth} type='monotone' {...areaProps} />
|
||||
<ReferenceLine x={0} strokeWidth={0}>
|
||||
<Label fill={areaProps.fill} value={label} position={position} />
|
||||
</ReferenceLine>
|
||||
</AreaChart>
|
||||
</ResponsiveContainer>
|
||||
);
|
||||
@@ -111,6 +134,8 @@ class Footer extends Component<Props> {
|
||||
},
|
||||
topChart.tooltip,
|
||||
{dataKey: topKey, stroke: topColor, fill: topColor},
|
||||
topChart.label,
|
||||
'insideBottomLeft',
|
||||
)}
|
||||
{this.halfHeightChart(
|
||||
{
|
||||
@@ -120,6 +145,8 @@ class Footer extends Component<Props> {
|
||||
},
|
||||
bottomChart.tooltip,
|
||||
{dataKey: bottomKey, stroke: bottomColor, fill: bottomColor},
|
||||
bottomChart.label,
|
||||
'insideTopLeft',
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
@@ -135,37 +162,42 @@ class Footer extends Component<Props> {
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
CPU,
|
||||
{data: system.processCPU, tooltip: percentPlotter('Process load')},
|
||||
{data: system.systemCPU, tooltip: percentPlotter('System load', multiplier(-1))},
|
||||
{data: system.processCPU, tooltip: percentPlotter(cpuLabelTop), label: cpuLabelTop},
|
||||
{data: system.systemCPU, tooltip: percentPlotter(cpuLabelBottom, multiplier(-1)), label: cpuLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
MEMORY,
|
||||
{data: system.activeMemory, tooltip: bytePlotter('Active memory')},
|
||||
{data: system.virtualMemory, tooltip: bytePlotter('Virtual memory', multiplier(-1))},
|
||||
{data: system.activeMemory, tooltip: bytePlotter(memoryLabelTop), label: memoryLabelTop},
|
||||
{data: system.virtualMemory, tooltip: bytePlotter(memoryLabelBottom, multiplier(-1)), label: memoryLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
DISK,
|
||||
{data: system.diskRead, tooltip: bytePerSecPlotter('Disk read')},
|
||||
{data: system.diskWrite, tooltip: bytePerSecPlotter('Disk write', multiplier(-1))},
|
||||
{data: system.diskRead, tooltip: bytePerSecPlotter(diskLabelTop), label: diskLabelTop},
|
||||
{data: system.diskWrite, tooltip: bytePerSecPlotter(diskLabelBottom, multiplier(-1)), label: diskLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
TRAFFIC,
|
||||
{data: system.networkIngress, tooltip: bytePerSecPlotter('Download')},
|
||||
{data: system.networkEgress, tooltip: bytePerSecPlotter('Upload', multiplier(-1))},
|
||||
{data: system.networkIngress, tooltip: bytePerSecPlotter(trafficLabelTop), label: trafficLabelTop},
|
||||
{data: system.networkEgress, tooltip: bytePerSecPlotter(trafficLabelBottom, multiplier(-1)), label: trafficLabelBottom},
|
||||
)}
|
||||
</ChartRow>
|
||||
</Grid>
|
||||
<Grid item >
|
||||
<Grid item>
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={commonStyles.light}>Geth</span> {general.version}
|
||||
</Typography>
|
||||
{general.commit && (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={commonStyles.light}>{'Commit '}</span>
|
||||
<a href={`https://github.com/ethereum/go-ethereum/commit/${general.commit}`} target='_blank' style={{color: 'inherit', textDecoration: 'none'}} >
|
||||
<a
|
||||
href={`https://github.com/ethereum/go-ethereum/commit/${general.commit}`}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
style={styles.link}
|
||||
>
|
||||
{general.commit.substring(0, 8)}
|
||||
</a>
|
||||
</Typography>
|
||||
|
@@ -18,13 +18,13 @@
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import AppBar from 'material-ui/AppBar';
|
||||
import Toolbar from 'material-ui/Toolbar';
|
||||
import IconButton from 'material-ui/IconButton';
|
||||
import Icon from 'material-ui/Icon';
|
||||
import MenuIcon from 'material-ui-icons/Menu';
|
||||
import Typography from 'material-ui/Typography';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import AppBar from '@material-ui/core/AppBar';
|
||||
import Toolbar from '@material-ui/core/Toolbar';
|
||||
import IconButton from '@material-ui/core/IconButton';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
import {faBars} from '@fortawesome/free-solid-svg-icons';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
@@ -67,9 +67,7 @@ class Header extends Component<Props> {
|
||||
<AppBar position='static' className={classes.header} style={styles.header}>
|
||||
<Toolbar className={classes.toolbar} style={styles.toolbar}>
|
||||
<IconButton onClick={this.props.switchSideBar}>
|
||||
<Icon>
|
||||
<MenuIcon />
|
||||
</Icon>
|
||||
<FontAwesomeIcon icon={faBars} />
|
||||
</IconButton>
|
||||
<Typography type='title' color='inherit' noWrap className={classes.title}>
|
||||
Go Ethereum Dashboard
|
||||
|
@@ -18,7 +18,8 @@
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import List, {ListItem} from 'material-ui/List';
|
||||
import List from '@material-ui/core/List';
|
||||
import ListItem from '@material-ui/core/ListItem';
|
||||
import escapeHtml from 'escape-html';
|
||||
import type {Record, Content, LogsMessage, Logs as LogsType} from '../types/content';
|
||||
|
||||
@@ -104,9 +105,9 @@ const createChunk = (records: Array<Record>) => {
|
||||
|
||||
// ADDED, SAME and REMOVED are used to track the change of the log chunk array.
|
||||
// The scroll position is set using these values.
|
||||
const ADDED = 1;
|
||||
const SAME = 0;
|
||||
const REMOVED = -1;
|
||||
export const ADDED = 1;
|
||||
export const SAME = 0;
|
||||
export const REMOVED = -1;
|
||||
|
||||
// inserter is a state updater function for the main component, which inserts the new log chunk into the chunk array.
|
||||
// limit is the maximum length of the chunk array, used in order to prevent the browser from OOM.
|
||||
@@ -166,7 +167,7 @@ export const inserter = (limit: number) => (update: LogsMessage, prev: LogsType)
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
logListItem: {
|
||||
padding: 0,
|
||||
padding: 0,
|
||||
lineHeight: 1.231,
|
||||
},
|
||||
logChunk: {
|
||||
@@ -251,15 +252,15 @@ class Logs extends Component<Props, State> {
|
||||
// atBottom checks if the scroll position is at the bottom of the container.
|
||||
atBottom = () => {
|
||||
const {container} = this.props;
|
||||
return container.scrollHeight - container.scrollTop <=
|
||||
container.clientHeight + container.scrollHeight * requestBand;
|
||||
return container.scrollHeight - container.scrollTop
|
||||
<= container.clientHeight + container.scrollHeight * requestBand;
|
||||
};
|
||||
|
||||
// beforeUpdate is called by the parent component, saves the previous scroll position
|
||||
// and the height of the first log chunk, which can be deleted during the insertion.
|
||||
beforeUpdate = () => {
|
||||
let firstHeight = 0;
|
||||
let chunkList = this.content.children[1];
|
||||
const chunkList = this.content.children[1];
|
||||
if (chunkList && chunkList.children[0]) {
|
||||
firstHeight = chunkList.children[0].clientHeight;
|
||||
}
|
||||
|
@@ -18,11 +18,12 @@
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
|
||||
import Network from 'Network';
|
||||
import Logs from 'Logs';
|
||||
import Footer from 'Footer';
|
||||
import {MENU} from '../common';
|
||||
import Logs from './Logs';
|
||||
import Footer from './Footer';
|
||||
import type {Content} from '../types/content';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
@@ -33,7 +34,7 @@ const styles = {
|
||||
width: '100%',
|
||||
},
|
||||
content: {
|
||||
flex: 1,
|
||||
flex: 1,
|
||||
overflow: 'auto',
|
||||
},
|
||||
};
|
||||
@@ -54,21 +55,16 @@ export type Props = {
|
||||
send: string => void,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Main renders the chosen content.
|
||||
class Main extends Component<Props> {
|
||||
class Main extends Component<Props, State> {
|
||||
constructor(props) {
|
||||
super(props);
|
||||
this.container = React.createRef();
|
||||
this.content = React.createRef();
|
||||
}
|
||||
|
||||
getSnapshotBeforeUpdate() {
|
||||
if (this.content && typeof this.content.beforeUpdate === 'function') {
|
||||
return this.content.beforeUpdate();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
componentDidUpdate(prevProps, prevState, snapshot) {
|
||||
if (this.content && typeof this.content.didUpdate === 'function') {
|
||||
this.content.didUpdate(prevProps, prevState, snapshot);
|
||||
@@ -81,6 +77,13 @@ class Main extends Component<Props> {
|
||||
}
|
||||
};
|
||||
|
||||
getSnapshotBeforeUpdate(prevProps: Readonly<P>, prevState: Readonly<S>) {
|
||||
if (this.content && typeof this.content.beforeUpdate === 'function') {
|
||||
return this.content.beforeUpdate();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
render() {
|
||||
const {
|
||||
classes, active, content, shouldUpdate,
|
||||
@@ -89,9 +92,20 @@ class Main extends Component<Props> {
|
||||
let children = null;
|
||||
switch (active) {
|
||||
case MENU.get('home').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('chain').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('txpool').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('network').id:
|
||||
children = <Network
|
||||
content={this.props.content.network}
|
||||
container={this.container}
|
||||
/>;
|
||||
break;
|
||||
case MENU.get('system').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
|
529  dashboard/assets/components/Network.jsx (new file)
@@ -0,0 +1,529 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import Table from '@material-ui/core/Table';
|
||||
import TableHead from '@material-ui/core/TableHead';
|
||||
import TableBody from '@material-ui/core/TableBody';
|
||||
import TableRow from '@material-ui/core/TableRow';
|
||||
import TableCell from '@material-ui/core/TableCell';
|
||||
import Grid from '@material-ui/core/Grid/Grid';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import {AreaChart, Area, Tooltip, YAxis} from 'recharts';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
import {faCircle as fasCircle} from '@fortawesome/free-solid-svg-icons';
|
||||
import {faCircle as farCircle} from '@fortawesome/free-regular-svg-icons';
|
||||
import convert from 'color-convert';
|
||||
|
||||
import CustomTooltip, {bytePlotter, multiplier} from 'CustomTooltip';
|
||||
import type {Network as NetworkType, PeerEvent} from '../types/content';
|
||||
import {styles as commonStyles, chartStrokeWidth, hues, hueScale} from '../common';
|
||||
|
||||
// Peer chart dimensions.
|
||||
const trafficChartHeight = 18;
|
||||
const trafficChartWidth = 400;
|
||||
|
||||
// setMaxIngress adjusts the peer chart's gradient values based on the given value.
|
||||
const setMaxIngress = (peer, value) => {
|
||||
peer.maxIngress = value;
|
||||
peer.ingressGradient = [];
|
||||
peer.ingressGradient.push({offset: hueScale[0], color: hues[0]});
|
||||
let i = 1;
|
||||
for (; i < hues.length && value > hueScale[i]; i++) {
|
||||
peer.ingressGradient.push({offset: Math.floor(hueScale[i] * 100 / value), color: hues[i]});
|
||||
}
|
||||
i--;
|
||||
if (i < hues.length - 1) {
|
||||
// Usually the maximum value gets between two points on the predefined
|
||||
// color scale (e.g. 123KB is somewhere between 100KB (#FFFF00) and
|
||||
// 1MB (#FF0000)), and the charts need to be comparable by the colors,
|
||||
// so we have to calculate the last hue using the maximum value and the
|
||||
// surrounding hues in order to avoid the uniformity of the top colors
|
||||
// on the charts. For this reason the two hues are translated into the
|
||||
// CIELAB color space, and the top color will be their weighted average
|
||||
// (CIELAB is perceptually uniform, meaning that any point on the line
|
||||
// between two pure color points is also a pure color, so the weighted
|
||||
// average will not lose saturation).
|
||||
//
|
||||
// In case the maximum value is greater than the biggest predefined
|
||||
// scale value, the top of the chart will have uniform color.
|
||||
const lastHue = convert.hex.lab(hues[i]);
|
||||
const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]);
|
||||
convert.hex.lab(hues[i + 1]).forEach((val, j) => {
|
||||
lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
|
||||
});
|
||||
peer.ingressGradient.push({offset: 100, color: `#${convert.lab.hex(lastHue)}`});
|
||||
}
|
||||
};
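To make the comment above concrete, here is the same interpolation run by hand for an invented peer whose busiest ingress sample is 1 MiB; that value falls between hueScale[2] (100 KiB, #FF7F00) and hueScale[3] (2 MiB, #FF0000), so the topmost gradient stop becomes their weighted CIELAB average (standalone sketch using the color-convert package, mirroring the loop above):

    const convert = require('color-convert');

    const hues = ['#00FF00', '#FFFF00', '#FF7F00', '#FF0000'];
    const hueScale = [0, 2048, 102400, 2097152];

    const value = 1024 * 1024; // invented maximum ingress sample: 1 MiB
    const i = 2;               // value sits between hueScale[2] and hueScale[3]

    const lastHue = convert.hex.lab(hues[i]);
    const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]); // ~47.4
    convert.hex.lab(hues[i + 1]).forEach((val, j) => {
        lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
    });
    // Roughly 47% #FF7F00 and 53% #FF0000 blended in CIELAB; this becomes the
    // color of the top gradient stop ({offset: 100, color: ...}) for that peer.
    console.log(`#${convert.lab.hex(lastHue)}`);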
|
||||
|
||||
// setMaxEgress adjusts the peer chart's gradient values based on the given value.
|
||||
// In case of the egress the chart is upside down, so the gradients need to be
|
||||
// calculated inversely compared to the ingress.
|
||||
const setMaxEgress = (peer, value) => {
|
||||
peer.maxEgress = value;
|
||||
peer.egressGradient = [];
|
||||
peer.egressGradient.push({offset: 100 - hueScale[0], color: hues[0]});
|
||||
let i = 1;
|
||||
for (; i < hues.length && value > hueScale[i]; i++) {
|
||||
peer.egressGradient.unshift({offset: 100 - Math.floor(hueScale[i] * 100 / value), color: hues[i]});
|
||||
}
|
||||
i--;
|
||||
if (i < hues.length - 1) {
|
||||
// Calculate the last hue.
|
||||
const lastHue = convert.hex.lab(hues[i]);
|
||||
const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]);
|
||||
convert.hex.lab(hues[i + 1]).forEach((val, j) => {
|
||||
lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
|
||||
});
|
||||
peer.egressGradient.unshift({offset: 0, color: `#${convert.lab.hex(lastHue)}`});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// setIngressChartAttributes searches for the maximum value of the ingress
|
||||
// samples, and adjusts the peer chart's gradient values accordingly.
|
||||
const setIngressChartAttributes = (peer) => {
|
||||
let max = 0;
|
||||
peer.ingress.forEach(({value}) => {
|
||||
if (value > max) {
|
||||
max = value;
|
||||
}
|
||||
});
|
||||
setMaxIngress(peer, max);
|
||||
};
|
||||
|
||||
// setEgressChartAttributes searches for the maximum value of the egress
|
||||
// samples, and adjusts the peer chart's gradient values accordingly.
|
||||
const setEgressChartAttributes = (peer) => {
|
||||
let max = 0;
|
||||
peer.egress.forEach(({value}) => {
|
||||
if (value > max) {
|
||||
max = value;
|
||||
}
|
||||
});
|
||||
setMaxEgress(peer, max);
|
||||
};
|
||||
|
||||
// inserter is a state updater function for the main component, which handles the peers.
|
||||
export const inserter = (sampleLimit: number) => (update: NetworkType, prev: NetworkType) => {
|
||||
// The first message contains the metered peer history.
|
||||
if (update.peers && update.peers.bundles) {
|
||||
prev.peers = update.peers;
|
||||
Object.values(prev.peers.bundles).forEach((bundle) => {
|
||||
if (bundle.knownPeers) {
|
||||
Object.values(bundle.knownPeers).forEach((peer) => {
|
||||
if (!peer.maxIngress) {
|
||||
setIngressChartAttributes(peer);
|
||||
}
|
||||
if (!peer.maxEgress) {
|
||||
setEgressChartAttributes(peer);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
if (Array.isArray(update.diff)) {
|
||||
update.diff.forEach((event: PeerEvent) => {
|
||||
if (!event.ip) {
|
||||
console.error('Peer event without IP', event);
|
||||
return;
|
||||
}
|
||||
switch (event.remove) {
|
||||
case 'bundle': {
|
||||
delete prev.peers.bundles[event.ip];
|
||||
return;
|
||||
}
|
||||
case 'known': {
|
||||
if (!event.id) {
|
||||
console.error('Remove known peer event without ID', event.ip);
|
||||
return;
|
||||
}
|
||||
const bundle = prev.peers.bundles[event.ip];
|
||||
if (!bundle || !bundle.knownPeers || !bundle.knownPeers[event.id]) {
|
||||
console.error('No known peer to remove', event.ip, event.id);
|
||||
return;
|
||||
}
|
||||
delete bundle.knownPeers[event.id];
|
||||
return;
|
||||
}
|
||||
case 'attempt': {
|
||||
const bundle = prev.peers.bundles[event.ip];
|
||||
if (!bundle || !Array.isArray(bundle.attempts) || bundle.attempts.length < 1) {
|
||||
console.error('No unknown peer to remove', event.ip);
|
||||
return;
|
||||
}
|
||||
bundle.attempts.splice(0, 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!prev.peers.bundles[event.ip]) {
|
||||
prev.peers.bundles[event.ip] = {
|
||||
location: {
|
||||
country: '',
|
||||
city: '',
|
||||
latitude: 0,
|
||||
longitude: 0,
|
||||
},
|
||||
knownPeers: {},
|
||||
attempts: [],
|
||||
};
|
||||
}
|
||||
const bundle = prev.peers.bundles[event.ip];
|
||||
if (event.location) {
|
||||
bundle.location = event.location;
|
||||
return;
|
||||
}
|
||||
if (!event.id) {
|
||||
if (!bundle.attempts) {
|
||||
bundle.attempts = [];
|
||||
}
|
||||
bundle.attempts.push({
|
||||
connected: event.connected,
|
||||
disconnected: event.disconnected,
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (!bundle.knownPeers) {
|
||||
bundle.knownPeers = {};
|
||||
}
|
||||
if (!bundle.knownPeers[event.id]) {
|
||||
bundle.knownPeers[event.id] = {
|
||||
connected: [],
|
||||
disconnected: [],
|
||||
ingress: [],
|
||||
egress: [],
|
||||
active: false,
|
||||
};
|
||||
}
|
||||
const peer = bundle.knownPeers[event.id];
|
||||
if (!peer.maxIngress) {
|
||||
setIngressChartAttributes(peer);
|
||||
}
|
||||
if (!peer.maxEgress) {
|
||||
setEgressChartAttributes(peer);
|
||||
}
|
||||
if (event.connected) {
|
||||
if (!peer.connected) {
|
||||
console.warn('peer.connected should exist');
|
||||
peer.connected = [];
|
||||
}
|
||||
peer.connected.push(event.connected);
|
||||
}
|
||||
if (event.disconnected) {
|
||||
if (!peer.disconnected) {
|
||||
console.warn('peer.disconnected should exist');
|
||||
peer.disconnected = [];
|
||||
}
|
||||
peer.disconnected.push(event.disconnected);
|
||||
}
|
||||
switch (event.activity) {
|
||||
case 'active':
|
||||
peer.active = true;
|
||||
break;
|
||||
case 'inactive':
|
||||
peer.active = false;
|
||||
break;
|
||||
}
|
||||
if (Array.isArray(event.ingress) && Array.isArray(event.egress)) {
|
||||
if (event.ingress.length !== event.egress.length) {
|
||||
console.error('Different traffic sample length', event);
|
||||
return;
|
||||
}
|
||||
// Check if there is a new maximum value, and reset the colors in case.
|
||||
let maxIngress = peer.maxIngress;
|
||||
event.ingress.forEach(({value}) => {
|
||||
if (value > maxIngress) {
|
||||
maxIngress = value;
|
||||
}
|
||||
});
|
||||
if (maxIngress > peer.maxIngress) {
|
||||
setMaxIngress(peer, maxIngress);
|
||||
}
|
||||
// Push the new values.
|
||||
peer.ingress.splice(peer.ingress.length, 0, ...event.ingress);
|
||||
const ingressDiff = peer.ingress.length - sampleLimit;
|
||||
if (ingressDiff > 0) {
|
||||
// Check if the maximum value is in the beginning.
|
||||
let i = 0;
|
||||
while (i < ingressDiff && peer.ingress[i].value < peer.maxIngress) {
|
||||
i++;
|
||||
}
|
||||
// Remove the old values from the beginning.
|
||||
peer.ingress.splice(0, ingressDiff);
|
||||
if (i < ingressDiff) {
|
||||
// Reset the colors if the maximum value leaves the chart.
|
||||
setIngressChartAttributes(peer);
|
||||
}
|
||||
}
|
||||
// Check if there is a new maximum value, and reset the colors in case.
|
||||
let maxEgress = peer.maxEgress;
|
||||
event.egress.forEach(({value}) => {
|
||||
if (value > maxEgress) {
|
||||
maxEgress = value;
|
||||
}
|
||||
});
|
||||
if (maxEgress > peer.maxEgress) {
|
||||
setMaxEgress(peer, maxEgress);
|
||||
}
|
||||
// Push the new values.
|
||||
peer.egress.splice(peer.egress.length, 0, ...event.egress);
|
||||
const egressDiff = peer.egress.length - sampleLimit;
|
||||
if (egressDiff > 0) {
|
||||
// Check if the maximum value is in the beginning.
|
||||
let i = 0;
|
||||
while (i < egressDiff && peer.egress[i].value < peer.maxEgress) {
|
||||
i++;
|
||||
}
|
||||
// Remove the old values from the beginning.
|
||||
peer.egress.splice(0, egressDiff);
|
||||
if (i < egressDiff) {
|
||||
// Reset the colors if the maximum value leaves the chart.
|
||||
setEgressChartAttributes(peer);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
return prev;
|
||||
};
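As a usage note for the exported inserter above: the first message carries the full peers.bundles history, and every later message carries only a diff of peer events. A minimal, hedged example of the resulting state transitions (the IP, timestamps and node ID are invented):

    // import {inserter} from 'Network'; // as Dashboard.jsx does via resolve.modules
    const peerInserter = inserter(200); // keep at most 200 traffic samples per peer

    const prev = {peers: {bundles: {}}, diff: []};

    // First message: adopt the metered peer history wholesale.
    peerInserter({peers: {bundles: {}}, diff: []}, prev);

    // Later message: a connection attempt from 5.6.7.8 that has no node ID yet.
    peerInserter({diff: [{ip: '5.6.7.8', connected: new Date()}]}, prev);
    // prev.peers.bundles['5.6.7.8'].attempts.length === 1

    // Another message: the handshake completed, so the peer becomes a known peer.
    peerInserter({diff: [{ip: '5.6.7.8', id: 'abcdef1234', activity: 'active'}]}, prev);
    // prev.peers.bundles['5.6.7.8'].knownPeers.abcdef1234.active === true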
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
tableHead: {
|
||||
height: 'auto',
|
||||
},
|
||||
tableRow: {
|
||||
height: 'auto',
|
||||
},
|
||||
tableCell: {
|
||||
paddingTop: 0,
|
||||
paddingRight: 5,
|
||||
paddingBottom: 0,
|
||||
paddingLeft: 5,
|
||||
border: 'none',
|
||||
},
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
container: Object,
|
||||
content: NetworkType,
|
||||
shouldUpdate: Object,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Network renders the network page.
|
||||
class Network extends Component<Props, State> {
|
||||
componentDidMount() {
|
||||
const {container} = this.props;
|
||||
if (typeof container === 'undefined') {
|
||||
return;
|
||||
}
|
||||
container.scrollTop = 0;
|
||||
}
|
||||
|
||||
formatTime = (t: string) => {
|
||||
const time = new Date(t);
|
||||
if (isNaN(time)) {
|
||||
return '';
|
||||
}
|
||||
const month = `0${time.getMonth() + 1}`.slice(-2);
|
||||
const date = `0${time.getDate()}`.slice(-2);
|
||||
const hours = `0${time.getHours()}`.slice(-2);
|
||||
const minutes = `0${time.getMinutes()}`.slice(-2);
|
||||
const seconds = `0${time.getSeconds()}`.slice(-2);
|
||||
return `${month}/${date}/${hours}:${minutes}:${seconds}`;
|
||||
};
|
||||
|
||||
copyToClipboard = (id) => (event) => {
|
||||
event.preventDefault();
|
||||
navigator.clipboard.writeText(id).then(() => {}, () => {
|
||||
console.error("Failed to copy node id", id);
|
||||
});
|
||||
};
|
||||
|
||||
peerTableRow = (ip, id, bundle, peer) => {
|
||||
const ingressValues = peer.ingress.map(({value}) => ({ingress: value || 0.001}));
|
||||
const egressValues = peer.egress.map(({value}) => ({egress: -value || -0.001}));
|
||||
|
||||
return (
|
||||
<TableRow key={`known_${ip}_${id}`} style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{peer.active
|
||||
? <FontAwesomeIcon icon={fasCircle} color='green' />
|
||||
: <FontAwesomeIcon icon={farCircle} style={commonStyles.light} />
|
||||
}
|
||||
</TableCell>
|
||||
<TableCell style={{fontFamily: 'monospace', cursor: 'copy', ...styles.tableCell, ...commonStyles.light}} onClick={this.copyToClipboard(id)}>
|
||||
{id.substring(0, 10)}
|
||||
</TableCell>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{bundle.location ? (() => {
|
||||
const l = bundle.location;
|
||||
return `${l.country ? l.country : ''}${l.city ? `/${l.city}` : ''}`;
|
||||
})() : ''}
|
||||
</TableCell>
|
||||
<TableCell style={styles.tableCell}>
|
||||
<AreaChart
|
||||
width={trafficChartWidth}
|
||||
height={trafficChartHeight}
|
||||
data={ingressValues}
|
||||
margin={{top: 5, right: 5, bottom: 0, left: 5}}
|
||||
syncId={`peerIngress_${ip}_${id}`}
|
||||
>
|
||||
<defs>
|
||||
<linearGradient id={`ingressGradient_${ip}_${id}`} x1='0' y1='1' x2='0' y2='0'>
|
||||
{peer.ingressGradient
|
||||
&& peer.ingressGradient.map(({offset, color}, i) => (
|
||||
<stop
|
||||
key={`ingressStop_${ip}_${id}_${i}`}
|
||||
offset={`${offset}%`}
|
||||
stopColor={color}
|
||||
/>
|
||||
))}
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<Tooltip cursor={false} content={<CustomTooltip tooltip={bytePlotter('Download')} />} />
|
||||
<YAxis hide scale='sqrt' domain={[0.001, dataMax => Math.max(dataMax, 0)]} />
|
||||
<Area
|
||||
dataKey='ingress'
|
||||
isAnimationActive={false}
|
||||
type='monotone'
|
||||
fill={`url(#ingressGradient_${ip}_${id})`}
|
||||
stroke={peer.ingressGradient[peer.ingressGradient.length - 1].color}
|
||||
strokeWidth={chartStrokeWidth}
|
||||
/>
|
||||
</AreaChart>
|
||||
<AreaChart
|
||||
width={trafficChartWidth}
|
||||
height={trafficChartHeight}
|
||||
data={egressValues}
|
||||
margin={{top: 0, right: 5, bottom: 5, left: 5}}
|
||||
syncId={`peerIngress_${ip}_${id}`}
|
||||
>
|
||||
<defs>
|
||||
<linearGradient id={`egressGradient_${ip}_${id}`} x1='0' y1='1' x2='0' y2='0'>
|
||||
{peer.egressGradient
|
||||
&& peer.egressGradient.map(({offset, color}, i) => (
|
||||
<stop
|
||||
key={`egressStop_${ip}_${id}_${i}`}
|
||||
offset={`${offset}%`}
|
||||
stopColor={color}
|
||||
/>
|
||||
))}
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<Tooltip cursor={false} content={<CustomTooltip tooltip={bytePlotter('Upload', multiplier(-1))} />} />
|
||||
<YAxis hide scale='sqrt' domain={[dataMin => Math.min(dataMin, 0), -0.001]} />
|
||||
<Area
|
||||
dataKey='egress'
|
||||
isAnimationActive={false}
|
||||
type='monotone'
|
||||
fill={`url(#egressGradient_${ip}_${id})`}
|
||||
stroke={peer.egressGradient[0].color}
|
||||
strokeWidth={chartStrokeWidth}
|
||||
/>
|
||||
</AreaChart>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
};
|
||||
|
||||
render() {
|
||||
return (
|
||||
<Grid container direction='row' justify='space-between'>
|
||||
<Grid item>
|
||||
<Table>
|
||||
<TableHead style={styles.tableHead}>
|
||||
<TableRow style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell} />
|
||||
<TableCell style={styles.tableCell}>Node ID</TableCell>
|
||||
<TableCell style={styles.tableCell}>Location</TableCell>
|
||||
<TableCell style={styles.tableCell}>Traffic</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([ip, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return null;
|
||||
}
|
||||
return Object.entries(bundle.knownPeers).map(([id, peer]) => {
|
||||
if (peer.active === false) {
|
||||
return null;
|
||||
}
|
||||
return this.peerTableRow(ip, id, bundle, peer);
|
||||
});
|
||||
})}
|
||||
</TableBody>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([ip, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return null;
|
||||
}
|
||||
return Object.entries(bundle.knownPeers).map(([id, peer]) => {
|
||||
if (peer.active === true) {
|
||||
return null;
|
||||
}
|
||||
return this.peerTableRow(ip, id, bundle, peer);
|
||||
});
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</Grid>
|
||||
<Grid item>
|
||||
<Typography variant='subtitle1' gutterBottom>
|
||||
Connection attempts
|
||||
</Typography>
|
||||
<Table>
|
||||
<TableHead style={styles.tableHead}>
|
||||
<TableRow style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell}>IP</TableCell>
|
||||
<TableCell style={styles.tableCell}>Location</TableCell>
|
||||
<TableCell style={styles.tableCell}>Nr</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([ip, bundle]) => {
|
||||
if (!bundle.attempts || bundle.attempts.length < 1) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<TableRow key={`attempt_${ip}`} style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell}>{ip}</TableCell>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{bundle.location ? (() => {
|
||||
const l = bundle.location;
|
||||
return `${l.country ? l.country : ''}${l.city ? `/${l.city}` : ''}`;
|
||||
})() : ''}
|
||||
</TableCell>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{Object.values(bundle.attempts).length}
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</Grid>
|
||||
</Grid>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default Network;
|
@@ -18,11 +18,14 @@
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from 'material-ui/styles/withStyles';
|
||||
import List, {ListItem, ListItemIcon, ListItemText} from 'material-ui/List';
|
||||
import Icon from 'material-ui/Icon';
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import List from '@material-ui/core/List';
|
||||
import ListItem from '@material-ui/core/ListItem';
|
||||
import ListItemIcon from '@material-ui/core/ListItemIcon';
|
||||
import ListItemText from '@material-ui/core/ListItemText';
|
||||
import Icon from '@material-ui/core/Icon';
|
||||
import Transition from 'react-transition-group/Transition';
|
||||
import {Icon as FontAwesome} from 'react-fa';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
|
||||
import {MENU, DURATION} from '../common';
|
||||
|
||||
@@ -48,6 +51,7 @@ const themeStyles = theme => ({
|
||||
},
|
||||
icon: {
|
||||
fontSize: theme.spacing.unit * 3,
|
||||
overflow: 'unset',
|
||||
},
|
||||
});
|
||||
|
||||
@@ -57,9 +61,11 @@ export type Props = {
|
||||
changeContent: string => void,
|
||||
};
|
||||
|
||||
type State = {}
|
||||
|
||||
// SideBar renders the sidebar of the dashboard.
|
||||
class SideBar extends Component<Props> {
|
||||
shouldComponentUpdate(nextProps) {
|
||||
class SideBar extends Component<Props, State> {
|
||||
shouldComponentUpdate(nextProps: Readonly<Props>, nextState: Readonly<State>, nextContext: any) {
|
||||
return nextProps.opened !== this.props.opened;
|
||||
}
|
||||
|
||||
@@ -78,7 +84,7 @@ class SideBar extends Component<Props> {
|
||||
<ListItem button key={menu.id} onClick={this.clickOn(menu.id)} className={classes.listItem}>
|
||||
<ListItemIcon>
|
||||
<Icon className={classes.icon}>
|
||||
<FontAwesome name={menu.icon} />
|
||||
<FontAwesomeIcon icon={menu.icon} />
|
||||
</Icon>
|
||||
</ListItemIcon>
|
||||
<ListItemText
|
||||
|
@@ -16,10 +16,8 @@

// fa-only-woff-loader removes the .eot, .ttf, .svg dependencies of the FontAwesome library,
// because they produce unused extra blobs.
module.exports = function(content) {
return content
.replace(/src.*url(?!.*url.*(\.eot)).*(\.eot)[^;]*;/,'')
.replace(/url(?!.*url.*(\.eot)).*(\.eot)[^,]*,/,'')
.replace(/url(?!.*url.*(\.ttf)).*(\.ttf)[^,]*,/,'')
.replace(/,[^,]*url(?!.*url.*(\.svg)).*(\.svg)[^;]*;/,';');
};
module.exports = content => content
.replace(/src.*url(?!.*url.*(\.eot)).*(\.eot)[^;]*;/, '')
.replace(/url(?!.*url.*(\.eot)).*(\.eot)[^,]*,/, '')
.replace(/url(?!.*url.*(\.ttf)).*(\.ttf)[^,]*,/, '')
.replace(/,[^,]*url(?!.*url.*(\.svg)).*(\.svg)[^;]*;/, ';');
@@ -21,6 +21,6 @@
</head>
<body style="height: 100%; margin: 0">
<div id="dashboard" style="height: 100%"></div>
<script src="bundle.js"></script>
<script type="text/javascript" src="bundle.js"></script>
</body>
</html>
@@ -19,12 +19,15 @@
import React from 'react';
import {render} from 'react-dom';

import MuiThemeProvider from 'material-ui/styles/MuiThemeProvider';
import createMuiTheme from 'material-ui/styles/createMuiTheme';
import MuiThemeProvider from '@material-ui/core/styles/MuiThemeProvider';
import createMuiTheme from '@material-ui/core/styles/createMuiTheme';

import Dashboard from './components/Dashboard';

const theme: Object = createMuiTheme({
// typography: {
// useNextVariants: true,
// },
palette: {
type: 'dark',
},
@@ -1,48 +1,65 @@
|
||||
{
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"babel-core": "^6.26.0",
|
||||
"babel-eslint": "^8.2.1",
|
||||
"babel-loader": "^7.1.2",
|
||||
"babel-plugin-transform-class-properties": "^6.24.1",
|
||||
"babel-plugin-transform-decorators-legacy": "^1.3.4",
|
||||
"babel-plugin-transform-flow-strip-types": "^6.22.0",
|
||||
"babel-plugin-transform-runtime": "^6.23.0",
|
||||
"babel-preset-env": "^1.6.1",
|
||||
"babel-preset-react": "^6.24.1",
|
||||
"babel-preset-stage-0": "^6.24.1",
|
||||
"babel-runtime": "^6.26.0",
|
||||
"classnames": "^2.2.5",
|
||||
"css-loader": "^0.28.9",
|
||||
"@babel/core": "7.3.4",
|
||||
"@babel/plugin-proposal-class-properties": "7.3.4",
|
||||
"@babel/plugin-proposal-function-bind": "7.2.0",
|
||||
"@babel/plugin-transform-flow-strip-types": "7.3.4",
|
||||
"@babel/preset-env": "7.3.4",
|
||||
"@babel/preset-react": "^7.0.0",
|
||||
"@babel/preset-stage-0": "^7.0.0",
|
||||
"@fortawesome/fontawesome-free-regular": "^5.0.13",
|
||||
"@fortawesome/fontawesome-svg-core": "^1.2.15",
|
||||
"@fortawesome/free-regular-svg-icons": "^5.7.2",
|
||||
"@fortawesome/free-solid-svg-icons": "^5.7.2",
|
||||
"@fortawesome/react-fontawesome": "^0.1.4",
|
||||
"@material-ui/core": "3.9.2",
|
||||
"@material-ui/icons": "3.0.2",
|
||||
"babel-eslint": "10.0.1",
|
||||
"babel-loader": "8.0.5",
|
||||
"classnames": "^2.2.6",
|
||||
"color-convert": "^2.0.0",
|
||||
"css-loader": "2.1.1",
|
||||
"escape-html": "^1.0.3",
|
||||
"eslint": "^4.16.0",
|
||||
"eslint-config-airbnb": "^16.1.0",
|
||||
"eslint-loader": "^2.0.0",
|
||||
"eslint-plugin-flowtype": "^2.41.0",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-jsx-a11y": "^6.0.3",
|
||||
"eslint-plugin-react": "^7.5.1",
|
||||
"file-loader": "^1.1.6",
|
||||
"flow-bin": "^0.63.1",
|
||||
"flow-bin-loader": "^1.0.2",
|
||||
"flow-typed": "^2.2.3",
|
||||
"material-ui": "^1.0.0-beta.30",
|
||||
"material-ui-icons": "^1.0.0-beta.17",
|
||||
"eslint": "5.15.1",
|
||||
"eslint-config-airbnb": "^17.0.0",
|
||||
"eslint-loader": "2.1.2",
|
||||
"eslint-plugin-flowtype": "3.4.2",
|
||||
"eslint-plugin-import": "2.16.0",
|
||||
"eslint-plugin-jsx-a11y": "6.2.1",
|
||||
"eslint-plugin-node": "8.0.1",
|
||||
"eslint-plugin-promise": "4.0.1",
|
||||
"eslint-plugin-react": "7.12.4",
|
||||
"file-loader": "3.0.1",
|
||||
"flow-bin": "0.94.0",
|
||||
"flow-bin-loader": "^1.0.3",
|
||||
"flow-typed": "^2.5.1",
|
||||
"js-beautify": "1.9.0",
|
||||
"path": "^0.12.7",
|
||||
"react": "^16.2.0",
|
||||
"react-dom": "^16.2.0",
|
||||
"react-fa": "^5.0.0",
|
||||
"react-transition-group": "^2.2.1",
|
||||
"recharts": "^1.0.0-beta.9",
|
||||
"style-loader": "^0.19.1",
|
||||
"react": "16.8.4",
|
||||
"react-dom": "16.8.4",
|
||||
"react-hot-loader": "4.8.0",
|
||||
"react-transition-group": "2.6.0",
|
||||
"recharts": "1.5.0",
|
||||
"style-loader": "0.23.1",
|
||||
"terser-webpack-plugin": "^1.2.3",
|
||||
"url": "^0.11.0",
|
||||
"url-loader": "^0.6.2",
|
||||
"webpack": "^3.10.0",
|
||||
"webpack-dev-server": "^2.11.1"
|
||||
"url-loader": "1.1.2",
|
||||
"webpack": "4.29.6",
|
||||
"webpack-cli": "3.2.3",
|
||||
"webpack-dashboard": "3.0.0",
|
||||
"webpack-dev-server": "3.2.1",
|
||||
"webpack-merge": "4.2.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "NODE_ENV=production webpack",
|
||||
"stats": "webpack --profile --json > stats.json",
|
||||
"dev": "webpack-dev-server --port 8081",
|
||||
"flow": "flow-typed install"
|
||||
}
|
||||
"build": "webpack --config webpack.config.prod.js",
|
||||
"stats": "webpack --config webpack.config.prod.js --profile --json > stats.json",
|
||||
"dev": "webpack-dev-server --open --config webpack.config.dev.js",
|
||||
"dash": "webpack-dashboard -- yarn dev",
|
||||
"install-flow": "flow-typed install",
|
||||
"flow": "flow status --show-all-errors",
|
||||
"eslint": "eslint **/*"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"license": "LGPL-3.0-or-later"
|
||||
}
|
||||
|
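For reference, the reworked scripts above translate into roughly this workflow from dashboard/assets (a summary of the package.json and webpack config changes in this diff, not an official guide):

    yarn install        # fetch the pinned dependencies
    yarn dev            # webpack-dev-server (webpack.config.dev.js) with HMR on port 8081
    yarn dash           # the same dev server wrapped in webpack-dashboard
    yarn build          # production bundle via webpack.config.prod.js
    yarn install-flow   # flow-typed install
    yarn flow           # flow status --show-all-errors
    yarn eslint         # eslint **/*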
@@ -29,7 +29,6 @@ export type Content = {
|
||||
export type ChartEntries = Array<ChartEntry>;
|
||||
|
||||
export type ChartEntry = {
|
||||
time: Date,
|
||||
value: number,
|
||||
};
|
||||
|
||||
@@ -51,7 +50,50 @@ export type TxPool = {
|
||||
};
|
||||
|
||||
export type Network = {
|
||||
/* TODO (kurkomisi) */
|
||||
peers: Peers,
|
||||
diff: Array<PeerEvent>
|
||||
};
|
||||
|
||||
export type PeerEvent = {
|
||||
ip: string,
|
||||
id: string,
|
||||
remove: string,
|
||||
location: GeoLocation,
|
||||
connected: Date,
|
||||
disconnected: Date,
|
||||
ingress: ChartEntries,
|
||||
egress: ChartEntries,
|
||||
activity: string,
|
||||
};
|
||||
|
||||
export type Peers = {
|
||||
bundles: {[string]: PeerBundle},
|
||||
};
|
||||
|
||||
export type PeerBundle = {
|
||||
location: GeoLocation,
|
||||
knownPeers: {[string]: KnownPeer},
|
||||
attempts: Array<UnknownPeer>,
|
||||
};
|
||||
|
||||
export type KnownPeer = {
|
||||
connected: Array<Date>,
|
||||
disconnected: Array<Date>,
|
||||
ingress: Array<ChartEntries>,
|
||||
egress: Array<ChartEntries>,
|
||||
active: boolean,
|
||||
};
|
||||
|
||||
export type UnknownPeer = {
|
||||
connected: Date,
|
||||
disconnected: Date,
|
||||
};
|
||||
|
||||
export type GeoLocation = {
|
||||
country: string,
|
||||
city: string,
|
||||
latitude: number,
|
||||
longitude: number,
|
||||
};
|
||||
|
||||
export type System = {
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@@ -14,28 +14,25 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const webpack = require('webpack');
|
||||
const path = require('path');
|
||||
|
||||
module.exports = {
|
||||
target: 'web',
|
||||
entry: {
|
||||
bundle: './index',
|
||||
},
|
||||
output: {
|
||||
filename: '[name].js',
|
||||
path: path.resolve(__dirname, ''),
|
||||
sourceMapFilename: '[file].map',
|
||||
},
|
||||
resolve: {
|
||||
modules: [
|
||||
'node_modules',
|
||||
path.resolve(__dirname, 'components'), // import './components/Component' -> import 'Component'
|
||||
],
|
||||
extensions: ['.js', '.jsx'],
|
||||
},
|
||||
entry: './index',
|
||||
output: {
|
||||
path: path.resolve(__dirname, ''),
|
||||
filename: 'bundle.js',
|
||||
},
|
||||
plugins: [
|
||||
new webpack.optimize.UglifyJsPlugin({
|
||||
comments: false,
|
||||
mangle: false,
|
||||
beautify: true,
|
||||
}),
|
||||
new webpack.DefinePlugin({
|
||||
PROD: process.env.NODE_ENV === 'production',
|
||||
}),
|
||||
],
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
@@ -45,27 +42,38 @@ module.exports = {
|
||||
{
|
||||
loader: 'babel-loader',
|
||||
options: {
|
||||
plugins: [ // order: from top to bottom
|
||||
// 'transform-decorators-legacy', // @withStyles, @withTheme
|
||||
'transform-class-properties', // static defaultProps
|
||||
'transform-flow-strip-types',
|
||||
],
|
||||
presets: [ // order: from bottom to top
|
||||
'env',
|
||||
'react',
|
||||
'stage-0',
|
||||
'@babel/env',
|
||||
'@babel/react',
|
||||
],
|
||||
plugins: [ // order: from top to bottom
|
||||
'@babel/proposal-function-bind', // instead of stage 0
|
||||
'@babel/proposal-class-properties', // static defaultProps
|
||||
'@babel/transform-flow-strip-types',
|
||||
'react-hot-loader/babel',
|
||||
],
|
||||
},
|
||||
},
|
||||
// 'eslint-loader', // show errors not only in the editor, but also in the console
|
||||
// 'eslint-loader', // show errors in the console
|
||||
],
|
||||
},
|
||||
{
|
||||
test: /font-awesome\.css$/,
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
path.resolve(__dirname, './fa-only-woff-loader.js'),
|
||||
test: /\.css$/,
|
||||
oneOf: [
|
||||
{
|
||||
test: /font-awesome/,
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
path.resolve(__dirname, './fa-only-woff-loader.js'),
|
||||
],
|
||||
},
|
||||
{
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
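The resolve.modules entry above, together with the import/resolver paths added to .eslintrc, is what allows the bare-name component imports used throughout this diff, for example in Dashboard.jsx and Main.jsx:

    // Before: every component import was relative.
    import Header from './Header';
    import Body from './Body';

    // After: dashboard/assets/components is a module search path, so components
    // resolve by name ("import './components/Component' -> import 'Component'").
    import Header from 'Header';
    import Body from 'Body';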
35  dashboard/assets/webpack.config.dev.js (new file)
@@ -0,0 +1,35 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

const webpack = require('webpack');
const merge = require('webpack-merge');
const WebpackDashboard = require('webpack-dashboard/plugin');
const common = require('./webpack.config.common.js');

module.exports = merge(common, {
mode: 'development',
plugins: [
new WebpackDashboard(),
new webpack.HotModuleReplacementPlugin(),
],
// devtool: 'eval',
devtool: 'source-map',
devServer: {
port: 8081,
hot: true,
compress: true,
},
});
41
dashboard/assets/webpack.config.prod.js
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const TerserPlugin = require('terser-webpack-plugin');
|
||||
const merge = require('webpack-merge');
|
||||
const common = require('./webpack.config.common.js');
|
||||
|
||||
module.exports = merge(common, {
|
||||
mode: 'production',
|
||||
devtool: 'source-map',
|
||||
optimization: {
|
||||
minimize: true,
|
||||
namedModules: true, // Module names instead of numbers - resolves the large diff problem.
|
||||
minimizer: [
|
||||
new TerserPlugin({
|
||||
cache: true,
|
||||
parallel: true,
|
||||
sourceMap: true,
|
||||
terserOptions: {
|
||||
output: {
|
||||
comments: false,
|
||||
beautify: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
],
|
||||
},
|
||||
});
|
File diff suppressed because it is too large
@@ -18,8 +18,10 @@ package dashboard
|
||||
|
||||
//go:generate yarn --cwd ./assets install
|
||||
//go:generate yarn --cwd ./assets build
|
||||
//go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/index.html assets/bundle.js
|
||||
//go:generate yarn --cwd ./assets js-beautify -f bundle.js.map -r -w 1
|
||||
//go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/index.html assets/bundle.js assets/bundle.js.map
|
||||
//go:generate sh -c "sed 's#var _bundleJs#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate sh -c "sed 's#var _bundleJsMap#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate sh -c "sed 's#var _indexHtml#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate gofmt -w -s assets.go
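Note: go generate runs these directives in the order they appear, so the yarn build must succeed before go-bindata re-embeds bundle.js and the new bundle.js.map into assets.go; a typical invocation is go generate ./dashboard from the repository root.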
|
||||
|
||||
@@ -27,16 +29,13 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"io"
|
||||
|
||||
"github.com/elastic/gosigar"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
@@ -45,31 +44,29 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
activeMemorySampleLimit = 200 // Maximum number of active memory data samples
|
||||
virtualMemorySampleLimit = 200 // Maximum number of virtual memory data samples
|
||||
networkIngressSampleLimit = 200 // Maximum number of network ingress data samples
|
||||
networkEgressSampleLimit = 200 // Maximum number of network egress data samples
|
||||
processCPUSampleLimit = 200 // Maximum number of process cpu data samples
|
||||
systemCPUSampleLimit = 200 // Maximum number of system cpu data samples
|
||||
diskReadSampleLimit = 200 // Maximum number of disk read data samples
|
||||
diskWriteSampleLimit = 200 // Maximum number of disk write data samples
|
||||
sampleLimit = 200 // Maximum number of data samples
|
||||
)
|
||||
|
||||
var nextID uint32 // Next connection id
|
||||
|
||||
// Dashboard contains the dashboard internals.
|
||||
type Dashboard struct {
|
||||
config *Config
|
||||
config *Config // Configuration values for the dashboard
|
||||
|
||||
listener net.Listener
|
||||
conns map[uint32]*client // Currently live websocket connections
|
||||
history *Message
|
||||
lock sync.RWMutex // Lock protecting the dashboard's internals
|
||||
listener net.Listener // Network listener listening for dashboard clients
|
||||
conns map[uint32]*client // Currently live websocket connections
|
||||
nextConnID uint32 // Next connection id
|
||||
|
||||
logdir string
|
||||
history *Message // Stored historical data
|
||||
|
||||
lock sync.Mutex // Lock protecting the dashboard's internals
|
||||
sysLock sync.RWMutex // Lock protecting the stored system data
|
||||
peerLock sync.RWMutex // Lock protecting the stored peer data
|
||||
logLock sync.RWMutex // Lock protecting the stored log data
|
||||
|
||||
geodb *geoDB // geoip database instance for IP to geographical information conversions
|
||||
logdir string // Directory containing the log files
|
||||
|
||||
quit chan chan error // Channel used for graceful exit
|
||||
wg sync.WaitGroup
|
||||
wg sync.WaitGroup // Wait group used to close the data collector threads
|
||||
}
|
||||
|
||||
// client represents active websocket connection with a remote browser.
|
||||
@@ -96,14 +93,14 @@ func New(config *Config, commit string, logdir string) *Dashboard {
|
||||
Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
|
||||
},
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: emptyChartEntries(now, activeMemorySampleLimit, config.Refresh),
|
||||
VirtualMemory: emptyChartEntries(now, virtualMemorySampleLimit, config.Refresh),
|
||||
NetworkIngress: emptyChartEntries(now, networkIngressSampleLimit, config.Refresh),
|
||||
NetworkEgress: emptyChartEntries(now, networkEgressSampleLimit, config.Refresh),
|
||||
ProcessCPU: emptyChartEntries(now, processCPUSampleLimit, config.Refresh),
|
||||
SystemCPU: emptyChartEntries(now, systemCPUSampleLimit, config.Refresh),
|
||||
DiskRead: emptyChartEntries(now, diskReadSampleLimit, config.Refresh),
|
||||
DiskWrite: emptyChartEntries(now, diskWriteSampleLimit, config.Refresh),
|
||||
ActiveMemory: emptyChartEntries(now, sampleLimit),
|
||||
VirtualMemory: emptyChartEntries(now, sampleLimit),
|
||||
NetworkIngress: emptyChartEntries(now, sampleLimit),
|
||||
NetworkEgress: emptyChartEntries(now, sampleLimit),
|
||||
ProcessCPU: emptyChartEntries(now, sampleLimit),
|
||||
SystemCPU: emptyChartEntries(now, sampleLimit),
|
||||
DiskRead: emptyChartEntries(now, sampleLimit),
|
||||
DiskWrite: emptyChartEntries(now, sampleLimit),
|
||||
},
|
||||
},
|
||||
logdir: logdir,
|
||||
@@ -111,12 +108,10 @@ func New(config *Config, commit string, logdir string) *Dashboard {
|
||||
}
|
||||
|
||||
// emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
|
||||
func emptyChartEntries(t time.Time, limit int, refresh time.Duration) ChartEntries {
|
||||
func emptyChartEntries(t time.Time, limit int) ChartEntries {
|
||||
ce := make(ChartEntries, limit)
|
||||
for i := 0; i < limit; i++ {
|
||||
ce[i] = &ChartEntry{
|
||||
Time: t.Add(-time.Duration(i) * refresh),
|
||||
}
|
||||
ce[i] = new(ChartEntry)
|
||||
}
|
||||
return ce
|
||||
}
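A minimal usage sketch of the new signature, assuming the dashboard package (the helper name is invented); the refresh interval is gone because the empty samples no longer carry timestamps:

func exampleEmptySeries() ChartEntries {
	return emptyChartEntries(time.Now(), sampleLimit) // 200 zero-valued samples
}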
|
||||
@@ -132,9 +127,10 @@ func (db *Dashboard) APIs() []rpc.API { return nil }
|
||||
func (db *Dashboard) Start(server *p2p.Server) error {
|
||||
log.Info("Starting dashboard")
|
||||
|
||||
db.wg.Add(2)
|
||||
go db.collectData()
|
||||
db.wg.Add(3)
|
||||
go db.collectSystemData()
|
||||
go db.streamLogs()
|
||||
go db.collectPeerData()
|
||||
|
||||
http.HandleFunc("/", db.webHandler)
|
||||
http.Handle("/api", websocket.Handler(db.apiHandler))
|
||||
@@ -160,7 +156,7 @@ func (db *Dashboard) Stop() error {
|
||||
}
|
||||
// Close the collectors.
|
||||
errc := make(chan error, 1)
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := 0; i < 3; i++ {
|
||||
db.quit <- errc
|
||||
if err := <-errc; err != nil {
|
||||
errs = append(errs, err)
|
||||
@@ -206,7 +202,7 @@ func (db *Dashboard) webHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// apiHandler handles requests for the dashboard.
|
||||
func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
||||
id := atomic.AddUint32(&nextID, 1)
|
||||
id := atomic.AddUint32(&db.nextConnID, 1)
|
||||
client := &client{
|
||||
conn: conn,
|
||||
msg: make(chan *Message, 128),
|
||||
@@ -233,10 +229,21 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
||||
}
|
||||
}()
|
||||
|
||||
db.lock.Lock()
|
||||
// Send the past data.
|
||||
client.msg <- deepcopy.Copy(db.history).(*Message)
|
||||
db.sysLock.RLock()
|
||||
db.peerLock.RLock()
|
||||
db.logLock.RLock()
|
||||
|
||||
h := deepcopy.Copy(db.history).(*Message)
|
||||
|
||||
db.sysLock.RUnlock()
|
||||
db.peerLock.RUnlock()
|
||||
db.logLock.RUnlock()
|
||||
|
||||
client.msg <- h
|
||||
|
||||
// Start tracking the connection and drop at connection loss.
|
||||
db.lock.Lock()
|
||||
db.conns[id] = client
|
||||
db.lock.Unlock()
|
||||
defer func() {
|
||||
@@ -259,136 +266,6 @@ func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
||||
}
|
||||
}
|
||||
|
||||
// meterCollector returns a function, which retrieves a specific meter.
|
||||
func meterCollector(name string) func() int64 {
|
||||
if metric := metrics.DefaultRegistry.Get(name); metric != nil {
|
||||
m := metric.(metrics.Meter)
|
||||
return func() int64 {
|
||||
return m.Count()
|
||||
}
|
||||
}
|
||||
return func() int64 {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// collectData collects the required data to plot on the dashboard.
|
||||
func (db *Dashboard) collectData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
systemCPUUsage := gosigar.Cpu{}
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
mem runtime.MemStats
|
||||
|
||||
collectNetworkIngress = meterCollector("p2p/InboundTraffic")
|
||||
collectNetworkEgress = meterCollector("p2p/OutboundTraffic")
|
||||
collectDiskRead = meterCollector("eth/db/chaindata/disk/read")
|
||||
collectDiskWrite = meterCollector("eth/db/chaindata/disk/write")
|
||||
|
||||
prevNetworkIngress = collectNetworkIngress()
|
||||
prevNetworkEgress = collectNetworkEgress()
|
||||
prevProcessCPUTime = getProcessCPUTime()
|
||||
prevSystemCPUUsage = systemCPUUsage
|
||||
prevDiskRead = collectDiskRead()
|
||||
prevDiskWrite = collectDiskWrite()
|
||||
|
||||
frequency = float64(db.config.Refresh / time.Second)
|
||||
numCPU = float64(runtime.NumCPU())
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
case <-time.After(db.config.Refresh):
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
curNetworkIngress = collectNetworkIngress()
|
||||
curNetworkEgress = collectNetworkEgress()
|
||||
curProcessCPUTime = getProcessCPUTime()
|
||||
curSystemCPUUsage = systemCPUUsage
|
||||
curDiskRead = collectDiskRead()
|
||||
curDiskWrite = collectDiskWrite()
|
||||
|
||||
deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
|
||||
deltaNetworkEgress = float64(curNetworkEgress - prevNetworkEgress)
|
||||
deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
|
||||
deltaSystemCPUUsage = curSystemCPUUsage.Delta(prevSystemCPUUsage)
|
||||
deltaDiskRead = curDiskRead - prevDiskRead
|
||||
deltaDiskWrite = curDiskWrite - prevDiskWrite
|
||||
)
|
||||
prevNetworkIngress = curNetworkIngress
|
||||
prevNetworkEgress = curNetworkEgress
|
||||
prevProcessCPUTime = curProcessCPUTime
|
||||
prevSystemCPUUsage = curSystemCPUUsage
|
||||
prevDiskRead = curDiskRead
|
||||
prevDiskWrite = curDiskWrite
|
||||
|
||||
now := time.Now()
|
||||
|
||||
runtime.ReadMemStats(&mem)
|
||||
activeMemory := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(mem.Alloc) / frequency,
|
||||
}
|
||||
virtualMemory := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(mem.Sys) / frequency,
|
||||
}
|
||||
networkIngress := &ChartEntry{
|
||||
Time: now,
|
||||
Value: deltaNetworkIngress / frequency,
|
||||
}
|
||||
networkEgress := &ChartEntry{
|
||||
Time: now,
|
||||
Value: deltaNetworkEgress / frequency,
|
||||
}
|
||||
processCPU := &ChartEntry{
|
||||
Time: now,
|
||||
Value: deltaProcessCPUTime / frequency / numCPU * 100,
|
||||
}
|
||||
systemCPU := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
|
||||
}
|
||||
diskRead := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(deltaDiskRead) / frequency,
|
||||
}
|
||||
diskWrite := &ChartEntry{
|
||||
Time: now,
|
||||
Value: float64(deltaDiskWrite) / frequency,
|
||||
}
|
||||
sys := db.history.System
|
||||
db.lock.Lock()
|
||||
sys.ActiveMemory = append(sys.ActiveMemory[1:], activeMemory)
|
||||
sys.VirtualMemory = append(sys.VirtualMemory[1:], virtualMemory)
|
||||
sys.NetworkIngress = append(sys.NetworkIngress[1:], networkIngress)
|
||||
sys.NetworkEgress = append(sys.NetworkEgress[1:], networkEgress)
|
||||
sys.ProcessCPU = append(sys.ProcessCPU[1:], processCPU)
|
||||
sys.SystemCPU = append(sys.SystemCPU[1:], systemCPU)
|
||||
sys.DiskRead = append(sys.DiskRead[1:], diskRead)
|
||||
sys.DiskWrite = append(sys.DiskWrite[1:], diskWrite)
|
||||
db.lock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: ChartEntries{activeMemory},
|
||||
VirtualMemory: ChartEntries{virtualMemory},
|
||||
NetworkIngress: ChartEntries{networkIngress},
|
||||
NetworkEgress: ChartEntries{networkEgress},
|
||||
ProcessCPU: ChartEntries{processCPU},
|
||||
SystemCPU: ChartEntries{systemCPU},
|
||||
DiskRead: ChartEntries{diskRead},
|
||||
DiskWrite: ChartEntries{diskWrite},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendToAll sends the given message to the active dashboards.
|
||||
func (db *Dashboard) sendToAll(msg *Message) {
|
||||
db.lock.Lock()
|
||||
|
98
dashboard/geoip.go
Normal file
@@ -0,0 +1,98 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/apilayer/freegeoip"
|
||||
)
|
||||
|
||||
// geoDBInfo contains all the geographical information we could extract based on an IP
|
||||
// address.
|
||||
type geoDBInfo struct {
|
||||
Country struct {
|
||||
Names struct {
|
||||
English string `maxminddb:"en" json:"en,omitempty"`
|
||||
} `maxminddb:"names" json:"names,omitempty"`
|
||||
} `maxminddb:"country" json:"country,omitempty"`
|
||||
City struct {
|
||||
Names struct {
|
||||
English string `maxminddb:"en" json:"en,omitempty"`
|
||||
} `maxminddb:"names" json:"names,omitempty"`
|
||||
} `maxminddb:"city" json:"city,omitempty"`
|
||||
Location struct {
|
||||
Latitude float64 `maxminddb:"latitude" json:"latitude,omitempty"`
|
||||
Longitude float64 `maxminddb:"longitude" json:"longitude,omitempty"`
|
||||
} `maxminddb:"location" json:"location,omitempty"`
|
||||
}
|
||||
|
||||
// geoLocation contains geographical information.
|
||||
type geoLocation struct {
|
||||
Country string `json:"country,omitempty"`
|
||||
City string `json:"city,omitempty"`
|
||||
Latitude float64 `json:"latitude,omitempty"`
|
||||
Longitude float64 `json:"longitude,omitempty"`
|
||||
}
|
||||
|
||||
// geoDB represents a geoip database that can be queried for IP to geographical
|
||||
// information conversions.
|
||||
type geoDB struct {
|
||||
geodb *freegeoip.DB
|
||||
}
|
||||
|
||||
// openGeoDB creates a new geoip database instance with up-to-date data from the internet.
|
||||
func openGeoDB() (*geoDB, error) {
|
||||
// Initiate a geoip database to cross reference locations
|
||||
db, err := freegeoip.OpenURL(freegeoip.MaxMindDB, 24*time.Hour, time.Hour)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Wait until the database is updated to the latest data
|
||||
select {
|
||||
case <-db.NotifyOpen():
|
||||
case err := <-db.NotifyError():
|
||||
return nil, err
|
||||
}
|
||||
// Assemble and return our custom wrapper
|
||||
return &geoDB{geodb: db}, nil
|
||||
}
|
||||
|
||||
// Close terminates the database background updater.
|
||||
func (db *geoDB) close() error {
|
||||
db.geodb.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// lookup retrieves the raw geographical information for the given IP address.
|
||||
func (db *geoDB) lookup(ip net.IP) *geoDBInfo {
|
||||
result := new(geoDBInfo)
|
||||
db.geodb.Lookup(ip, result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Location retrieves the geographical location of the given IP address.
|
||||
func (db *geoDB) location(ip string) *geoLocation {
|
||||
location := db.lookup(net.ParseIP(ip))
|
||||
return &geoLocation{
|
||||
Country: location.Country.Names.English,
|
||||
City: location.City.Names.English,
|
||||
Latitude: location.Location.Latitude,
|
||||
Longitude: location.Location.Longitude,
|
||||
}
|
||||
}
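A short usage sketch of this wrapper, assuming the dashboard package; the helper name is invented and the IP is a documentation address:

func exampleGeoLookup() (*geoLocation, error) {
	db, err := openGeoDB() // fetches and periodically refreshes the MaxMind data
	if err != nil {
		return nil, err
	}
	defer db.close()
	return db.location("203.0.113.7"), nil // country, city, latitude, longitude
}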
|
@@ -94,13 +94,13 @@ func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
|
||||
// The last file is continuously updated, and its chunks are streamed,
|
||||
// so in order to avoid log record duplication on the client side, it is
|
||||
// handled differently. Its actual content is always saved in the history.
|
||||
db.lock.Lock()
|
||||
db.logLock.RLock()
|
||||
if db.history.Logs != nil {
|
||||
c.msg <- &Message{
|
||||
Logs: db.history.Logs,
|
||||
Logs: deepcopy.Copy(db.history.Logs).(*LogsMessage),
|
||||
}
|
||||
}
|
||||
db.lock.Unlock()
|
||||
db.logLock.RUnlock()
|
||||
return
|
||||
case fileNames[idx] == r.Name:
|
||||
idx++
|
||||
@@ -174,7 +174,7 @@ func (db *Dashboard) streamLogs() {
|
||||
log.Warn("Problem with file", "name", opened.Name(), "err", err)
|
||||
return
|
||||
}
|
||||
db.lock.Lock()
|
||||
db.logLock.Lock()
|
||||
db.history.Logs = &LogsMessage{
|
||||
Source: &LogFile{
|
||||
Name: fi.Name(),
|
||||
@@ -182,7 +182,7 @@ func (db *Dashboard) streamLogs() {
|
||||
},
|
||||
Chunk: emptyChunk,
|
||||
}
|
||||
db.lock.Unlock()
|
||||
db.logLock.Unlock()
|
||||
|
||||
watcher := make(chan notify.EventInfo, 10)
|
||||
if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
|
||||
@@ -240,10 +240,10 @@ loop:
|
||||
log.Warn("Problem with file", "name", opened.Name(), "err", err)
|
||||
break loop
|
||||
}
|
||||
db.lock.Lock()
|
||||
db.logLock.Lock()
|
||||
db.history.Logs.Source.Name = fi.Name()
|
||||
db.history.Logs.Chunk = emptyChunk
|
||||
db.lock.Unlock()
|
||||
db.logLock.Unlock()
|
||||
case <-ticker.C: // Send log updates to the client.
|
||||
if opened == nil {
|
||||
log.Warn("The last log file is not opened")
|
||||
@@ -266,7 +266,7 @@ loop:
|
||||
|
||||
var l *LogsMessage
|
||||
// Update the history.
|
||||
db.lock.Lock()
|
||||
db.logLock.Lock()
|
||||
if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
|
||||
db.history.Logs.Chunk = chunk
|
||||
l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
|
||||
@@ -278,7 +278,7 @@ loop:
|
||||
db.history.Logs.Chunk = b
|
||||
l = &LogsMessage{Chunk: chunk}
|
||||
}
|
||||
db.lock.Unlock()
|
||||
db.logLock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{Logs: l})
|
||||
case errc = <-db.quit:
|
||||
|
@@ -18,7 +18,6 @@ package dashboard
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
@@ -34,8 +33,7 @@ type Message struct {
|
||||
type ChartEntries []*ChartEntry
|
||||
|
||||
type ChartEntry struct {
|
||||
Time time.Time `json:"time,omitempty"`
|
||||
Value float64 `json:"value,omitempty"`
|
||||
Value float64 `json:"value"`
|
||||
}
|
||||
|
||||
type GeneralMessage struct {
|
||||
@@ -55,10 +53,14 @@ type TxPoolMessage struct {
|
||||
/* TODO (kurkomisi) */
|
||||
}
|
||||
|
||||
// NetworkMessage contains information about the peers
|
||||
// organized based on their IP address and node ID.
|
||||
type NetworkMessage struct {
|
||||
/* TODO (kurkomisi) */
|
||||
Peers *peerContainer `json:"peers,omitempty"` // Peer tree.
|
||||
Diff []*peerEvent `json:"diff,omitempty"` // Events that change the peer tree.
|
||||
}
|
||||
|
||||
// SystemMessage contains the metered system data samples.
|
||||
type SystemMessage struct {
|
||||
ActiveMemory ChartEntries `json:"activeMemory,omitempty"`
|
||||
VirtualMemory ChartEntries `json:"virtualMemory,omitempty"`
|
||||
@@ -70,7 +72,7 @@ type SystemMessage struct {
|
||||
DiskWrite ChartEntries `json:"diskWrite,omitempty"`
|
||||
}
|
||||
|
||||
// LogsMessage wraps up a log chunk. If Source isn't present, the chunk is a stream chunk.
|
||||
// LogsMessage wraps up a log chunk. If 'Source' isn't present, the chunk is a stream chunk.
|
||||
type LogsMessage struct {
|
||||
Source *LogFile `json:"source,omitempty"` // Attributes of the log file.
|
||||
Chunk json.RawMessage `json:"chunk"` // Contains log records.
|
||||
@@ -87,6 +89,7 @@ type Request struct {
|
||||
Logs *LogsRequest `json:"logs,omitempty"`
|
||||
}
|
||||
|
||||
// LogsRequest contains the attributes of the log file the client wants to receive.
|
||||
type LogsRequest struct {
|
||||
Name string `json:"name"` // The request handler searches for log file based on this file name.
|
||||
Past bool `json:"past"` // Denotes whether the client wants the previous or the next file.
|
||||
|
552
dashboard/peers.go
Normal file
@@ -0,0 +1,552 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
const (
|
||||
eventBufferLimit = 128 // Maximum number of buffered peer events.
|
||||
knownPeerLimit = 100 // Maximum number of stored peers, which successfully made the handshake.
|
||||
attemptLimit = 200 // Maximum number of stored peers, which failed to make the handshake.
|
||||
|
||||
// eventLimit is the maximum number of the dashboard's custom peer events
|
||||
// that are collected between two metering periods and sent to the clients
|
||||
// as one message.
|
||||
// TODO (kurkomisi): Limit the number of events.
|
||||
eventLimit = knownPeerLimit << 2
|
||||
)
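For scale: with knownPeerLimit = 100, eventLimit = knownPeerLimit << 2 = 400, which is used below as the initial capacity of the per-period event buffer; as the TODO notes, it is not yet enforced as a hard cap.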
|
||||
|
||||
// peerContainer contains information about the node's peers. This data structure
|
||||
// maintains the metered peer data based on the different behaviours of the peers.
|
||||
//
|
||||
// Every peer has an IP address, and the peers that manage to make the handshake
|
||||
// (known peers) have node IDs too. Multiple peers can appear with the same IP,
|
||||
// therefore the peer container data structure is a tree consisting of a map of
|
||||
// maps, where the first key groups the peers by IP, while the second one groups
|
||||
// them by the node ID. The known peers can be active if their connection is still
|
||||
// open, or inactive otherwise. The peers failing before the handshake (unknown
|
||||
// peers) only have IP addresses, so their connection attempts are stored as part
|
||||
// of the value of the outer map.
|
||||
//
|
||||
// Another criterion is to limit the number of metered peers so that
|
||||
// they don't fill the memory. The selection order is based on the
|
||||
// peers' activity: the peers that are inactive for the longest time
|
||||
// are evicted first. For the selection a FIFO list is used which is
|
||||
// linked to the bottom of the peer tree in a way that every activity
|
||||
// of the peer pushes the peer to the end of the list, so the inactive
|
||||
// ones come to the front. When a peer has some activity, it is removed
|
||||
// from and reinserted into the list. When the length of the list reaches
|
||||
// the limit, the first element is removed from the list, as well as from
|
||||
// the tree.
|
||||
//
|
||||
// The active peers have priority over the inactive ones, therefore
|
||||
// they have their own list. The separation makes sure that the
|
||||
// inactive peers are always removed before the active ones.
|
||||
//
|
||||
// The peers that don't manage to make the handshake are not inserted into the list,
|
||||
// only their connection attempts are appended to the array belonging to their IP.
|
||||
// In order to keep the FIFO principle, a super array contains the order of the
|
||||
// attempts, and when the overall count reaches the limit, the earliest attempt is
|
||||
// removed from the beginning of its array.
|
||||
//
|
||||
// This data structure makes it possible to marshal the peer
|
||||
// history simply by passing it to the JSON marshaler.
|
||||
type peerContainer struct {
|
||||
// Bundles is the outer map using the peer's IP address as key.
|
||||
Bundles map[string]*peerBundle `json:"bundles,omitempty"`
|
||||
|
||||
activeCount int // Number of the still connected peers
|
||||
|
||||
// inactivePeers contains the peers with closed connection in chronological order.
|
||||
inactivePeers *list.List
|
||||
|
||||
// attemptOrder is the super array containing the IP addresses, from which
|
||||
// the peers attempted to connect then failed before/during the handshake.
|
||||
// Its values are appended in chronological order, which means that the
|
||||
// oldest attempt is at the beginning of the array. When the first element
|
||||
// is removed, the first element of the related bundle's attempt array is
|
||||
// removed too, ensuring that only the latest attempts are stored.
|
||||
attemptOrder []string
|
||||
|
||||
// geodb is the geoip database used to retrieve the peers' geographical location.
|
||||
geodb *geoDB
|
||||
}
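To make the nesting concrete, here is a hypothetical, self-contained Go sketch of the JSON shape this tree marshals into: bundles keyed by IP, known peers keyed by node ID. The field names follow the struct tags in this file; the program and every value in it are invented for illustration only.

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative stand-in for a marshaled peerContainer, not the dashboard's own types.
func main() {
	tree := map[string]interface{}{
		"bundles": map[string]interface{}{
			"203.0.113.7": map[string]interface{}{ // outer key: peer IP
				"location": map[string]interface{}{"country": "Iceland", "city": "Reykjavik"},
				"knownPeers": map[string]interface{}{ // inner key: node ID
					"a1b2c3": map[string]interface{}{"active": true},
				},
				"attempts": []map[string]string{
					{"connected": "2018-11-05T10:00:00Z", "disconnected": "2018-11-05T10:00:01Z"},
				},
			},
		},
	}
	out, _ := json.MarshalIndent(tree, "", "  ")
	fmt.Println(string(out))
}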
|
||||
|
||||
// newPeerContainer returns a new instance of the peer container.
|
||||
func newPeerContainer(geodb *geoDB) *peerContainer {
|
||||
return &peerContainer{
|
||||
Bundles: make(map[string]*peerBundle),
|
||||
inactivePeers: list.New(),
|
||||
attemptOrder: make([]string, 0, attemptLimit),
|
||||
geodb: geodb,
|
||||
}
|
||||
}
|
||||
|
||||
// bundle inserts a new peer bundle into the map, if the peer belonging
|
||||
// to the given IP wasn't metered so far. In this case it retrieves the location of
|
||||
// the IP address from the database and creates a corresponding peer event.
|
||||
// Returns the bundle belonging to the given IP and the events occurring during
|
||||
// the initialization.
|
||||
func (pc *peerContainer) bundle(ip string) (*peerBundle, []*peerEvent) {
|
||||
var events []*peerEvent
|
||||
if _, ok := pc.Bundles[ip]; !ok {
|
||||
location := pc.geodb.location(ip)
|
||||
events = append(events, &peerEvent{
|
||||
IP: ip,
|
||||
Location: location,
|
||||
})
|
||||
pc.Bundles[ip] = &peerBundle{
|
||||
Location: location,
|
||||
KnownPeers: make(map[string]*knownPeer),
|
||||
}
|
||||
}
|
||||
return pc.Bundles[ip], events
|
||||
}
|
||||
|
||||
// extendKnown handles the events of the successfully connected peers.
|
||||
// Returns the events occurring during the extension.
|
||||
func (pc *peerContainer) extendKnown(event *peerEvent) []*peerEvent {
|
||||
bundle, events := pc.bundle(event.IP)
|
||||
peer, peerEvents := bundle.knownPeer(event.IP, event.ID)
|
||||
events = append(events, peerEvents...)
|
||||
// Append the connect and the disconnect events to
|
||||
// the corresponding arrays keeping the limit.
|
||||
switch {
|
||||
case event.Connected != nil:
|
||||
peer.Connected = append(peer.Connected, event.Connected)
|
||||
if first := len(peer.Connected) - sampleLimit; first > 0 {
|
||||
peer.Connected = peer.Connected[first:]
|
||||
}
|
||||
peer.Active = true
|
||||
events = append(events, &peerEvent{
|
||||
Activity: Active,
|
||||
IP: peer.ip,
|
||||
ID: peer.id,
|
||||
})
|
||||
pc.activeCount++
|
||||
if peer.listElement != nil {
|
||||
_ = pc.inactivePeers.Remove(peer.listElement)
|
||||
peer.listElement = nil
|
||||
}
|
||||
case event.Disconnected != nil:
|
||||
peer.Disconnected = append(peer.Disconnected, event.Disconnected)
|
||||
if first := len(peer.Disconnected) - sampleLimit; first > 0 {
|
||||
peer.Disconnected = peer.Disconnected[first:]
|
||||
}
|
||||
peer.Active = false
|
||||
events = append(events, &peerEvent{
|
||||
Activity: Inactive,
|
||||
IP: peer.ip,
|
||||
ID: peer.id,
|
||||
})
|
||||
pc.activeCount--
|
||||
if peer.listElement != nil {
|
||||
// If the peer is already in the list, remove and reinsert it.
|
||||
_ = pc.inactivePeers.Remove(peer.listElement)
|
||||
}
|
||||
// Insert the peer into the list.
|
||||
peer.listElement = pc.inactivePeers.PushBack(peer)
|
||||
}
|
||||
for pc.inactivePeers.Len() > 0 && pc.activeCount+pc.inactivePeers.Len() > knownPeerLimit {
|
||||
// While the count of the known peers is greater than the limit,
|
||||
// remove the first element from the inactive peer list and from the map.
|
||||
if removedPeer, ok := pc.inactivePeers.Remove(pc.inactivePeers.Front()).(*knownPeer); ok {
|
||||
events = append(events, pc.removeKnown(removedPeer.ip, removedPeer.id)...)
|
||||
} else {
|
||||
log.Warn("Failed to parse the removed peer")
|
||||
}
|
||||
}
|
||||
if pc.activeCount > knownPeerLimit {
|
||||
log.Warn("Number of active peers is greater than the limit")
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// handleAttempt handles the events of the peers failing before/during the handshake.
|
||||
// Returns the events occurring during the extension.
|
||||
func (pc *peerContainer) handleAttempt(event *peerEvent) []*peerEvent {
|
||||
bundle, events := pc.bundle(event.IP)
|
||||
bundle.Attempts = append(bundle.Attempts, &peerAttempt{
|
||||
Connected: *event.Connected,
|
||||
Disconnected: *event.Disconnected,
|
||||
})
|
||||
pc.attemptOrder = append(pc.attemptOrder, event.IP)
|
||||
for len(pc.attemptOrder) > attemptLimit {
|
||||
// While the length of the connection attempt order array is greater
|
||||
// than the limit, remove the first element from the involved peer's
|
||||
// array and also from the super array.
|
||||
events = append(events, pc.removeAttempt(pc.attemptOrder[0])...)
|
||||
pc.attemptOrder = pc.attemptOrder[1:]
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// peerBundle contains the peers belonging to a given IP address.
|
||||
type peerBundle struct {
|
||||
// Location contains the geographical location based on the bundle's IP address.
|
||||
Location *geoLocation `json:"location,omitempty"`
|
||||
|
||||
// KnownPeers is the inner map of the metered peer
|
||||
// maintainer data structure using the node ID as key.
|
||||
KnownPeers map[string]*knownPeer `json:"knownPeers,omitempty"`
|
||||
|
||||
// Attempts contains the failed connection attempts of the
|
||||
// peers belonging to a given IP address in chronological order.
|
||||
Attempts []*peerAttempt `json:"attempts,omitempty"`
|
||||
}
|
||||
|
||||
// removeKnown removes the known peer belonging to the
|
||||
// given IP address and node ID from the peer tree.
|
||||
func (pc *peerContainer) removeKnown(ip, id string) (events []*peerEvent) {
|
||||
// TODO (kurkomisi): Remove peers that don't have traffic samples anymore.
|
||||
if bundle, ok := pc.Bundles[ip]; ok {
|
||||
if _, ok := bundle.KnownPeers[id]; ok {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveKnown,
|
||||
IP: ip,
|
||||
ID: id,
|
||||
})
|
||||
delete(bundle.KnownPeers, id)
|
||||
} else {
|
||||
log.Warn("No peer to remove", "ip", ip, "id", id)
|
||||
}
|
||||
if len(bundle.KnownPeers) < 1 && len(bundle.Attempts) < 1 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveBundle,
|
||||
IP: ip,
|
||||
})
|
||||
delete(pc.Bundles, ip)
|
||||
}
|
||||
} else {
|
||||
log.Warn("No bundle to remove", "ip", ip)
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// removeAttempt removes the oldest connection attempt belonging
|
||||
// to the given IP address from the peer tree.
|
||||
func (pc *peerContainer) removeAttempt(ip string) (events []*peerEvent) {
|
||||
if bundle, ok := pc.Bundles[ip]; ok {
|
||||
if len(bundle.Attempts) > 0 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveAttempt,
|
||||
IP: ip,
|
||||
})
|
||||
bundle.Attempts = bundle.Attempts[1:]
|
||||
}
|
||||
if len(bundle.Attempts) < 1 && len(bundle.KnownPeers) < 1 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveBundle,
|
||||
IP: ip,
|
||||
})
|
||||
delete(pc.Bundles, ip)
|
||||
}
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// knownPeer inserts a new peer into the map, if the peer belonging
|
||||
// to the given IP address and node ID wasn't metered so far. Returns the peer
|
||||
// belonging to the given IP and ID as well as the events occurring during the
|
||||
// initialization.
|
||||
func (bundle *peerBundle) knownPeer(ip, id string) (*knownPeer, []*peerEvent) {
|
||||
var events []*peerEvent
|
||||
if _, ok := bundle.KnownPeers[id]; !ok {
|
||||
now := time.Now()
|
||||
ingress := emptyChartEntries(now, sampleLimit)
|
||||
egress := emptyChartEntries(now, sampleLimit)
|
||||
events = append(events, &peerEvent{
|
||||
IP: ip,
|
||||
ID: id,
|
||||
Ingress: append([]*ChartEntry{}, ingress...),
|
||||
Egress: append([]*ChartEntry{}, egress...),
|
||||
})
|
||||
bundle.KnownPeers[id] = &knownPeer{
|
||||
ip: ip,
|
||||
id: id,
|
||||
Ingress: ingress,
|
||||
Egress: egress,
|
||||
}
|
||||
}
|
||||
return bundle.KnownPeers[id], events
|
||||
}
|
||||
|
||||
// knownPeer contains the metered data of a particular peer.
|
||||
type knownPeer struct {
|
||||
// Connected contains the timestamps of the peer's connection events.
|
||||
Connected []*time.Time `json:"connected,omitempty"`
|
||||
|
||||
// Disconnected contains the timestamps of the peer's disconnection events.
|
||||
Disconnected []*time.Time `json:"disconnected,omitempty"`
|
||||
|
||||
// Ingress and Egress contain the peer's traffic samples, which are collected
|
||||
// periodically from the metrics registry.
|
||||
//
|
||||
// A peer can connect multiple times, and we want to visualize the time
|
||||
// passed between two connections, so after the first connection a 0 value
|
||||
// is appended to the traffic arrays even while the peer is inactive, until the
|
||||
// peer is removed.
|
||||
Ingress ChartEntries `json:"ingress,omitempty"`
|
||||
Egress ChartEntries `json:"egress,omitempty"`
|
||||
|
||||
Active bool `json:"active"` // Denotes if the peer is still connected.
|
||||
|
||||
listElement *list.Element // Pointer to the peer element in the list.
|
||||
ip, id string // The IP and the ID by which the peer can be accessed in the tree.
|
||||
prevIngress float64
|
||||
prevEgress float64
|
||||
}
|
||||
|
||||
// peerAttempt contains a failed peer connection attempt's attributes.
|
||||
type peerAttempt struct {
|
||||
// Connected contains the timestamp of the connection attempt's moment.
|
||||
Connected time.Time `json:"connected"`
|
||||
|
||||
// Disconnected contains the timestamp of the
|
||||
// moment when the connection attempt failed.
|
||||
Disconnected time.Time `json:"disconnected"`
|
||||
}
|
||||
|
||||
type RemovedPeerType string
|
||||
type ActivityType string
|
||||
|
||||
const (
|
||||
RemoveKnown RemovedPeerType = "known"
|
||||
RemoveAttempt RemovedPeerType = "attempt"
|
||||
RemoveBundle RemovedPeerType = "bundle"
|
||||
|
||||
Active ActivityType = "active"
|
||||
Inactive ActivityType = "inactive"
|
||||
)
|
||||
|
||||
// peerEvent contains the attributes of a peer event.
|
||||
type peerEvent struct {
|
||||
IP string `json:"ip,omitempty"` // IP address of the peer.
|
||||
ID string `json:"id,omitempty"` // Node ID of the peer.
|
||||
Remove RemovedPeerType `json:"remove,omitempty"` // Type of the peer that is to be removed.
|
||||
Location *geoLocation `json:"location,omitempty"` // Geographical location of the peer.
|
||||
Connected *time.Time `json:"connected,omitempty"` // Timestamp of the connection moment.
|
||||
Disconnected *time.Time `json:"disconnected,omitempty"` // Timestamp of the disconnection moment.
|
||||
Ingress ChartEntries `json:"ingress,omitempty"` // Ingress samples.
|
||||
Egress ChartEntries `json:"egress,omitempty"` // Egress samples.
|
||||
Activity ActivityType `json:"activity,omitempty"` // Connection status change.
|
||||
}
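For illustration, a disconnect of a known peer is reported to the clients roughly as the event below; the helper name and the values are invented, and the sketch assumes it sits in the dashboard package. Clients apply such events as diffs to their own copy of the peer tree.

// Hypothetical sketch of the event emitted when a known peer disconnects.
func exampleDisconnectEvent() *peerEvent {
	now := time.Now()
	return &peerEvent{
		IP:           "203.0.113.7", // invented IP address
		ID:           "a1b2c3",      // invented node ID
		Disconnected: &now,
	}
}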
|
||||
|
||||
// trafficMap is a container for the periodically collected peer traffic.
|
||||
type trafficMap map[string]map[string]float64
|
||||
|
||||
// insert inserts a new value to the traffic map. Overwrites
|
||||
// the value at the given ip and id if that already exists.
|
||||
func (m *trafficMap) insert(ip, id string, val float64) {
|
||||
if _, ok := (*m)[ip]; !ok {
|
||||
(*m)[ip] = make(map[string]float64)
|
||||
}
|
||||
(*m)[ip][id] = val
|
||||
}
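A minimal usage sketch of trafficMap, assuming the dashboard package; the helper name, IP, node ID and byte count are invented:

func exampleTrafficMap() float64 {
	ingress := new(trafficMap)
	*ingress = make(trafficMap)
	ingress.insert("203.0.113.7", "a1b2c3", 4096) // IP, node ID, metered byte count
	return (*ingress)["203.0.113.7"]["a1b2c3"]    // == 4096
}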
|
||||
|
||||
// collectPeerData gathers data about the peers and sends it to the clients.
|
||||
func (db *Dashboard) collectPeerData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
// Open the geodb database for IP to geographical information conversions.
|
||||
var err error
|
||||
db.geodb, err = openGeoDB()
|
||||
if err != nil {
|
||||
log.Warn("Failed to open geodb", "err", err)
|
||||
return
|
||||
}
|
||||
defer db.geodb.close()
|
||||
|
||||
peerCh := make(chan p2p.MeteredPeerEvent, eventBufferLimit) // Peer event channel.
|
||||
subPeer := p2p.SubscribeMeteredPeerEvent(peerCh) // Subscribe to peer events.
|
||||
defer subPeer.Unsubscribe() // Unsubscribe at the end.
|
||||
|
||||
ticker := time.NewTicker(db.config.Refresh)
|
||||
defer ticker.Stop()
|
||||
|
||||
type registryFunc func(name string, i interface{})
|
||||
type collectorFunc func(traffic *trafficMap) registryFunc
|
||||
|
||||
// trafficCollector generates a function that can be passed to
|
||||
// the prefixed peer registry in order to collect the metered
|
||||
// traffic data from each peer meter.
|
||||
trafficCollector := func(prefix string) collectorFunc {
|
||||
// This part makes it possible to collect the
|
||||
// traffic data into a map from outside.
|
||||
return func(traffic *trafficMap) registryFunc {
|
||||
// The function which can be passed to the registry.
|
||||
return func(name string, i interface{}) {
|
||||
if m, ok := i.(metrics.Meter); ok {
|
||||
// The name of the meter has the format: <common traffic prefix><IP>/<ID>
|
||||
if k := strings.Split(strings.TrimPrefix(name, prefix), "/"); len(k) == 2 {
|
||||
traffic.insert(k[0], k[1], float64(m.Count()))
|
||||
} else {
|
||||
log.Warn("Invalid meter name", "name", name, "prefix", prefix)
|
||||
}
|
||||
} else {
|
||||
log.Warn("Invalid meter type", "name", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
collectIngress := trafficCollector(p2p.MetricsInboundTraffic + "/")
|
||||
collectEgress := trafficCollector(p2p.MetricsOutboundTraffic + "/")
|
||||
|
||||
peers := newPeerContainer(db.geodb)
|
||||
db.peerLock.Lock()
|
||||
db.history.Network = &NetworkMessage{
|
||||
Peers: peers,
|
||||
}
|
||||
db.peerLock.Unlock()
|
||||
|
||||
// newPeerEvents contains peer events, which trigger operations that
|
||||
// will be executed on the peer tree after a metering period.
|
||||
newPeerEvents := make([]*peerEvent, 0, eventLimit)
|
||||
ingress, egress := new(trafficMap), new(trafficMap)
|
||||
*ingress, *egress = make(trafficMap), make(trafficMap)
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-peerCh:
|
||||
now := time.Now()
|
||||
switch event.Type {
|
||||
case p2p.PeerConnected:
|
||||
connected := now.Add(-event.Elapsed)
|
||||
newPeerEvents = append(newPeerEvents, &peerEvent{
|
||||
IP: event.IP.String(),
|
||||
ID: event.ID.String(),
|
||||
Connected: &connected,
|
||||
})
|
||||
case p2p.PeerDisconnected:
|
||||
ip, id := event.IP.String(), event.ID.String()
|
||||
newPeerEvents = append(newPeerEvents, &peerEvent{
|
||||
IP: ip,
|
||||
ID: id,
|
||||
Disconnected: &now,
|
||||
})
|
||||
// The disconnect event comes with the last metered traffic count,
|
||||
// because after the disconnection the peer's meter is removed
|
||||
// from the registry. It can happen that between two metering
|
||||
// periods the same peer disconnects multiple times, and appending
|
||||
// all the samples to the traffic arrays would shift the metering,
|
||||
// so only the last metering is stored, overwriting the previous one.
|
||||
ingress.insert(ip, id, float64(event.Ingress))
|
||||
egress.insert(ip, id, float64(event.Egress))
|
||||
case p2p.PeerHandshakeFailed:
|
||||
connected := now.Add(-event.Elapsed)
|
||||
newPeerEvents = append(newPeerEvents, &peerEvent{
|
||||
IP: event.IP.String(),
|
||||
Connected: &connected,
|
||||
Disconnected: &now,
|
||||
})
|
||||
default:
|
||||
log.Error("Unknown metered peer event type", "type", event.Type)
|
||||
}
|
||||
case <-ticker.C:
|
||||
// Collect the traffic samples from the registry.
|
||||
p2p.PeerIngressRegistry.Each(collectIngress(ingress))
|
||||
p2p.PeerEgressRegistry.Each(collectEgress(egress))
|
||||
|
||||
// Protect 'peers', because it is part of the history.
|
||||
db.peerLock.Lock()
|
||||
|
||||
var diff []*peerEvent
|
||||
for i := 0; i < len(newPeerEvents); i++ {
|
||||
if newPeerEvents[i].IP == "" {
|
||||
log.Warn("Peer event without IP", "event", *newPeerEvents[i])
|
||||
continue
|
||||
}
|
||||
diff = append(diff, newPeerEvents[i])
|
||||
// There are two main branches of peer events coming from the event
|
||||
// feed, one belongs to the known peers, one to the unknown peers.
|
||||
// If the event has node ID, it belongs to a known peer, otherwise
|
||||
// to an unknown one, which is considered a connection attempt.
|
||||
//
|
||||
// The extension can produce additional peer events, such
|
||||
// as remove, location and initial samples events.
|
||||
if newPeerEvents[i].ID == "" {
|
||||
diff = append(diff, peers.handleAttempt(newPeerEvents[i])...)
|
||||
continue
|
||||
}
|
||||
diff = append(diff, peers.extendKnown(newPeerEvents[i])...)
|
||||
}
|
||||
// Update the peer tree using the traffic maps.
|
||||
for ip, bundle := range peers.Bundles {
|
||||
for id, peer := range bundle.KnownPeers {
|
||||
// Value is 0 if the traffic map doesn't have the
|
||||
// entry corresponding to the given IP and ID.
|
||||
curIngress, curEgress := (*ingress)[ip][id], (*egress)[ip][id]
|
||||
deltaIngress, deltaEgress := curIngress, curEgress
|
||||
if deltaIngress >= peer.prevIngress {
|
||||
deltaIngress -= peer.prevIngress
|
||||
}
|
||||
if deltaEgress >= peer.prevEgress {
|
||||
deltaEgress -= peer.prevEgress
|
||||
}
|
||||
peer.prevIngress, peer.prevEgress = curIngress, curEgress
|
||||
i := &ChartEntry{
|
||||
Value: deltaIngress,
|
||||
}
|
||||
e := &ChartEntry{
|
||||
Value: deltaEgress,
|
||||
}
|
||||
peer.Ingress = append(peer.Ingress, i)
|
||||
peer.Egress = append(peer.Egress, e)
|
||||
if first := len(peer.Ingress) - sampleLimit; first > 0 {
|
||||
peer.Ingress = peer.Ingress[first:]
|
||||
}
|
||||
if first := len(peer.Egress) - sampleLimit; first > 0 {
|
||||
peer.Egress = peer.Egress[first:]
|
||||
}
|
||||
// Creating the traffic sample events.
|
||||
diff = append(diff, &peerEvent{
|
||||
IP: ip,
|
||||
ID: id,
|
||||
Ingress: ChartEntries{i},
|
||||
Egress: ChartEntries{e},
|
||||
})
|
||||
}
|
||||
}
|
||||
db.peerLock.Unlock()
|
||||
|
||||
if len(diff) > 0 {
|
||||
db.sendToAll(&Message{Network: &NetworkMessage{
|
||||
Diff: diff,
|
||||
}})
|
||||
}
|
||||
// Clear the traffic maps, and the event array,
|
||||
// prepare them for the next metering.
|
||||
*ingress, *egress = make(trafficMap), make(trafficMap)
|
||||
newPeerEvents = newPeerEvents[:0]
|
||||
case err := <-subPeer.Err():
|
||||
log.Warn("Peer subscription error", "err", err)
|
||||
return
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
146
dashboard/system.go
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/gosigar"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
// meterCollector returns a function that retrieves the count of a specific meter.
|
||||
func meterCollector(name string) func() int64 {
|
||||
if meter := metrics.Get(name); meter != nil {
|
||||
m := meter.(metrics.Meter)
|
||||
return func() int64 {
|
||||
return m.Count()
|
||||
}
|
||||
}
|
||||
return func() int64 {
|
||||
return 0
|
||||
}
|
||||
}
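A usage sketch, assuming the dashboard package: the returned closure reads the current meter count and falls back to a constant 0 when the meter is not registered, so callers never need a nil check. The helper name is invented.

func exampleMeterRead() int64 {
	collectIngress := meterCollector(p2p.MetricsInboundTraffic)
	return collectIngress() // total inbound p2p traffic metered so far
}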
|
||||
|
||||
// collectSystemData gathers data about the system and sends it to the clients.
|
||||
func (db *Dashboard) collectSystemData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
systemCPUUsage := gosigar.Cpu{}
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
mem runtime.MemStats
|
||||
|
||||
collectNetworkIngress = meterCollector(p2p.MetricsInboundTraffic)
|
||||
collectNetworkEgress = meterCollector(p2p.MetricsOutboundTraffic)
|
||||
collectDiskRead = meterCollector("eth/db/chaindata/disk/read")
|
||||
collectDiskWrite = meterCollector("eth/db/chaindata/disk/write")
|
||||
|
||||
prevNetworkIngress = collectNetworkIngress()
|
||||
prevNetworkEgress = collectNetworkEgress()
|
||||
prevProcessCPUTime = getProcessCPUTime()
|
||||
prevSystemCPUUsage = systemCPUUsage
|
||||
prevDiskRead = collectDiskRead()
|
||||
prevDiskWrite = collectDiskWrite()
|
||||
|
||||
frequency = float64(db.config.Refresh / time.Second)
|
||||
numCPU = float64(runtime.NumCPU())
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
case <-time.After(db.config.Refresh):
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
curNetworkIngress = collectNetworkIngress()
|
||||
curNetworkEgress = collectNetworkEgress()
|
||||
curProcessCPUTime = getProcessCPUTime()
|
||||
curSystemCPUUsage = systemCPUUsage
|
||||
curDiskRead = collectDiskRead()
|
||||
curDiskWrite = collectDiskWrite()
|
||||
|
||||
deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
|
||||
deltaNetworkEgress = float64(curNetworkEgress - prevNetworkEgress)
|
||||
deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
|
||||
deltaSystemCPUUsage = curSystemCPUUsage.Delta(prevSystemCPUUsage)
|
||||
deltaDiskRead = curDiskRead - prevDiskRead
|
||||
deltaDiskWrite = curDiskWrite - prevDiskWrite
|
||||
)
|
||||
prevNetworkIngress = curNetworkIngress
|
||||
prevNetworkEgress = curNetworkEgress
|
||||
prevProcessCPUTime = curProcessCPUTime
|
||||
prevSystemCPUUsage = curSystemCPUUsage
|
||||
prevDiskRead = curDiskRead
|
||||
prevDiskWrite = curDiskWrite
|
||||
|
||||
runtime.ReadMemStats(&mem)
|
||||
activeMemory := &ChartEntry{
|
||||
Value: float64(mem.Alloc) / frequency,
|
||||
}
|
||||
virtualMemory := &ChartEntry{
|
||||
Value: float64(mem.Sys) / frequency,
|
||||
}
|
||||
networkIngress := &ChartEntry{
|
||||
Value: deltaNetworkIngress / frequency,
|
||||
}
|
||||
networkEgress := &ChartEntry{
|
||||
Value: deltaNetworkEgress / frequency,
|
||||
}
|
||||
processCPU := &ChartEntry{
|
||||
Value: deltaProcessCPUTime / frequency / numCPU * 100,
|
||||
}
|
||||
systemCPU := &ChartEntry{
|
||||
Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
|
||||
}
|
||||
diskRead := &ChartEntry{
|
||||
Value: float64(deltaDiskRead) / frequency,
|
||||
}
|
||||
diskWrite := &ChartEntry{
|
||||
Value: float64(deltaDiskWrite) / frequency,
|
||||
}
|
||||
db.sysLock.Lock()
|
||||
sys := db.history.System
|
||||
sys.ActiveMemory = append(sys.ActiveMemory[1:], activeMemory)
|
||||
sys.VirtualMemory = append(sys.VirtualMemory[1:], virtualMemory)
|
||||
sys.NetworkIngress = append(sys.NetworkIngress[1:], networkIngress)
|
||||
sys.NetworkEgress = append(sys.NetworkEgress[1:], networkEgress)
|
||||
sys.ProcessCPU = append(sys.ProcessCPU[1:], processCPU)
|
||||
sys.SystemCPU = append(sys.SystemCPU[1:], systemCPU)
|
||||
sys.DiskRead = append(sys.DiskRead[1:], diskRead)
|
||||
sys.DiskWrite = append(sys.DiskWrite[1:], diskWrite)
|
||||
db.sysLock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: ChartEntries{activeMemory},
|
||||
VirtualMemory: ChartEntries{virtualMemory},
|
||||
NetworkIngress: ChartEntries{networkIngress},
|
||||
NetworkEgress: ChartEntries{networkEgress},
|
||||
ProcessCPU: ChartEntries{processCPU},
|
||||
SystemCPU: ChartEntries{systemCPU},
|
||||
DiskRead: ChartEntries{diskRead},
|
||||
DiskWrite: ChartEntries{diskWrite},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|