* p2p/discover, p2p/discv5: add marshaling methods to Node * p2p/netutil: make Netlist decodable from TOML * common/math: encode nil HexOrDecimal256 as 0x0 * cmd/geth: add --config file flag * cmd/geth: add missing license header * eth: prettify Config again, fix tests * eth: use gasprice.Config instead of duplicating its fields * eth/gasprice: hide nil default from dumpconfig output * cmd/geth: hide genesis block in dumpconfig output * node: make tests compile * console: fix tests * cmd/geth: make TOML keys look exactly like Go struct fields * p2p: use discovery by default This makes the zero Config slightly more useful. It also fixes package node tests because Node detects reuse of the datadir through the NodeDatabase. * cmd/geth: make ethstats URL settable through config file * cmd/faucet: fix configuration * cmd/geth: dedup attach tests * eth: add comment for DefaultConfig * eth: pass downloader.SyncMode in Config This removes the FastSync, LightSync flags in favour of a more general SyncMode flag. * cmd/utils: remove jitvm flags * cmd/utils: make mutually exclusive flag error prettier It now reads: Fatal: flags --dev, --testnet can't be used at the same time * p2p: fix typo * node: add DefaultConfig, use it for geth * mobile: add missing NoDiscovery option * cmd/utils: drop MakeNode This exposed a couple of places that needed to be updated to use node.DefaultConfig. * node: fix typo * eth: make fast sync the default mode * cmd/utils: remove IPCApiFlag (unused) * node: remove default IPC path Set it in the frontends instead. * cmd/geth: add --syncmode * cmd/utils: make --ipcdisable and --ipcpath mutually exclusive * cmd/utils: don't enable WS, HTTP when setting addr * cmd/utils: fix --identity
		
			
				
	
	
		
			679 lines
		
	
	
		
			19 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
			
		
		
	
	
			679 lines
		
	
	
		
			19 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
// Copyright 2015 The go-ethereum Authors
 | 
						|
// This file is part of the go-ethereum library.
 | 
						|
//
 | 
						|
// The go-ethereum library is free software: you can redistribute it and/or modify
 | 
						|
// it under the terms of the GNU Lesser General Public License as published by
 | 
						|
// the Free Software Foundation, either version 3 of the License, or
 | 
						|
// (at your option) any later version.
 | 
						|
//
 | 
						|
// The go-ethereum library is distributed in the hope that it will be useful,
 | 
						|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
						|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | 
						|
// GNU Lesser General Public License for more details.
 | 
						|
//
 | 
						|
// You should have received a copy of the GNU Lesser General Public License
 | 
						|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | 
						|
 | 
						|
package node
 | 
						|
 | 
						|
import (
 | 
						|
	"errors"
 | 
						|
	"fmt"
 | 
						|
	"net"
 | 
						|
	"os"
 | 
						|
	"path/filepath"
 | 
						|
	"reflect"
 | 
						|
	"strings"
 | 
						|
	"sync"
 | 
						|
	"syscall"
 | 
						|
 | 
						|
	"github.com/ethereum/go-ethereum/accounts"
 | 
						|
	"github.com/ethereum/go-ethereum/ethdb"
 | 
						|
	"github.com/ethereum/go-ethereum/event"
 | 
						|
	"github.com/ethereum/go-ethereum/internal/debug"
 | 
						|
	"github.com/ethereum/go-ethereum/log"
 | 
						|
	"github.com/ethereum/go-ethereum/p2p"
 | 
						|
	"github.com/ethereum/go-ethereum/rpc"
 | 
						|
	"github.com/syndtr/goleveldb/leveldb/storage"
 | 
						|
)
 | 
						|
 | 
						|
var (
	// ErrDatadirUsed is returned by Start when the data directory is already
	// locked by another running node instance.
	ErrDatadirUsed    = errors.New("datadir already used")
	// ErrNodeStopped is returned by operations that require a running node.
	ErrNodeStopped    = errors.New("node not started")
	// ErrNodeRunning is returned by operations that require a stopped node.
	ErrNodeRunning    = errors.New("node already running")
	// ErrServiceUnknown is returned by Service when no service of the
	// requested type is running.
	ErrServiceUnknown = errors.New("unknown service")

	// datadirInUseErrnos maps OS errno values that indicate the datadir lock
	// is held by another process (EAGAIN/EWOULDBLOCK-style errors across
	// platforms — NOTE(review): confirm per-platform meaning of 11/32/35).
	datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true}
)
 | 
						|
 | 
						|
// Node is a container on which services can be registered.
type Node struct {
	eventmux *event.TypeMux // Event multiplexer used between the services of a stack
	config   *Config        // Private copy of the configuration passed to New
	accman   *accounts.Manager

	ephemeralKeystore string          // if non-empty, the key directory that will be removed by Stop
	instanceDirLock   storage.Storage // prevents concurrent use of instance directory

	serverConfig p2p.Config
	server       *p2p.Server // Currently running P2P networking layer

	serviceFuncs []ServiceConstructor     // Service constructors (in dependency order)
	services     map[reflect.Type]Service // Currently running services

	rpcAPIs       []rpc.API   // List of APIs currently provided by the node
	inprocHandler *rpc.Server // In-process RPC request handler to process the API requests

	ipcEndpoint string       // IPC endpoint to listen at (empty = IPC disabled)
	ipcListener net.Listener // IPC RPC listener socket to serve API requests
	ipcHandler  *rpc.Server  // IPC RPC request handler to process the API requests

	httpEndpoint  string       // HTTP endpoint (interface + port) to listen at (empty = HTTP disabled)
	httpWhitelist []string     // HTTP RPC modules to allow through this endpoint
	httpListener  net.Listener // HTTP RPC listener socket to serve API requests
	httpHandler   *rpc.Server  // HTTP RPC request handler to process the API requests

	wsEndpoint string       // Websocket endpoint (interface + port) to listen at (empty = websocket disabled)
	wsListener net.Listener // Websocket RPC listener socket to serve API requests
	wsHandler  *rpc.Server  // Websocket RPC request handler to process the API requests

	stop chan struct{} // Channel to wait for termination notifications
	lock sync.RWMutex  // Guards the mutable fields above across Start/Stop/Register
}
 | 
						|
 | 
						|
// New creates a new P2P node, ready for protocol registration.
 | 
						|
func New(conf *Config) (*Node, error) {
 | 
						|
	// Copy config and resolve the datadir so future changes to the current
 | 
						|
	// working directory don't affect the node.
 | 
						|
	confCopy := *conf
 | 
						|
	conf = &confCopy
 | 
						|
	if conf.DataDir != "" {
 | 
						|
		absdatadir, err := filepath.Abs(conf.DataDir)
 | 
						|
		if err != nil {
 | 
						|
			return nil, err
 | 
						|
		}
 | 
						|
		conf.DataDir = absdatadir
 | 
						|
	}
 | 
						|
	// Ensure that the instance name doesn't cause weird conflicts with
 | 
						|
	// other files in the data directory.
 | 
						|
	if strings.ContainsAny(conf.Name, `/\`) {
 | 
						|
		return nil, errors.New(`Config.Name must not contain '/' or '\'`)
 | 
						|
	}
 | 
						|
	if conf.Name == datadirDefaultKeyStore {
 | 
						|
		return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`)
 | 
						|
	}
 | 
						|
	if strings.HasSuffix(conf.Name, ".ipc") {
 | 
						|
		return nil, errors.New(`Config.Name cannot end in ".ipc"`)
 | 
						|
	}
 | 
						|
	// Ensure that the AccountManager method works before the node has started.
 | 
						|
	// We rely on this in cmd/geth.
 | 
						|
	am, ephemeralKeystore, err := makeAccountManager(conf)
 | 
						|
	if err != nil {
 | 
						|
		return nil, err
 | 
						|
	}
 | 
						|
	// Note: any interaction with Config that would create/touch files
 | 
						|
	// in the data directory or instance directory is delayed until Start.
 | 
						|
	return &Node{
 | 
						|
		accman:            am,
 | 
						|
		ephemeralKeystore: ephemeralKeystore,
 | 
						|
		config:            conf,
 | 
						|
		serviceFuncs:      []ServiceConstructor{},
 | 
						|
		ipcEndpoint:       conf.IPCEndpoint(),
 | 
						|
		httpEndpoint:      conf.HTTPEndpoint(),
 | 
						|
		wsEndpoint:        conf.WSEndpoint(),
 | 
						|
		eventmux:          new(event.TypeMux),
 | 
						|
	}, nil
 | 
						|
}
 | 
						|
 | 
						|
// Register injects a new service into the node's stack. The service created by
 | 
						|
// the passed constructor must be unique in its type with regard to sibling ones.
 | 
						|
func (n *Node) Register(constructor ServiceConstructor) error {
 | 
						|
	n.lock.Lock()
 | 
						|
	defer n.lock.Unlock()
 | 
						|
 | 
						|
	if n.server != nil {
 | 
						|
		return ErrNodeRunning
 | 
						|
	}
 | 
						|
	n.serviceFuncs = append(n.serviceFuncs, constructor)
 | 
						|
	return nil
 | 
						|
}
 | 
						|
 | 
						|
// Start creates a live P2P node and starts running it.
func (n *Node) Start() error {
	n.lock.Lock()
	defer n.lock.Unlock()

	// Short circuit if the node's already running
	if n.server != nil {
		return ErrNodeRunning
	}
	// Acquire the instance directory lock before touching anything on disk.
	if err := n.openDataDir(); err != nil {
		return err
	}

	// Initialize the p2p server. This creates the node key and
	// discovery databases.
	n.serverConfig = n.config.P2P
	n.serverConfig.PrivateKey = n.config.NodeKey()
	n.serverConfig.Name = n.config.NodeName()
	// Only fill in defaults for fields the user left unset.
	if n.serverConfig.StaticNodes == nil {
		n.serverConfig.StaticNodes = n.config.StaticNodes()
	}
	if n.serverConfig.TrustedNodes == nil {
		// (sic) TrusterNodes is the spelling of the Config accessor.
		n.serverConfig.TrustedNodes = n.config.TrusterNodes()
	}
	if n.serverConfig.NodeDatabase == "" {
		n.serverConfig.NodeDatabase = n.config.NodeB()
	}
	running := &p2p.Server{Config: n.serverConfig}
	log.Info("Starting peer-to-peer node", "instance", n.serverConfig.Name)

	// Otherwise copy and specialize the P2P configuration
	services := make(map[reflect.Type]Service)
	for _, constructor := range n.serviceFuncs {
		// Create a new context for the particular service
		ctx := &ServiceContext{
			config:         n.config,
			services:       make(map[reflect.Type]Service),
			EventMux:       n.eventmux,
			AccountManager: n.accman,
		}
		for kind, s := range services { // copy needed for threaded access
			ctx.services[kind] = s
		}
		// Construct and save the service
		service, err := constructor(ctx)
		if err != nil {
			return err
		}
		// Services are keyed by their concrete type; duplicates are rejected.
		kind := reflect.TypeOf(service)
		if _, exists := services[kind]; exists {
			return &DuplicateServiceError{Kind: kind}
		}
		services[kind] = service
	}
	// Gather the protocols and start the freshly assembled P2P server
	for _, service := range services {
		running.Protocols = append(running.Protocols, service.Protocols()...)
	}
	if err := running.Start(); err != nil {
		// Translate the platform-specific "lock held" errno into our sentinel.
		if errno, ok := err.(syscall.Errno); ok && datadirInUseErrnos[uint(errno)] {
			return ErrDatadirUsed
		}
		return err
	}
	// Start each of the services
	started := []reflect.Type{}
	for kind, service := range services {
		// Start the next service, stopping all previous upon failure
		if err := service.Start(running); err != nil {
			for _, kind := range started {
				services[kind].Stop()
			}
			running.Stop()

			return err
		}
		// Mark the service started for potential cleanup
		started = append(started, kind)
	}
	// Lastly start the configured RPC interfaces
	if err := n.startRPC(services); err != nil {
		// RPC failure tears down everything started so far.
		for _, service := range services {
			service.Stop()
		}
		running.Stop()
		return err
	}
	// Finish initializing the startup
	n.services = services
	n.server = running
	n.stop = make(chan struct{})

	return nil
}
 | 
						|
 | 
						|
func (n *Node) openDataDir() error {
 | 
						|
	if n.config.DataDir == "" {
 | 
						|
		return nil // ephemeral
 | 
						|
	}
 | 
						|
 | 
						|
	instdir := filepath.Join(n.config.DataDir, n.config.name())
 | 
						|
	if err := os.MkdirAll(instdir, 0700); err != nil {
 | 
						|
		return err
 | 
						|
	}
 | 
						|
	// Try to open the instance directory as LevelDB storage. This creates a lock file
 | 
						|
	// which prevents concurrent use by another instance as well as accidental use of the
 | 
						|
	// instance directory as a database.
 | 
						|
	storage, err := storage.OpenFile(instdir, true)
 | 
						|
	if err != nil {
 | 
						|
		return err
 | 
						|
	}
 | 
						|
	n.instanceDirLock = storage
 | 
						|
	return nil
 | 
						|
}
 | 
						|
 | 
						|
// startRPC is a helper method to start all the various RPC endpoint during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC(services map[reflect.Type]Service) error {
	// Gather all the possible APIs to surface: the node's own plus every
	// API exposed by the registered services.
	apis := n.apis()
	for _, service := range services {
		apis = append(apis, service.APIs()...)
	}
	// Start the various API endpoints, terminating all in case of errors.
	// Each failure path unwinds exactly the endpoints started before it.
	if err := n.startInProc(apis); err != nil {
		return err
	}
	if err := n.startIPC(apis); err != nil {
		n.stopInProc()
		return err
	}
	if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors); err != nil {
		n.stopIPC()
		n.stopInProc()
		return err
	}
	if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins); err != nil {
		n.stopHTTP()
		n.stopIPC()
		n.stopInProc()
		return err
	}
	// All API endpoints started successfully
	n.rpcAPIs = apis
	return nil
}
 | 
						|
 | 
						|
// startInProc initializes an in-process RPC endpoint.
 | 
						|
func (n *Node) startInProc(apis []rpc.API) error {
 | 
						|
	// Register all the APIs exposed by the services
 | 
						|
	handler := rpc.NewServer()
 | 
						|
	for _, api := range apis {
 | 
						|
		if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
 | 
						|
			return err
 | 
						|
		}
 | 
						|
		log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
 | 
						|
	}
 | 
						|
	n.inprocHandler = handler
 | 
						|
	return nil
 | 
						|
}
 | 
						|
 | 
						|
// stopInProc terminates the in-process RPC endpoint.
 | 
						|
func (n *Node) stopInProc() {
 | 
						|
	if n.inprocHandler != nil {
 | 
						|
		n.inprocHandler.Stop()
 | 
						|
		n.inprocHandler = nil
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
// startIPC initializes and starts the IPC RPC endpoint.
func (n *Node) startIPC(apis []rpc.API) error {
	// Short circuit if the IPC endpoint isn't being exposed
	if n.ipcEndpoint == "" {
		return nil
	}
	// Register all the APIs exposed by the services
	handler := rpc.NewServer()
	for _, api := range apis {
		if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
			return err
		}
		log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
	}
	// All APIs registered, start the IPC listener
	var (
		listener net.Listener
		err      error
	)
	if listener, err = rpc.CreateIPCListener(n.ipcEndpoint); err != nil {
		return err
	}
	go func() {
		log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))

		for {
			conn, err := listener.Accept()
			if err != nil {
				// Terminate if the listener was closed: stopIPC nils out
				// n.ipcListener before closing, so a nil field here means
				// the Accept error is an intentional shutdown.
				n.lock.RLock()
				closed := n.ipcListener == nil
				n.lock.RUnlock()
				if closed {
					return
				}
				// Not closed, just some error; report and continue
				log.Error(fmt.Sprintf("IPC accept failed: %v", err))
				continue
			}
			// Each connection is served on its own goroutine; IPC clients
			// get both method invocation and subscription support.
			go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
		}
	}()
	// All listeners booted successfully
	n.ipcListener = listener
	n.ipcHandler = handler

	return nil
}
 | 
						|
 | 
						|
// stopIPC terminates the IPC RPC endpoint.
 | 
						|
func (n *Node) stopIPC() {
 | 
						|
	if n.ipcListener != nil {
 | 
						|
		n.ipcListener.Close()
 | 
						|
		n.ipcListener = nil
 | 
						|
 | 
						|
		log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
 | 
						|
	}
 | 
						|
	if n.ipcHandler != nil {
 | 
						|
		n.ipcHandler.Stop()
 | 
						|
		n.ipcHandler = nil
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
// startHTTP initializes and starts the HTTP RPC endpoint.
func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors string) error {
	// Short circuit if the HTTP endpoint isn't being exposed
	if endpoint == "" {
		return nil
	}
	// Generate the whitelist based on the allowed modules
	whitelist := make(map[string]bool)
	for _, module := range modules {
		whitelist[module] = true
	}
	// Register all the APIs exposed by the services. With an empty
	// whitelist, only APIs marked Public are exposed over HTTP.
	handler := rpc.NewServer()
	for _, api := range apis {
		if whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
			if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
				return err
			}
			log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
		}
	}
	// All APIs registered, start the HTTP listener
	var (
		listener net.Listener
		err      error
	)
	if listener, err = net.Listen("tcp", endpoint); err != nil {
		return err
	}
	go rpc.NewHTTPServer(cors, handler).Serve(listener)
	log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))

	// All listeners booted successfully
	n.httpEndpoint = endpoint
	n.httpListener = listener
	n.httpHandler = handler

	return nil
}
 | 
						|
 | 
						|
// stopHTTP terminates the HTTP RPC endpoint.
 | 
						|
func (n *Node) stopHTTP() {
 | 
						|
	if n.httpListener != nil {
 | 
						|
		n.httpListener.Close()
 | 
						|
		n.httpListener = nil
 | 
						|
 | 
						|
		log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
 | 
						|
	}
 | 
						|
	if n.httpHandler != nil {
 | 
						|
		n.httpHandler.Stop()
 | 
						|
		n.httpHandler = nil
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
// startWS initializes and starts the websocket RPC endpoint.
func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrigins string) error {
	// Short circuit if the WS endpoint isn't being exposed
	if endpoint == "" {
		return nil
	}
	// Generate the whitelist based on the allowed modules
	whitelist := make(map[string]bool)
	for _, module := range modules {
		whitelist[module] = true
	}
	// Register all the APIs exposed by the services. With an empty
	// whitelist, only APIs marked Public are exposed over websocket.
	handler := rpc.NewServer()
	for _, api := range apis {
		if whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
			if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
				return err
			}
			log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
		}
	}
	// All APIs registered, start the websocket listener
	var (
		listener net.Listener
		err      error
	)
	if listener, err = net.Listen("tcp", endpoint); err != nil {
		return err
	}
	go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
	log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", endpoint))

	// All listeners booted successfully
	n.wsEndpoint = endpoint
	n.wsListener = listener
	n.wsHandler = handler

	return nil
}
 | 
						|
 | 
						|
// stopWS terminates the websocket RPC endpoint.
 | 
						|
func (n *Node) stopWS() {
 | 
						|
	if n.wsListener != nil {
 | 
						|
		n.wsListener.Close()
 | 
						|
		n.wsListener = nil
 | 
						|
 | 
						|
		log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
 | 
						|
	}
 | 
						|
	if n.wsHandler != nil {
 | 
						|
		n.wsHandler.Stop()
 | 
						|
		n.wsHandler = nil
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
// Stop terminates a running node along with all its services. If the node was
// not started, an error is returned.
func (n *Node) Stop() error {
	n.lock.Lock()
	defer n.lock.Unlock()

	// Short circuit if the node's not running
	if n.server == nil {
		return ErrNodeStopped
	}

	// Terminate the API, services and the p2p server.
	n.stopWS()
	n.stopHTTP()
	n.stopIPC()
	n.rpcAPIs = nil
	// Collect per-service stop errors instead of aborting on the first one,
	// so that every service gets its chance to shut down.
	failure := &StopError{
		Services: make(map[reflect.Type]error),
	}
	for kind, service := range n.services {
		if err := service.Stop(); err != nil {
			failure.Services[kind] = err
		}
	}
	n.server.Stop()
	n.services = nil
	n.server = nil

	// Release instance directory lock.
	if n.instanceDirLock != nil {
		n.instanceDirLock.Close()
		n.instanceDirLock = nil
	}

	// unblock n.Wait
	close(n.stop)

	// Remove the keystore if it was created ephemerally.
	var keystoreErr error
	if n.ephemeralKeystore != "" {
		keystoreErr = os.RemoveAll(n.ephemeralKeystore)
	}

	// Service failures take precedence over the keystore cleanup error.
	if len(failure.Services) > 0 {
		return failure
	}
	if keystoreErr != nil {
		return keystoreErr
	}
	return nil
}
 | 
						|
 | 
						|
// Wait blocks the thread until the node is stopped. If the node is not running
 | 
						|
// at the time of invocation, the method immediately returns.
 | 
						|
func (n *Node) Wait() {
 | 
						|
	n.lock.RLock()
 | 
						|
	if n.server == nil {
 | 
						|
		return
 | 
						|
	}
 | 
						|
	stop := n.stop
 | 
						|
	n.lock.RUnlock()
 | 
						|
 | 
						|
	<-stop
 | 
						|
}
 | 
						|
 | 
						|
// Restart terminates a running node and boots up a new one in its place. If the
 | 
						|
// node isn't running, an error is returned.
 | 
						|
func (n *Node) Restart() error {
 | 
						|
	if err := n.Stop(); err != nil {
 | 
						|
		return err
 | 
						|
	}
 | 
						|
	if err := n.Start(); err != nil {
 | 
						|
		return err
 | 
						|
	}
 | 
						|
	return nil
 | 
						|
}
 | 
						|
 | 
						|
// Attach creates an RPC client attached to an in-process API handler.
 | 
						|
func (n *Node) Attach() (*rpc.Client, error) {
 | 
						|
	n.lock.RLock()
 | 
						|
	defer n.lock.RUnlock()
 | 
						|
 | 
						|
	if n.server == nil {
 | 
						|
		return nil, ErrNodeStopped
 | 
						|
	}
 | 
						|
	return rpc.DialInProc(n.inprocHandler), nil
 | 
						|
}
 | 
						|
 | 
						|
// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server, life cycle management
// should be left to this Node entity. Returns nil when the node is stopped.
func (n *Node) Server() *p2p.Server {
	n.lock.RLock()
	defer n.lock.RUnlock()

	return n.server
}
 | 
						|
 | 
						|
// Service retrieves a currently running service registered of a specific type.
 | 
						|
func (n *Node) Service(service interface{}) error {
 | 
						|
	n.lock.RLock()
 | 
						|
	defer n.lock.RUnlock()
 | 
						|
 | 
						|
	// Short circuit if the node's not running
 | 
						|
	if n.server == nil {
 | 
						|
		return ErrNodeStopped
 | 
						|
	}
 | 
						|
	// Otherwise try to find the service to return
 | 
						|
	element := reflect.ValueOf(service).Elem()
 | 
						|
	if running, ok := n.services[element.Type()]; ok {
 | 
						|
		element.Set(reflect.ValueOf(running))
 | 
						|
		return nil
 | 
						|
	}
 | 
						|
	return ErrServiceUnknown
 | 
						|
}
 | 
						|
 | 
						|
// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
	return n.config.DataDir
}
 | 
						|
 | 
						|
// InstanceDir retrieves the instance directory used by the protocol stack.
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}
 | 
						|
 | 
						|
// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}
 | 
						|
 | 
						|
// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
// An empty string means IPC is disabled.
func (n *Node) IPCEndpoint() string {
	return n.ipcEndpoint
}
 | 
						|
 | 
						|
// HTTPEndpoint retrieves the current HTTP endpoint used by the protocol stack.
// An empty string means HTTP is disabled.
func (n *Node) HTTPEndpoint() string {
	return n.httpEndpoint
}
 | 
						|
 | 
						|
// WSEndpoint retrieves the current WS endpoint used by the protocol stack.
// An empty string means the websocket server is disabled.
func (n *Node) WSEndpoint() string {
	return n.wsEndpoint
}
 | 
						|
 | 
						|
// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
func (n *Node) EventMux() *event.TypeMux {
	return n.eventmux
}
 | 
						|
 | 
						|
// OpenDatabase opens an existing database with the given name (or creates one if no
 | 
						|
// previous can be found) from within the node's instance directory. If the node is
 | 
						|
// ephemeral, a memory database is returned.
 | 
						|
func (n *Node) OpenDatabase(name string, cache, handles int) (ethdb.Database, error) {
 | 
						|
	if n.config.DataDir == "" {
 | 
						|
		return ethdb.NewMemDatabase()
 | 
						|
	}
 | 
						|
	return ethdb.NewLDBDatabase(n.config.resolvePath(name), cache, handles)
 | 
						|
}
 | 
						|
 | 
						|
// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
	return n.config.resolvePath(x)
}
 | 
						|
 | 
						|
// apis returns the collection of RPC descriptors this node offers: the private
// and public "admin" APIs, the "debug" handlers and the public "web3" API.
// Entries without Public set are only exposed where non-public APIs are allowed
// (e.g. IPC and in-process).
func (n *Node) apis() []rpc.API {
	return []rpc.API{
		{
			Namespace: "admin",
			Version:   "1.0",
			Service:   NewPrivateAdminAPI(n),
		}, {
			Namespace: "admin",
			Version:   "1.0",
			Service:   NewPublicAdminAPI(n),
			Public:    true,
		}, {
			Namespace: "debug",
			Version:   "1.0",
			Service:   debug.Handler,
		}, {
			Namespace: "debug",
			Version:   "1.0",
			Service:   NewPublicDebugAPI(n),
			Public:    true,
		}, {
			Namespace: "web3",
			Version:   "1.0",
			Service:   NewPublicWeb3API(n),
			Public:    true,
		},
	}
}
 |