This PR significantly changes the APIs for instantiating Ethereum nodes in a Go program. The new APIs are not backwards-compatible, but we feel that this is made up for by the much simpler way of registering services on node.Node. You can find more information and rationale in the design document: https://gist.github.com/renaynay/5bec2de19fde66f4d04c535fd24f0775.

There is also a new feature in Node's Go API: it is now possible to register arbitrary handlers on the user-facing HTTP server. In geth, this facility is used to enable GraphQL.

There is a single minor change relevant for geth users in this PR: the GraphQL API is no longer available separately from the JSON-RPC HTTP server. If you want GraphQL, you need to enable it using the ./geth --http --graphql flag combination. The --graphql.port and --graphql.addr flags are no longer available.
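For illustration, here is a minimal sketch of the new flow. The `demoService` type and all config values are invented for the example; the node APIs it calls (`New`, `RegisterLifecycle`, `RegisterHandler`, `Start`, `Close`, `HTTPEndpoint`) are the ones defined in the file below, and the Lifecycle shape (Start/Stop, both returning error) is inferred from how the file calls it:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/ethereum/go-ethereum/node"
)

// demoService is a hypothetical Lifecycle implementation.
type demoService struct{}

func (s *demoService) Start() error { return nil }
func (s *demoService) Stop() error  { return nil }

func main() {
	// Create the node. The Config fields used here all appear in node.go;
	// the concrete values are made up.
	stack, err := node.New(&node.Config{
		Name:     "demo",
		DataDir:  "demo-data",
		HTTPHost: "127.0.0.1",
		HTTPPort: 8545,
	})
	if err != nil {
		panic(err)
	}

	// Registration must happen before Start; both calls panic on a
	// running or stopped node.
	stack.RegisterLifecycle(new(demoService))
	stack.RegisterHandler("hello", "/hello", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "hello from the node's HTTP server")
		}))

	if err := stack.Start(); err != nil {
		panic(err)
	}
	fmt.Println("HTTP server at", stack.HTTPEndpoint())

	// A real program would usually stack.Wait() here; Close shuts the
	// node down and releases the datadir lock.
	if err := stack.Close(); err != nil {
		panic(err)
	}
}
```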
node/node.go (626 lines, 17 KiB, Go)
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
	"errors"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"sync"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/prometheus/tsdb/fileutil"
)

// Node is a container on which services can be registered.
type Node struct {
	eventmux      *event.TypeMux
	config        *Config
	accman        *accounts.Manager
	log           log.Logger
	ephemKeystore string            // if non-empty, the key directory that will be removed by Stop
	dirLock       fileutil.Releaser // prevents concurrent use of instance directory
	stop          chan struct{}     // Channel to wait for termination notifications
	server        *p2p.Server       // Currently running P2P networking layer
	startStopLock sync.Mutex        // Start/Stop are protected by an additional lock
	state         int               // Tracks state of node lifecycle

	lock          sync.Mutex
	lifecycles    []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle
	rpcAPIs       []rpc.API   // List of APIs currently provided by the node
	http          *httpServer // HTTP RPC server
	ws            *httpServer // WebSocket RPC server
	ipc           *ipcServer  // Stores information about the ipc http server
	inprocHandler *rpc.Server // In-process RPC request handler to process the API requests

	databases map[*closeTrackingDB]struct{} // All open databases
}

const (
	initializingState = iota
	runningState
	closedState
)

// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
	// Copy config and resolve the datadir so future changes to the current
	// working directory don't affect the node.
	confCopy := *conf
	conf = &confCopy
	if conf.DataDir != "" {
		absdatadir, err := filepath.Abs(conf.DataDir)
		if err != nil {
			return nil, err
		}
		conf.DataDir = absdatadir
	}
	if conf.Logger == nil {
		conf.Logger = log.New()
	}

	// Ensure that the instance name doesn't cause weird conflicts with
	// other files in the data directory.
	if strings.ContainsAny(conf.Name, `/\`) {
		return nil, errors.New(`Config.Name must not contain '/' or '\'`)
	}
	if conf.Name == datadirDefaultKeyStore {
		return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`)
	}
	if strings.HasSuffix(conf.Name, ".ipc") {
		return nil, errors.New(`Config.Name cannot end in ".ipc"`)
	}

	node := &Node{
		config:        conf,
		inprocHandler: rpc.NewServer(),
		eventmux:      new(event.TypeMux),
		log:           conf.Logger,
		stop:          make(chan struct{}),
		server:        &p2p.Server{Config: conf.P2P},
		databases:     make(map[*closeTrackingDB]struct{}),
	}

	// Register built-in APIs.
	node.rpcAPIs = append(node.rpcAPIs, node.apis()...)

	// Acquire the instance directory lock.
	if err := node.openDataDir(); err != nil {
		return nil, err
	}
	// Ensure that the AccountManager method works before the node has started. We rely on
	// this in cmd/geth.
	am, ephemeralKeystore, err := makeAccountManager(conf)
	if err != nil {
		return nil, err
	}
	node.accman = am
	node.ephemKeystore = ephemeralKeystore

	// Initialize the p2p server. This creates the node key and discovery databases.
	node.server.Config.PrivateKey = node.config.NodeKey()
	node.server.Config.Name = node.config.NodeName()
	node.server.Config.Logger = node.log
	if node.server.Config.StaticNodes == nil {
		node.server.Config.StaticNodes = node.config.StaticNodes()
	}
	if node.server.Config.TrustedNodes == nil {
		node.server.Config.TrustedNodes = node.config.TrustedNodes()
	}
	if node.server.Config.NodeDatabase == "" {
		node.server.Config.NodeDatabase = node.config.NodeDB()
	}

	// Configure RPC servers.
	node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
	node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
	node.ipc = newIPCServer(node.log, conf.IPCEndpoint())

	return node, nil
}

// Start starts all registered lifecycles, RPC services and p2p networking.
// Node can only be started once.
func (n *Node) Start() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	switch n.state {
	case runningState:
		n.lock.Unlock()
		return ErrNodeRunning
	case closedState:
		n.lock.Unlock()
		return ErrNodeStopped
	}
	n.state = runningState
	err := n.startNetworking()
	lifecycles := make([]Lifecycle, len(n.lifecycles))
	copy(lifecycles, n.lifecycles)
	n.lock.Unlock()

	// Check if networking startup failed.
	if err != nil {
		n.doClose(nil)
		return err
	}
	// Start all registered lifecycles.
	var started []Lifecycle
	for _, lifecycle := range lifecycles {
		if err = lifecycle.Start(); err != nil {
			break
		}
		started = append(started, lifecycle)
	}
	// Check if any lifecycle failed to start.
	if err != nil {
		n.stopServices(started)
		n.doClose(nil)
	}
	return err
}

// Close stops the Node and releases resources acquired in
// Node constructor New.
func (n *Node) Close() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	state := n.state
	n.lock.Unlock()
	switch state {
	case initializingState:
		// The node was never started.
		return n.doClose(nil)
	case runningState:
		// The node was started, release resources acquired by Start().
		var errs []error
		if err := n.stopServices(n.lifecycles); err != nil {
			errs = append(errs, err)
		}
		return n.doClose(errs)
	case closedState:
		return ErrNodeStopped
	default:
		panic(fmt.Sprintf("node is in unknown state %d", state))
	}
}

// doClose releases resources acquired by New(), collecting errors.
func (n *Node) doClose(errs []error) error {
	// Close databases. This needs the lock because it needs to
	// synchronize with OpenDatabase*.
	n.lock.Lock()
	n.state = closedState
	errs = append(errs, n.closeDatabases()...)
	n.lock.Unlock()

	if err := n.accman.Close(); err != nil {
		errs = append(errs, err)
	}
	if n.ephemKeystore != "" {
		if err := os.RemoveAll(n.ephemKeystore); err != nil {
			errs = append(errs, err)
		}
	}

	// Release instance directory lock.
	n.closeDataDir()

	// Unblock n.Wait.
	close(n.stop)

	// Report any errors that might have occurred.
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	default:
		return fmt.Errorf("%v", errs)
	}
}

// startNetworking starts all network endpoints.
func (n *Node) startNetworking() error {
	n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
	if err := n.server.Start(); err != nil {
		return convertFileLockError(err)
	}
	err := n.startRPC()
	if err != nil {
		n.stopRPC()
		n.server.Stop()
	}
	return err
}

// containsLifecycle checks if 'lfs' contains 'l'.
func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool {
	for _, obj := range lfs {
		if obj == l {
			return true
		}
	}
	return false
}

// stopServices terminates running services, RPC and p2p networking.
// It is the inverse of Start.
func (n *Node) stopServices(running []Lifecycle) error {
	n.stopRPC()

	// Stop running lifecycles in reverse order.
	failure := &StopError{Services: make(map[reflect.Type]error)}
	for i := len(running) - 1; i >= 0; i-- {
		if err := running[i].Stop(); err != nil {
			failure.Services[reflect.TypeOf(running[i])] = err
		}
	}

	// Stop p2p networking.
	n.server.Stop()

	if len(failure.Services) > 0 {
		return failure
	}
	return nil
}

func (n *Node) openDataDir() error {
	if n.config.DataDir == "" {
		return nil // ephemeral
	}

	instdir := filepath.Join(n.config.DataDir, n.config.name())
	if err := os.MkdirAll(instdir, 0700); err != nil {
		return err
	}
	// Lock the instance directory to prevent concurrent use by another instance as well as
	// accidental use of the instance directory as a database.
	release, _, err := fileutil.Flock(filepath.Join(instdir, "LOCK"))
	if err != nil {
		return convertFileLockError(err)
	}
	n.dirLock = release
	return nil
}

func (n *Node) closeDataDir() {
	// Release instance directory lock.
	if n.dirLock != nil {
		if err := n.dirLock.Release(); err != nil {
			n.log.Error("Can't release datadir lock", "err", err)
		}
		n.dirLock = nil
	}
}

// startRPC is a helper method to configure all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC() error {
	if err := n.startInProc(); err != nil {
		return err
	}

	// Configure IPC.
	if n.ipc.endpoint != "" {
		if err := n.ipc.start(n.rpcAPIs); err != nil {
			return err
		}
	}

	// Configure HTTP.
	if n.config.HTTPHost != "" {
		config := httpConfig{
			CorsAllowedOrigins: n.config.HTTPCors,
			Vhosts:             n.config.HTTPVirtualHosts,
			Modules:            n.config.HTTPModules,
		}
		if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil {
			return err
		}
		if err := n.http.enableRPC(n.rpcAPIs, config); err != nil {
			return err
		}
	}

	// Configure WebSocket.
	if n.config.WSHost != "" {
		server := n.wsServerForPort(n.config.WSPort)
		config := wsConfig{
			Modules: n.config.WSModules,
			Origins: n.config.WSOrigins,
		}
		if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil {
			return err
		}
		if err := server.enableWS(n.rpcAPIs, config); err != nil {
			return err
		}
	}

	if err := n.http.start(); err != nil {
		return err
	}
	return n.ws.start()
}

func (n *Node) wsServerForPort(port int) *httpServer {
	if n.config.HTTPHost == "" || n.http.port == port {
		return n.http
	}
	return n.ws
}

func (n *Node) stopRPC() {
	n.http.stop()
	n.ws.stop()
	n.ipc.stop()
	n.stopInProc()
}

// startInProc registers all RPC APIs on the inproc server.
func (n *Node) startInProc() error {
	for _, api := range n.rpcAPIs {
		if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil {
			return err
		}
	}
	return nil
}

// stopInProc terminates the in-process RPC endpoint.
func (n *Node) stopInProc() {
	n.inprocHandler.Stop()
}

// Wait blocks until the node is closed.
func (n *Node) Wait() {
	<-n.stop
}

// RegisterLifecycle registers the given Lifecycle on the node.
func (n *Node) RegisterLifecycle(lifecycle Lifecycle) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register lifecycle on running/stopped node")
	}
	if containsLifecycle(n.lifecycles, lifecycle) {
		panic(fmt.Sprintf("attempt to register lifecycle %T more than once", lifecycle))
	}
	n.lifecycles = append(n.lifecycles, lifecycle)
}

// RegisterProtocols adds a backend's protocols to the node's p2p server.
func (n *Node) RegisterProtocols(protocols []p2p.Protocol) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register protocols on running/stopped node")
	}
	n.server.Protocols = append(n.server.Protocols, protocols...)
}

// RegisterAPIs registers the APIs a service provides on the node.
func (n *Node) RegisterAPIs(apis []rpc.API) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register APIs on running/stopped node")
	}
	n.rpcAPIs = append(n.rpcAPIs, apis...)
}

// RegisterHandler mounts a handler on the given path on the canonical HTTP server.
//
// The name of the handler is shown in a log message when the HTTP server starts
// and should be a descriptive term for the service provided by the handler.
func (n *Node) RegisterHandler(name, path string, handler http.Handler) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register HTTP handler on running/stopped node")
	}
	n.http.mux.Handle(path, handler)
	n.http.handlerNames[path] = name
}

// Attach creates an RPC client attached to an in-process API handler.
func (n *Node) Attach() (*rpc.Client, error) {
	return rpc.DialInProc(n.inprocHandler), nil
}

// RPCHandler returns the in-process RPC request handler.
func (n *Node) RPCHandler() (*rpc.Server, error) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state == closedState {
		return nil, ErrNodeStopped
	}
	return n.inprocHandler, nil
}

// Config returns the configuration of the node.
func (n *Node) Config() *Config {
	return n.config
}

// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server. Callers should not
// start or stop the returned server.
func (n *Node) Server() *p2p.Server {
	n.lock.Lock()
	defer n.lock.Unlock()

	return n.server
}

// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
	return n.config.DataDir
}

// InstanceDir retrieves the instance directory used by the protocol stack.
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}

// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}

// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
	return n.ipc.endpoint
}

// HTTPEndpoint returns the URL of the HTTP server.
func (n *Node) HTTPEndpoint() string {
	return "http://" + n.http.listenAddr()
}

// WSEndpoint retrieves the current WS endpoint used by the protocol stack.
func (n *Node) WSEndpoint() string {
	if n.http.wsAllowed() {
		return "ws://" + n.http.listenAddr()
	}
	return "ws://" + n.ws.listenAddr()
}

// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
func (n *Node) EventMux() *event.TypeMux {
	return n.eventmux
}

// OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory. If the node is
// ephemeral, a memory database is returned.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (ethdb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
	if n.config.DataDir == "" {
		db = rawdb.NewMemoryDatabase()
	} else {
		db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace)
	}

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
}

// OpenDatabaseWithFreezer opens an existing database with the given name (or
// creates one if no previous can be found) from within the node's data directory,
// also attaching a chain freezer to it that moves ancient chain data from the
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string) (ethdb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
	if n.config.DataDir == "" {
		db = rawdb.NewMemoryDatabase()
	} else {
		root := n.ResolvePath(name)
		switch {
		case freezer == "":
			freezer = filepath.Join(root, "ancient")
		case !filepath.IsAbs(freezer):
			freezer = n.ResolvePath(freezer)
		}
		db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace)
	}

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
}

// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
	return n.config.ResolvePath(x)
}

// closeTrackingDB wraps the Close method of a database. When the database is closed by the
// service, the wrapper removes it from the node's database map. This ensures that Node
// won't auto-close the database if it is closed by the service that opened it.
type closeTrackingDB struct {
	ethdb.Database
	n *Node
}

func (db *closeTrackingDB) Close() error {
	db.n.lock.Lock()
	delete(db.n.databases, db)
	db.n.lock.Unlock()
	return db.Database.Close()
}

// wrapDatabase ensures the database will be auto-closed when Node is closed.
func (n *Node) wrapDatabase(db ethdb.Database) ethdb.Database {
	wrapper := &closeTrackingDB{db, n}
	n.databases[wrapper] = struct{}{}
	return wrapper
}

// closeDatabases closes all open databases.
func (n *Node) closeDatabases() (errors []error) {
	for db := range n.databases {
		delete(n.databases, db)
		if err := db.Database.Close(); err != nil {
			errors = append(errors, err)
		}
	}
	return errors
}