Compare commits
61 Commits
SHA1

37685930d9
51df1c1f20
f524ec4326
eb794af833
67a7857124
c73b654fd1
9da128db70
4e5d1f1c39
d57e85ecc9
1990c9e621
319098cc1c
223d943481
8974e2e5e0
a4a2343cdc
fdfd6d3c39
b5537c5601
e916f9786d
909e968ebb
598f786aab
461291882e
1f0f6f0272
0a22ae5572
b0cfd9c786
6d8a1bfb08
4895665670
eaff89291c
e187711c65
d926bf2c7e
8db8d074e2
1a70338734
61a5976368
88c42ab4e7
4210dd1500
c971ab617d
f1986f86f2
28aca90716
9b1536b26a
3e57c33147
baa7eb901e
574378edb5
c95e4a80d1
897ea01d5f
ec192f18b4
aa34173f13
c343f75c26
52b1d09457
9402f96597
d0fd8d6fc2
cfde0b5f52
de06185fc3
3fb5f3ae11
e75d0a6e4c
947e0afeb3
1836366ac1
591cef17d4
e33a5de454
f04c0e341e
ea89f40f0d
1fc54d92ec
8c4a7fa8d3
423d4254f5
.github/CODEOWNERS (vendored): 38 changes
@@ -1,12 +1,32 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.

-accounts/usbwallet @karalabe
-consensus @karalabe
-core/ @karalabe @holiman
-eth/ @karalabe
-les/ @zsfelfoldi
-light/ @zsfelfoldi
-mobile/ @karalabe
-p2p/ @fjl @zsfelfoldi
-whisper/ @gballet @gluk256
+accounts/usbwallet @karalabe
+consensus @karalabe
+core/ @karalabe @holiman
+eth/ @karalabe
+les/ @zsfelfoldi
+light/ @zsfelfoldi
+mobile/ @karalabe
+p2p/ @fjl @zsfelfoldi
+swarm/bmt @zelig
+swarm/dev @lmars
+swarm/fuse @jmozah @holisticode
+swarm/grafana_dashboards @nonsense
+swarm/metrics @nonsense @holisticode
+swarm/multihash @nolash
+swarm/network/bitvector @zelig @janos @gbalint
+swarm/network/priorityqueue @zelig @janos @gbalint
+swarm/network/simulations @zelig
+swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/stream/intervals @janos
+swarm/network/stream/testing @zelig
+swarm/pot @zelig
+swarm/pss @nolash @zelig @nonsense
+swarm/services @zelig
+swarm/state @justelad
+swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/mock @janos
+swarm/storage/mru @nolash
+swarm/testutil @lmars
+whisper/ @gballet @gluk256
.travis.yml: 10 changes
@@ -146,16 +146,16 @@ matrix:
     git:
       submodules: false # avoid cloning ethereum/tests
     before_install:
-      - curl https://storage.googleapis.com/golang/go1.10.2.linux-amd64.tar.gz | tar -xz
+      - curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
      - export PATH=`pwd`/go/bin:$PATH
      - export GOROOT=`pwd`/go
      - export GOPATH=$HOME/go
    script:
      # Build the Android archive and upload it to Maven Central and Azure
-      - curl https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip -o android-ndk-r16b.zip
-      - unzip -q android-ndk-r16b.zip && rm android-ndk-r16b.zip
-      - mv android-ndk-r16b $HOME
-      - export ANDROID_NDK=$HOME/android-ndk-r16b
+      - curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip
+      - unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip
+      - mv android-ndk-r17b $HOME
+      - export ANDROID_NDK=$HOME/android-ndk-r17b

      - mkdir -p $GOPATH/src/github.com/ethereum
      - ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -31,29 +31,14 @@ var (
 	uint16T  = reflect.TypeOf(uint16(0))
 	uint32T  = reflect.TypeOf(uint32(0))
 	uint64T  = reflect.TypeOf(uint64(0))
 	intT     = reflect.TypeOf(int(0))
 	int8T    = reflect.TypeOf(int8(0))
 	int16T   = reflect.TypeOf(int16(0))
 	int32T   = reflect.TypeOf(int32(0))
 	int64T   = reflect.TypeOf(int64(0))
 	addressT = reflect.TypeOf(common.Address{})
-	intTS    = reflect.TypeOf([]int(nil))
-	int8TS   = reflect.TypeOf([]int8(nil))
-	int16TS  = reflect.TypeOf([]int16(nil))
-	int32TS  = reflect.TypeOf([]int32(nil))
-	int64TS  = reflect.TypeOf([]int64(nil))
 )

 // U256 converts a big Int into a 256bit EVM number.
 func U256(n *big.Int) []byte {
 	return math.PaddedBigBytes(math.U256(n), 32)
 }
-
-// checks whether the given reflect value is signed. This also works for slices with a number type
-func isSigned(v reflect.Value) bool {
-	switch v.Type() {
-	case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T:
-		return true
-	}
-	return false
-}

@@ -19,7 +19,6 @@ package abi

 import (
 	"bytes"
 	"math/big"
-	"reflect"
 	"testing"
 )

@@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) {
 		t.Errorf("expected %x got %x", ubytes, unsigned)
 	}
 }
-
-func TestSigned(t *testing.T) {
-	if isSigned(reflect.ValueOf(uint(10))) {
-		t.Error("signed")
-	}
-
-	if !isSigned(reflect.ValueOf(int(10))) {
-		t.Error("not signed")
-	}
-}
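The U256 helper kept by this hunk reduces a big.Int modulo 2^256 and left-pads the result to the 32-byte EVM word size. A minimal sketch of calling it, assuming go-ethereum's common/math package (the package that supplies the math.U256 and math.PaddedBigBytes seen above):

	package main

	import (
		"fmt"
		"math/big"

		"github.com/ethereum/go-ethereum/common/math"
	)

	func main() {
		// Mirrors the U256 body above: wrap into [0, 2^256) and pad to 32 bytes.
		n := big.NewInt(-1)
		b := math.PaddedBigBytes(math.U256(n), 32)
		fmt.Printf("%x\n", b) // 32 bytes of ff: -1 wraps to 2^256 - 1
	}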
@@ -50,7 +50,7 @@ var (
 var KeyStoreType = reflect.TypeOf(&KeyStore{})

 // KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
-var KeyStoreScheme = "keystore"
+const KeyStoreScheme = "keystore"

 // Maximum time between wallet refreshes (if filesystem notifications don't work).
 const walletRefreshCycle = 3 * time.Second
@@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 {
 }

 // Name returns the friendly message type name of a specific protocol buffer
-// type numbers.
+// type number.
 func Name(kind uint16) string {
 	name := MessageType_name[int32(kind)]
 	if len(name) < 12 {
@@ -302,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 	for i, component := range derivationPath {
 		binary.BigEndian.PutUint32(path[1+4*i:], component)
 	}
-	// Create the transaction RLP based on whether legacy or EIP155 signing was requeste
+	// Create the transaction RLP based on whether legacy or EIP155 signing was requested
 	var (
 		txrlp []byte
 		err   error
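The loop above serializes a BIP-32 derivation path for the Ledger APDU payload. The allocation and length-prefix byte are not captured in this hunk, so the sketch below assumes the layout implied by the indexing (one count byte, then each uint32 component big-endian); the example path is hypothetical:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		// Hypothetical path m/44'/60'/0'/0/0; 0x80000000 marks hardened indices.
		derivationPath := []uint32{0x8000002c, 0x8000003c, 0x80000000, 0, 0}
		path := make([]byte, 1+4*len(derivationPath))
		path[0] = byte(len(derivationPath)) // assumed length prefix preceding the components
		for i, component := range derivationPath {
			binary.BigEndian.PutUint32(path[1+4*i:], component)
		}
		fmt.Printf("%x\n", path)
	}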
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.2.windows-%GETH_ARCH%.zip
-  - 7z x go1.10.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
+  - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
  - go version
  - gcc --version
bmt/bmt.go: 560 changes
@@ -1,560 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt provides a binary merkle tree implementation
package bmt

import (
	"fmt"
	"hash"
	"io"
	"strings"
	"sync"
	"sync/atomic"
)

/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g keccak 256 SHA3)

It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash

The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.

Two implementations are provided:

* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
  control structure to coordinate the concurrent routines
  It implements the ChunkHash interface as well as the go standard hash.Hash interface
*/

const (
	// DefaultSegmentCount is the maximum number of segments of the underlying chunk
	DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
	// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
	// the maximum number of concurrent BMT hashing operations performed by the same hasher
	DefaultPoolSize = 8
)

// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash

// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// implements the hash.Hash interface
// reuse pool of Tree-s for amortised memory allocation and resource control
// supports order-agnostic concurrent segment writes
// as well as sequential read and write
// can not be called concurrently on more than one chunk
// can be further appended after Sum
// Reset gives back the Tree to the pool and guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
type Hasher struct {
	pool        *TreePool   // BMT resource pool
	bmt         *Tree       // prebuilt BMT resource for flowcontrol and proofs
	blocksize   int         // segment size (size of hash) also for hash.Hash
	count       int         // segment count
	size        int         // for hash.Hash same as hashsize
	cur         int         // cursor position for rightmost currently open chunk
	segment     []byte      // the rightmost open segment (not complete)
	depth       int         // index of last level
	result      chan []byte // result channel
	hash        []byte      // to record the result
	max         int32       // max segments for SegmentWriter interface
	blockLength []byte      // The block length that needes to be added in Sum
}

// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
	return &Hasher{
		pool:      p,
		depth:     depth(p.SegmentCount),
		size:      p.SegmentSize,
		blocksize: p.SegmentSize,
		count:     p.SegmentCount,
		result:    make(chan []byte),
	}
}

// Node is a reuseable segment hasher representing a node in a BMT
// it allows for continued writes after a Sum
// and is left in completely reusable state after Reset
type Node struct {
	level, index int   // position of node for information/logging only
	initial      bool  // first and last node
	root         bool  // whether the node is root to a smaller BMT
	isLeft       bool  // whether it is left side of the parent double segment
	unbalanced   bool  // indicates if a node has only the left segment
	parent       *Node // BMT connections
	state        int32 // atomic increment impl concurrent boolean toggle
	left, right  []byte
}

// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
	return &Node{
		parent:  parent,
		level:   level,
		index:   index,
		initial: index == 0,
		isLeft:  index%2 == 0,
	}
}

// TreePool provides a pool of Trees used as resources by Hasher
// a Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// Hasher Reset releases the Tree to the pool
type TreePool struct {
	lock         sync.Mutex
	c            chan *Tree
	hasher       BaseHasher
	SegmentSize  int
	SegmentCount int
	Capacity     int
	count        int
}

// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
// on GetTree it reuses free Trees or creates a new one if size is not reached
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
	return &TreePool{
		c:            make(chan *Tree, capacity),
		hasher:       hasher,
		SegmentSize:  hasher().Size(),
		SegmentCount: segmentCount,
		Capacity:     capacity,
	}
}

// Drain drains the pool until it has no more than n resources
func (p *TreePool) Drain(n int) {
	p.lock.Lock()
	defer p.lock.Unlock()
	for len(p.c) > n {
		<-p.c
		p.count--
	}
}

// Reserve is blocking until it returns an available Tree
// it reuses free Trees or creates a new one if size is not reached
func (p *TreePool) Reserve() *Tree {
	p.lock.Lock()
	defer p.lock.Unlock()
	var t *Tree
	if p.count == p.Capacity {
		return <-p.c
	}
	select {
	case t = <-p.c:
	default:
		t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
		p.count++
	}
	return t
}

// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in reusable state
// does not need locking
func (p *TreePool) Release(t *Tree) {
	p.c <- t // can never fail but...
}

// Tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to pick one for each chunk hash
// the Tree is 'locked' while not in the pool
type Tree struct {
	leaves []*Node
}

// Draw draws the BMT (badly)
func (t *Tree) Draw(hash []byte, d int) string {
	var left, right []string
	var anc []*Node
	for i, n := range t.leaves {
		left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
		if i%2 == 0 {
			anc = append(anc, n.parent)
		}
		right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
	}
	anc = t.leaves
	var hashes [][]string
	for l := 0; len(anc) > 0; l++ {
		var nodes []*Node
		hash := []string{""}
		for i, n := range anc {
			hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
			if i%2 == 0 && n.parent != nil {
				nodes = append(nodes, n.parent)
			}
		}
		hash = append(hash, "")
		hashes = append(hashes, hash)
		anc = nodes
	}
	hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
	total := 60
	del := " "
	var rows []string
	for i := len(hashes) - 1; i >= 0; i-- {
		var textlen int
		hash := hashes[i]
		for _, s := range hash {
			textlen += len(s)
		}
		if total < textlen {
			total = textlen + len(hash)
		}
		delsize := (total - textlen) / (len(hash) - 1)
		if delsize > len(del) {
			delsize = len(del)
		}
		row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
		rows = append(rows, row)

	}
	rows = append(rows, strings.Join(left, " "))
	rows = append(rows, strings.Join(right, " "))
	return strings.Join(rows, "\n") + "\n"
}

// NewTree initialises the Tree by building up the nodes of a BMT
// segment size is stipulated to be the size of the hash
// segmentCount needs to be positive integer and does not need to be
// a power of two and can even be an odd number
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
	n := NewNode(0, 0, nil)
	n.root = true
	prevlevel := []*Node{n}
	// iterate over levels and creates 2^level nodes
	level := 1
	count := 2
	for d := 1; d <= depth(segmentCount); d++ {
		nodes := make([]*Node, count)
		for i := 0; i < len(nodes); i++ {
			parent := prevlevel[i/2]
			t := NewNode(level, i, parent)
			nodes[i] = t
		}
		prevlevel = nodes
		level++
		count *= 2
	}
	// the datanode level is the nodes on the last level where
	return &Tree{
		leaves: prevlevel,
	}
}

// methods needed by hash.Hash

// Size returns the size
func (h *Hasher) Size() int {
	return h.size
}

// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
	return h.blocksize
}

// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (h *Hasher) Sum(b []byte) (r []byte) {
	t := h.bmt
	i := h.cur
	n := t.leaves[i]
	j := i
	// must run strictly before all nodes calculate
	// datanodes are guaranteed to have a parent
	if len(h.segment) > h.size && i > 0 && n.parent != nil {
		n = n.parent
	} else {
		i *= 2
	}
	d := h.finalise(n, i)
	h.writeSegment(j, h.segment, d)
	c := <-h.result
	h.releaseTree()

	// sha3(length + BMT(pure_chunk))
	if h.blockLength == nil {
		return c
	}
	res := h.pool.hasher()
	res.Reset()
	res.Write(h.blockLength)
	res.Write(c)
	return res.Sum(nil)
}

// Hasher implements the SwarmHash interface

// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (h *Hasher) Hash() []byte {
	return <-h.result
}

// Hasher implements the io.Writer interface

// Write fills the buffer to hash
// with every full segment complete launches a hasher go routine
// that shoots up the BMT
func (h *Hasher) Write(b []byte) (int, error) {
	l := len(b)
	if l <= 0 {
		return 0, nil
	}
	s := h.segment
	i := h.cur
	count := (h.count + 1) / 2
	need := h.count*h.size - h.cur*2*h.size
	size := h.size
	if need > size {
		size *= 2
	}
	if l < need {
		need = l
	}
	// calculate missing bit to complete current open segment
	rest := size - len(s)
	if need < rest {
		rest = need
	}
	s = append(s, b[:rest]...)
	need -= rest
	// read full segments and the last possibly partial segment
	for need > 0 && i < count-1 {
		// push all finished chunks we read
		h.writeSegment(i, s, h.depth)
		need -= size
		if need < 0 {
			size += need
		}
		s = b[rest : rest+size]
		rest += size
		i++
	}
	h.segment = s
	h.cur = i
	// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
	return l, nil
}

// Hasher implements the io.ReaderFrom interface

// ReadFrom reads from io.Reader and appends to the data to hash using Write
// it reads so that chunk to hash is maximum length or reader reaches EOF
// caller must Reset the hasher prior to call
func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
	bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
	buf := make([]byte, bufsize)
	var read int
	for {
		var n int
		n, err = r.Read(buf)
		read += n
		if err == io.EOF || read == len(buf) {
			hash := h.Sum(buf[:n])
			if read == len(buf) {
				err = NewEOC(hash)
			}
			break
		}
		if err != nil {
			break
		}
		n, err = h.Write(buf[:n])
		if err != nil {
			break
		}
	}
	return int64(read), err
}

// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
	h.getTree()
	h.blockLength = nil
}

// Hasher implements the SwarmHash interface

// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (h *Hasher) ResetWithLength(l []byte) {
	h.Reset()
	h.blockLength = l
}

// Release gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
	if h.bmt != nil {
		n := h.bmt.leaves[h.cur]
		for ; n != nil; n = n.parent {
			n.unbalanced = false
			if n.parent != nil {
				n.root = false
			}
		}
		h.pool.Release(h.bmt)
		h.bmt = nil

	}
	h.cur = 0
	h.segment = nil
}

func (h *Hasher) writeSegment(i int, s []byte, d int) {
	hash := h.pool.hasher()
	n := h.bmt.leaves[i]

	if len(s) > h.size && n.parent != nil {
		go func() {
			hash.Reset()
			hash.Write(s)
			s = hash.Sum(nil)

			if n.root {
				h.result <- s
				return
			}
			h.run(n.parent, hash, d, n.index, s)
		}()
		return
	}
	go h.run(n, hash, d, i*2, s)
}

func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
	isLeft := i%2 == 0
	for {
		if isLeft {
			n.left = s
		} else {
			n.right = s
		}
		if !n.unbalanced && n.toggle() {
			return
		}
		if !n.unbalanced || !isLeft || i == 0 && d == 0 {
			hash.Reset()
			hash.Write(n.left)
			hash.Write(n.right)
			s = hash.Sum(nil)

		} else {
			s = append(n.left, n.right...)
		}

		h.hash = s
		if n.root {
			h.result <- s
			return
		}

		isLeft = n.isLeft
		n = n.parent
		i++
	}
}

// getTree obtains a BMT resource by reserving one from the pool
func (h *Hasher) getTree() *Tree {
	if h.bmt != nil {
		return h.bmt
	}
	t := h.pool.Reserve()
	h.bmt = t
	return t
}

// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic addint with %2 implements atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (n *Node) toggle() bool {
	return atomic.AddInt32(&n.state, 1)%2 == 1
}

func hashstr(b []byte) string {
	end := len(b)
	if end > 4 {
		end = 4
	}
	return fmt.Sprintf("%x", b[:end])
}

func depth(n int) (d int) {
	for l := (n - 1) / 2; l > 0; l /= 2 {
		d++
	}
	return d
}

// finalise is following the zigzags on the tree belonging
// to the final datasegment
func (h *Hasher) finalise(n *Node, i int) (d int) {
	isLeft := i%2 == 0
	for {
		// when the final segment's path is going via left segments
		// the incoming data is pushed to the parent upon pulling the left
		// we do not need toggle the state since this condition is
		// detectable
		n.unbalanced = isLeft
		n.right = nil
		if n.initial {
			n.root = true
			return d
		}
		isLeft = n.isLeft
		n = n.parent
		d++
	}
}

// EOC (end of chunk) implements the error interface
type EOC struct {
	Hash []byte // read the hash of the chunk off the error
}

// Error returns the error string
func (e *EOC) Error() string {
	return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
}

// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
	return &EOC{hash}
}
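The package comment above describes two hashers. For orientation, here is a minimal sketch of driving the concurrent Hasher the same way the deleted bmt_test.go below does (keccak256 base hasher, 128 segments). The import path reflects the pre-deletion location of the package; the CODEOWNERS hunk earlier in this compare adds a swarm/bmt entry, so the code appears to have moved under swarm/:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/bmt"
		"github.com/ethereum/go-ethereum/crypto/sha3"
	)

	func main() {
		// One pooled Tree suffices for sequential use; Drain releases resources.
		pool := bmt.NewTreePool(sha3.NewKeccak256, bmt.DefaultSegmentCount, 1)
		defer pool.Drain(0)

		h := bmt.New(pool)
		chunk := make([]byte, 4096) // up to SegmentSize*SegmentCount bytes

		h.Reset() // must precede writing; reserves a Tree from the pool
		h.Write(chunk)
		fmt.Printf("%x\n", h.Sum(nil)) // BMT root of the chunk
	}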
bmt/bmt_r.go: 85 changes
@@ -1,85 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any paralellisms and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt

import (
	"hash"
)

// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
	span    int
	section int
	cap     int
	h       hash.Hash
}

// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
	h := hasher()
	hashsize := h.Size()
	maxsize := hashsize * count
	c := 2
	for ; c < count; c *= 2 {
	}
	if c > 2 {
		c /= 2
	}
	return &RefHasher{
		section: 2 * hashsize,
		span:    c * hashsize,
		cap:     maxsize,
		h:       h,
	}
}

// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
	if len(d) > rh.cap {
		d = d[:rh.cap]
	}

	return rh.hash(d, rh.span)
}

func (rh *RefHasher) hash(d []byte, s int) []byte {
	l := len(d)
	left := d
	var right []byte
	if l > rh.section {
		for ; s >= l; s /= 2 {
		}
		left = rh.hash(d[:s], s)
		right = d[s:]
		if l-s > rh.section/2 {
			right = rh.hash(right, s)
		}
	}
	defer rh.h.Reset()
	rh.h.Write(left)
	rh.h.Write(right)
	h := rh.h.Sum(nil)
	return h
}
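RefHasher recursively splits the chunk at the largest power-of-two span and hashes the two halves, which is exactly the shape the expected-value table in bmt_test.go below spells out. A self-contained sketch comparing it against that composition for a 128-byte chunk, assuming the same keccak256 base hasher and pre-deletion import path as above:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/ethereum/go-ethereum/bmt"
		"github.com/ethereum/go-ethereum/crypto/sha3"
	)

	func main() {
		keccak := func(parts ...[]byte) []byte {
			h := sha3.NewKeccak256()
			for _, p := range parts {
				h.Write(p)
			}
			return h.Sum(nil)
		}
		data := make([]byte, 128)
		// Per the test table below: lengths in [97,128] hash as
		// sha3(sha3(data[:64]), sha3(data[64:])).
		expected := keccak(keccak(data[:64]), keccak(data[64:]))
		actual := bmt.NewRefHasher(sha3.NewKeccak256, 128).Hash(data)
		fmt.Println(bytes.Equal(actual, expected)) // true
	}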
bmt/bmt_test.go: 481 changes
@@ -1,481 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bmt

import (
	"bytes"
	crand "crypto/rand"
	"fmt"
	"hash"
	"io"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto/sha3"
)

const (
	maxproccnt = 8
)

// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
	hashFunc := sha3.NewKeccak256

	sha3 := func(data ...[]byte) []byte {
		h := hashFunc()
		for _, v := range data {
			h.Write(v)
		}
		return h.Sum(nil)
	}

	// the test struct is used to specify the expected BMT hash for data
	// lengths between "from" and "to"
	type test struct {
		from     int64
		to       int64
		expected func([]byte) []byte
	}

	var tests []*test

	// all lengths in [0,64] should be:
	//
	//   sha3(data)
	//
	tests = append(tests, &test{
		from: 0,
		to:   64,
		expected: func(data []byte) []byte {
			return sha3(data)
		},
	})

	// all lengths in [65,96] should be:
	//
	//   sha3(
	//     sha3(data[:64])
	//     data[64:]
	//   )
	//
	tests = append(tests, &test{
		from: 65,
		to:   96,
		expected: func(data []byte) []byte {
			return sha3(sha3(data[:64]), data[64:])
		},
	})

	// all lengths in [97,128] should be:
	//
	//   sha3(
	//     sha3(data[:64])
	//     sha3(data[64:])
	//   )
	//
	tests = append(tests, &test{
		from: 97,
		to:   128,
		expected: func(data []byte) []byte {
			return sha3(sha3(data[:64]), sha3(data[64:]))
		},
	})

	// all lengths in [129,160] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     data[128:]
	//   )
	//
	tests = append(tests, &test{
		from: 129,
		to:   160,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
		},
	})

	// all lengths in [161,192] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(data[128:])
	//   )
	//
	tests = append(tests, &test{
		from: 161,
		to:   192,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
		},
	})

	// all lengths in [193,224] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(
	//       sha3(data[128:192])
	//       data[192:]
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 193,
		to:   224,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
		},
	})

	// all lengths in [225,256] should be:
	//
	//   sha3(
	//     sha3(
	//       sha3(data[:64])
	//       sha3(data[64:128])
	//     )
	//     sha3(
	//       sha3(data[128:192])
	//       sha3(data[192:])
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 225,
		to:   256,
		expected: func(data []byte) []byte {
			return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
		},
	})

	// run the tests
	for _, x := range tests {
		for length := x.from; length <= x.to; length++ {
			t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
				data := make([]byte, length)
				if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
					t.Fatal(err)
				}
				expected := x.expected(data)
				actual := NewRefHasher(hashFunc, 128).Hash(data)
				if !bytes.Equal(actual, expected) {
					t.Fatalf("expected %x, got %x", expected, actual)
				}
			})
		}
	}
}

func testDataReader(l int) (r io.Reader) {
	return io.LimitReader(crand.Reader, int64(l))
}

func TestHasherCorrectness(t *testing.T) {
	err := testHasher(testBaseHasher)
	if err != nil {
		t.Fatal(err)
	}
}

func testHasher(f func(BaseHasher, []byte, int, int) error) error {
	tdata := testDataReader(4128)
	data := make([]byte, 4128)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	size := hasher().Size()
	counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}

	var err error
	for _, count := range counts {
		max := count * size
		incr := 1
		for n := 0; n <= max+incr; n += incr {
			err = f(hasher, data, n, count)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func TestHasherReuseWithoutRelease(t *testing.T) {
	testHasherReuse(1, t)
}

func TestHasherReuseWithRelease(t *testing.T) {
	testHasherReuse(maxproccnt, t)
}

func testHasherReuse(i int, t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, i)
	defer pool.Drain(0)
	bmt := New(pool)

	for i := 0; i < 500; i++ {
		n := rand.Intn(4096)
		tdata := testDataReader(n)
		data := make([]byte, n)
		tdata.Read(data)

		err := testHasherCorrectness(bmt, hasher, data, n, 128)
		if err != nil {
			t.Fatal(err)
		}
	}
}

func TestHasherConcurrency(t *testing.T) {
	hasher := sha3.NewKeccak256
	pool := NewTreePool(hasher, 128, maxproccnt)
	defer pool.Drain(0)
	wg := sync.WaitGroup{}
	cycles := 100
	wg.Add(maxproccnt * cycles)
	errc := make(chan error)

	for p := 0; p < maxproccnt; p++ {
		for i := 0; i < cycles; i++ {
			go func() {
				bmt := New(pool)
				n := rand.Intn(4096)
				tdata := testDataReader(n)
				data := make([]byte, n)
				tdata.Read(data)
				err := testHasherCorrectness(bmt, hasher, data, n, 128)
				wg.Done()
				if err != nil {
					errc <- err
				}
			}()
		}
	}
	go func() {
		wg.Wait()
		close(errc)
	}()
	var err error
	select {
	case <-time.NewTimer(5 * time.Second).C:
		err = fmt.Errorf("timed out")
	case err = <-errc:
	}
	if err != nil {
		t.Fatal(err)
	}
}

func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
	pool := NewTreePool(hasher, count, 1)
	defer pool.Drain(0)
	bmt := New(pool)
	return testHasherCorrectness(bmt, hasher, d, n, count)
}

func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
	data := d[:n]
	rbmt := NewRefHasher(hasher, count)
	exp := rbmt.Hash(data)
	timeout := time.NewTimer(time.Second)
	c := make(chan error)

	go func() {
		bmt.Reset()
		bmt.Write(data)
		got := bmt.Sum(nil)
		if !bytes.Equal(got, exp) {
			c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
		}
		close(c)
	}()
	select {
	case <-timeout.C:
		err = fmt.Errorf("BMT hash calculation timed out")
	case err = <-c:
	}
	return err
}

func BenchmarkSHA3_4k(t *testing.B)   { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B)   { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B)   { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }

func BenchmarkBMTBaseline_4k(t *testing.B)   { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B)   { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B)   { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }

func BenchmarkRefHasher_4k(t *testing.B)   { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B)   { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B)   { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }

func BenchmarkHasher_4k(t *testing.B)   { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B)   { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B)   { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }

func BenchmarkHasherNoReuse_4k(t *testing.B)   { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B)   { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B)   { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }

func BenchmarkHasherReuse_4k(t *testing.B)   { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B)   { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B)   { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }

// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
// doing it on n maxproccnt each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
	tdata := testDataReader(64)
	data := make([]byte, 64)
	tdata.Read(data)
	hasher := sha3.NewKeccak256

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		count := int32((n-1)/hasher().Size() + 1)
		wg := sync.WaitGroup{}
		wg.Add(maxproccnt)
		var i int32
		for j := 0; j < maxproccnt; j++ {
			go func() {
				defer wg.Done()
				h := hasher()
				for atomic.AddInt32(&i, 1) < count {
					h.Reset()
					h.Write(data)
					h.Sum(nil)
				}
			}()
		}
		wg.Wait()
	}
}

func benchmarkHasher(n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	size := 1
	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, size)
	bmt := New(pool)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		bmt.Reset()
		bmt.Write(data)
		bmt.Sum(nil)
	}
}

func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
	tdata := testDataReader(n)
	data := make([]byte, n)
	tdata.Read(data)

	hasher := sha3.NewKeccak256
	segmentCount := 128
	pool := NewTreePool(hasher, segmentCount, poolsize)
	cycles := 200

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		wg := sync.WaitGroup{}
		wg.Add(cycles)
		for j := 0; j < cycles; j++ {
			bmt := New(pool)
			go func() {
				defer wg.Done()
				bmt.Reset()
				bmt.Write(data)
				bmt.Sum(nil)
			}()
		}
		wg.Wait()
	}
}

func benchmarkSHA3(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	h := hasher()

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		h.Reset()
		h.Write(data)
		h.Sum(nil)
	}
}

func benchmarkRefHasher(n int, t *testing.B) {
	data := make([]byte, n)
	tdata := testDataReader(n)
	tdata.Read(data)
	hasher := sha3.NewKeccak256
	rbmt := NewRefHasher(hasher, 128)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		rbmt.Hash(data)
	}
}
@@ -330,6 +330,7 @@ func doLint(cmdline []string) {
	configs := []string{
		"--vendor",
		"--tests",
		"--deadline=2m",
		"--disable-all",
		"--enable=goimports",
		"--enable=varcheck",
@@ -1,18 +1,18 @@
-#!/usr/bin/env bash
+#!/bin/sh

 find_files() {
-  find . -not \( \
+  find . ! \( \
     \( \
-      -wholename '.github' \
-      -o -wholename './build/_workspace' \
-      -o -wholename './build/bin' \
-      -o -wholename './crypto/bn256' \
-      -o -wholename '*/vendor/*' \
+      -path '.github' \
+      -o -path './build/_workspace' \
+      -o -path './build/bin' \
+      -o -path './crypto/bn256' \
+      -o -path '*/vendor/*' \
    \) -prune \
  \) -name '*.go'
}

-GOFMT="gofmt -s -w";
-GOIMPORTS="goimports -w";
-find_files | xargs $GOFMT;
-find_files | xargs $GOIMPORTS;
+GOFMT="gofmt -s -w"
+GOIMPORTS="goimports -w"
+find_files | xargs $GOFMT
+find_files | xargs $GOIMPORTS
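The rewrite above is a portability fix: -not and -wholename are GNU find extensions, while ! and -path are specified by POSIX, so the script now runs under a plain /bin/sh toolchain; the dropped trailing semicolons were likewise redundant in shell.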
@@ -77,9 +77,6 @@ var (
 	accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
 	accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")

-	githubUser  = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
-	githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")
-
 	captchaToken  = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
 	captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")

@@ -638,59 +635,6 @@ func sendSuccess(conn *websocket.Conn, msg string) error {
 	return send(conn, map[string]string{"success": msg}, time.Second)
 }

-// authGitHub tries to authenticate a faucet request using GitHub gists, returning
-// the username, avatar URL and Ethereum address to fund on success.
-func authGitHub(url string) (string, string, common.Address, error) {
-	// Retrieve the gist from the GitHub Gist APIs
-	parts := strings.Split(url, "/")
-	req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
-	if *githubUser != "" {
-		req.SetBasicAuth(*githubUser, *githubToken)
-	}
-	res, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return "", "", common.Address{}, err
-	}
-	var gist struct {
-		Owner struct {
-			Login string `json:"login"`
-		} `json:"owner"`
-		Files map[string]struct {
-			Content string `json:"content"`
-		} `json:"files"`
-	}
-	err = json.NewDecoder(res.Body).Decode(&gist)
-	res.Body.Close()
-	if err != nil {
-		return "", "", common.Address{}, err
-	}
-	if gist.Owner.Login == "" {
-		return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
-	}
-	// Iterate over all the files and look for Ethereum addresses
-	var address common.Address
-	for _, file := range gist.Files {
-		content := strings.TrimSpace(file.Content)
-		if len(content) == 2+common.AddressLength*2 {
-			address = common.HexToAddress(content)
-		}
-	}
-	if address == (common.Address{}) {
-		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
-	}
-	// Validate the user's existence since the API is unhelpful here
-	if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
-		return "", "", common.Address{}, err
-	}
-	res.Body.Close()
-
-	if res.StatusCode != 200 {
-		return "", "", common.Address{}, errors.New("Invalid user... boom!")
-	}
-	// Everything passed validation, return the gathered infos
-	return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
-}
-
 // authTwitter tries to authenticate a faucet request using Twitter posts, returning
 // the username, avatar URL and Ethereum address to fund on success.
 func authTwitter(url string) (string, string, common.Address, error) {
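The deleted gist scan keys on exact string length: a hex-encoded address is the 0x prefix plus two characters per byte. A self-contained sketch of that check, assuming go-ethereum's common package (where AddressLength is 20) and a hypothetical address string:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
	)

	func main() {
		content := "0x00112233445566778899aabbccddeeff00112233" // hypothetical
		if len(content) == 2+common.AddressLength*2 {           // 2 + 20*2 = 42
			fmt.Println(common.HexToAddress(content).Hex())
		}
	}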
@@ -77,7 +77,7 @@ var customGenesisTests = []struct {
			"homesteadBlock" : 314,
			"daoForkBlock"   : 141,
			"daoForkSupport" : true
		},
	}
}`,
		query:  "eth.getBlock(0).nonce",
		result: "0x0000000000000042",
@@ -144,6 +144,15 @@ var (
 		utils.WhisperMaxMessageSizeFlag,
 		utils.WhisperMinPOWFlag,
 	}
+
+	metricsFlags = []cli.Flag{
+		utils.MetricsEnableInfluxDBFlag,
+		utils.MetricsInfluxDBEndpointFlag,
+		utils.MetricsInfluxDBDatabaseFlag,
+		utils.MetricsInfluxDBUsernameFlag,
+		utils.MetricsInfluxDBPasswordFlag,
+		utils.MetricsInfluxDBHostTagFlag,
+	}
 )

 func init() {

@@ -186,6 +195,7 @@ func init() {
 	app.Flags = append(app.Flags, consoleFlags...)
 	app.Flags = append(app.Flags, debug.Flags...)
 	app.Flags = append(app.Flags, whisperFlags...)
+	app.Flags = append(app.Flags, metricsFlags...)

 	app.Before = func(ctx *cli.Context) error {
 		runtime.GOMAXPROCS(runtime.NumCPU())

@@ -208,6 +218,9 @@
 		log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
 		godebug.SetGCPercent(int(gogc))

+		// Start metrics export if enabled
+		utils.SetupMetrics(ctx)
+
 		// Start system runtime metrics collection
 		go metrics.CollectProcessMetrics(3 * time.Second)
@@ -206,11 +206,22 @@ var AppHelpFlagGroups = []flagGroup{
 	{
 		Name: "LOGGING AND DEBUGGING",
 		Flags: append([]cli.Flag{
-			utils.MetricsEnabledFlag,
 			utils.FakePoWFlag,
 			utils.NoCompactionFlag,
 		}, debug.Flags...),
 	},
+	{
+		Name: "METRICS AND STATS",
+		Flags: []cli.Flag{
+			utils.MetricsEnabledFlag,
+			utils.MetricsEnableInfluxDBFlag,
+			utils.MetricsInfluxDBEndpointFlag,
+			utils.MetricsInfluxDBDatabaseFlag,
+			utils.MetricsInfluxDBUsernameFlag,
+			utils.MetricsInfluxDBPasswordFlag,
+			utils.MetricsInfluxDBHostTagFlag,
+		},
+	},
 	{
 		Name: "WHISPER (EXPERIMENTAL)",
 		Flags: whisperFlags,
@@ -180,7 +180,10 @@ func main() {
 			},
 		},
 	}
-	app.Run(os.Args)
+	if err := app.Run(os.Args); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
 }

 func showNetwork(ctx *cli.Context) error {

@@ -275,9 +278,8 @@ func createNode(ctx *cli.Context) error {
 	if len(ctx.Args()) != 0 {
 		return cli.ShowCommandHelp(ctx, ctx.Command.Name)
 	}
-	config := &adapters.NodeConfig{
-		Name: ctx.String("name"),
-	}
+	config := adapters.RandomNodeConfig()
+	config.Name = ctx.String("name")
 	if key := ctx.String("key"); key != "" {
 		privKey, err := crypto.HexToECDSA(key)
 		if err != nil {
@@ -276,13 +276,3 @@ func (stats serverStats) render() {
 	}
 	table.Render()
 }
-
-// protips contains a collection of network infos to report pro-tips
-// based on.
-type protips struct {
-	genesis   string
-	network   int64
-	bootFull  []string
-	bootLight []string
-	ethstats  string
-}
@@ -24,6 +24,7 @@ import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
@@ -37,6 +38,8 @@ import (
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
)
|
||||
|
||||
const SWARM_VERSION = "0.3"
|
||||
|
||||
var (
|
||||
//flag definition for the dumpconfig command
|
||||
DumpConfigCommand = cli.Command{
|
||||
@@ -58,19 +61,25 @@ var (
|
||||
|
||||
//constants for environment variables
|
||||
const (
|
||||
SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
|
||||
SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
|
||||
SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
|
||||
SWARM_ENV_PORT = "SWARM_PORT"
|
||||
SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
|
||||
SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
|
||||
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
|
||||
SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE"
|
||||
SWARM_ENV_ENS_API = "SWARM_ENS_API"
|
||||
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
|
||||
SWARM_ENV_CORS = "SWARM_CORS"
|
||||
SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
|
||||
GETH_ENV_DATADIR = "GETH_DATADIR"
|
||||
SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
|
||||
SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
|
||||
SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
|
||||
SWARM_ENV_PORT = "SWARM_PORT"
|
||||
SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
|
||||
SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
|
||||
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
|
||||
SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
|
||||
SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
|
||||
SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
|
||||
SWARM_ENV_ENS_API = "SWARM_ENS_API"
|
||||
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
|
||||
SWARM_ENV_CORS = "SWARM_CORS"
|
||||
SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
|
||||
SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
|
||||
SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
|
||||
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
|
||||
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
|
||||
GETH_ENV_DATADIR = "GETH_DATADIR"
|
||||
)
|
||||
|
||||
// These settings ensure that TOML keys use the same names as Go struct fields.
|
||||
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{
|
||||
|
||||
//before booting the swarm node, build the configuration
|
||||
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
|
||||
//check for deprecated flags
|
||||
checkDeprecated(ctx)
|
||||
//start by creating a default config
|
||||
config = bzzapi.NewDefaultConfig()
|
||||
config = bzzapi.NewConfig()
|
||||
//first load settings from config file (if provided)
|
||||
config, err = configFileOverride(config, ctx)
|
||||
if err != nil {
|
||||
@@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
|
||||
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
|
||||
if id, _ := strconv.Atoi(networkid); id != 0 {
|
||||
currentConfig.NetworkId = uint64(id)
|
||||
currentConfig.NetworkID = uint64(id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
currentConfig.SwapEnabled = true
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
|
||||
currentConfig.SyncEnabled = true
|
||||
if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
|
||||
currentConfig.SyncEnabled = false
|
||||
}
|
||||
|
||||
currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
|
||||
if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
|
||||
currentConfig.SyncUpdateDelay = d
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
|
||||
currentConfig.DeliverySkipCheck = true
|
||||
}
|
||||
|
||||
currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
|
||||
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
||||
}
|
||||
|
||||
@@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
currentConfig.EnsAPIs = ensAPIs
|
||||
}
|
||||
|
||||
if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
|
||||
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
|
||||
}
|
||||
|
||||
if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
|
||||
currentConfig.Cors = cors
|
||||
}
|
||||
@@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
||||
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
|
||||
}
|
||||
|
||||
if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
|
||||
currentConfig.LocalStoreParams.ChunkDbPath = storePath
|
||||
}
|
||||
|
||||
if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
|
||||
currentConfig.LocalStoreParams.DbCapacity = storeCapacity
|
||||
}
|
||||
|
||||
if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
|
||||
currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
|
||||
}
|
||||
|
||||
return currentConfig
|
||||
|
||||
}
|
||||
@@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {

 	if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
 		if id, _ := strconv.Atoi(networkid); id != 0 {
-			currentConfig.NetworkId = uint64(id)
+			currentConfig.NetworkID = uint64(id)
 		}
 	}

@@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
 		}
 	}

-	if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
-		if sync, err := strconv.ParseBool(syncenable); err == nil {
-			currentConfig.SyncEnabled = sync
+	if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
+		if sync, err := strconv.ParseBool(syncdisable); err == nil {
+			currentConfig.SyncEnabled = !sync
 		}
 	}

+	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
+		if skipCheck, err := strconv.ParseBool(v); err == nil {
+			currentConfig.DeliverySkipCheck = skipCheck
+		}
+	}
+
+	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
+		if d, err := time.ParseDuration(v); err == nil {
+			currentConfig.SyncUpdateDelay = d
+		}
+	}
+
 	if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
-		currentConfig.SwapApi = swapapi
+		currentConfig.SwapAPI = swapapi
 	}

-	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
 		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
 	}

@@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error {
 	return nil
 }

-//deprecated flags checked here
-func checkDeprecated(ctx *cli.Context) {
-	// exit if the deprecated --ethapi flag is set
-	if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
-		utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
-	}
-	// warn if --ens-api flag is set
-	if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
-		log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
-	}
-}
-
 //validate configuration parameters
 func validateConfig(cfg *bzzapi.Config) (err error) {
 	for _, ensAPI := range cfg.EnsAPIs {
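envVarsOverride reads the same settings from the environment; note the parse guard, which leaves the config untouched when the variable holds garbage, and the inversion for the "disable" variable. A hedged sketch of that shape (the environment variable names below are illustrative; the real names live behind the SWARM_ENV_* constants):

    package main

    import (
        "os"
        "strconv"
        "time"
    )

    type config struct {
        SyncEnabled     bool
        SyncUpdateDelay time.Duration
    }

    func envOverride(cfg *config) *config {
        // a "disable" variable set to true must end up as SyncEnabled=false
        if v := os.Getenv("SWARM_NO_SYNC"); v != "" {
            if disable, err := strconv.ParseBool(v); err == nil {
                cfg.SyncEnabled = !disable
            }
        }
        if v := os.Getenv("SWARM_SYNC_UPDATE_DELAY"); v != "" {
            if d, err := time.ParseDuration(v); err == nil {
                cfg.SyncUpdateDelay = d
            }
        }
        return cfg
    }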
@@ -34,7 +34,7 @@ import (

 func TestDumpConfig(t *testing.T) {
 	swarm := runSwarm(t, "dumpconfig")
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
 		t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
+func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestFailsNoBzzAccount(t *testing.T) {
+func TestConfigFailsNoBzzAccount(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestCmdLineOverrides(t *testing.T) {
+func TestConfigCmdLineOverrides(t *testing.T) {
 	dir, err := ioutil.TempDir("", "bzztest")
 	if err != nil {
 		t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
 		fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
 		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
+		fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
 		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
 		"--datadir", dir,
 		"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 42 {
-		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
+	if info.NetworkID != 42 {
+		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
+	}
+
 	if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
 	node.Shutdown()
 }

-func TestFileOverrides(t *testing.T) {
+func TestConfigFileOverrides(t *testing.T) {

 	// assign ports
 	httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {

 	//create a config file
 	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = true
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = false
+	defaultConf.DeliverySkipCheck = true
+	defaultConf.NetworkID = 54
 	defaultConf.Port = httpPort
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
 	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
 	//create a TOML string
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 54 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.NetworkID != 54 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
 	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected DbCapacity to be %d, got %d", 9000000, info.DbCapacity)
 	}

-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
 	}

 	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
 		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
 	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

 	node.Shutdown()
 }

-func TestEnvVars(t *testing.T) {
+func TestConfigEnvVars(t *testing.T) {
 	// assign ports
 	httpPort, err := assignTCPPort()
 	if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
 	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
 	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
 	envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
-	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))

 	dir, err := ioutil.TempDir("", "bzztest")
 	if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 999 {
-		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
+	if info.NetworkID != 999 {
+		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
 	}

 	if info.Cors != "*" {
 		t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
+	}
+
 	node.Shutdown()
 	cmd.Process.Kill()
 }

-func TestCmdLineOverridesFile(t *testing.T) {
+func TestConfigCmdLineOverridesFile(t *testing.T) {

 	// assign ports
 	httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {

 	//create a config file
 	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = false
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = true
+	defaultConf.NetworkID = 54
 	defaultConf.Port = "8588"
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
 	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
 	//create a TOML file
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
 		t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
 	}
 	//write file
-	f, err := ioutil.TempFile("", "testconfig.toml")
+	fname := "testconfig.toml"
+	f, err := ioutil.TempFile("", fname)
 	if err != nil {
 		t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
 	}
+	defer os.Remove(fname)
 	//write file
 	_, err = f.WriteString(string(out))
 	if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
 		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
 		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
 		"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != expectNetworkId {
-		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
+	if info.NetworkID != expectNetworkId {
+		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.LocalStoreParams.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
 	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
-	}
-
-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
 	}

 	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
 		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
 	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

 	node.Shutdown()
 }
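The file-override tests all follow one recipe: marshal a default config to TOML, tweak a few fields, write it to a temp file, and start the node with the config-file flag. A condensed sketch of that recipe (assuming the naoina/toml package that geth's tomlSettings wraps; the struct and field values are illustrative):

    package main

    import (
        "io/ioutil"
        "os"

        "github.com/naoina/toml"
    )

    type config struct {
        NetworkID  uint64
        DbCapacity uint64
    }

    func writeTestConfig() (path string, err error) {
        conf := config{NetworkID: 54, DbCapacity: 9000000}
        out, err := toml.Marshal(conf)
        if err != nil {
            return "", err
        }
        f, err := ioutil.TempFile("", "testconfig.toml")
        if err != nil {
            return "", err
        }
        defer f.Close()
        if _, err := f.Write(out); err != nil {
            os.Remove(f.Name())
            return "", err
        }
        // the returned path is then passed via the TOML config-path flag
        return f.Name(), nil
    }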
@@ -23,6 +23,7 @@ import (
 	"path/filepath"

 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (

 func dbExport(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {

 func dbImport(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {

 func dbClean(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 1 {
-		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
+	if len(args) != 2 {
+		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
 	store.Cleanup()
 }

-func openDbStore(path string) (*storage.DbStore, error) {
+func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
 	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
 		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
 	}
-	hash := storage.MakeHashFunc("SHA3")
-	return storage.NewDbStore(path, hash, 10000000, 0)
+
+	storeparams := storage.NewDefaultStoreParams()
+	ldbparams := storage.NewLDBStoreParams(storeparams, path)
+	ldbparams.BaseKey = basekey
+	return storage.NewLDBStore(ldbparams)
 }
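With this change every db subcommand also takes the node's base key (its bzz key, hex-encoded without the 0x prefix) so the LDBStore can be opened the same way the node opened it. Illustrative invocations, with the chunk DB path and key as placeholders rather than values from the PR:

    swarm db export ~/.ethereum/swarm/bzz-<key>/chunks export.tar <basekey-hex>
    swarm db import ~/.ethereum/swarm/bzz-<key>/chunks export.tar <basekey-hex>
    swarm db clean  ~/.ethereum/swarm/bzz-<key>/chunks <basekey-hex>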
85
cmd/swarm/download.go
Normal file
@@ -0,0 +1,85 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"gopkg.in/urfave/cli.v1"
)

func download(ctx *cli.Context) {
	log.Debug("downloading content using swarm down")
	args := ctx.Args()
	dest := "."

	switch len(args) {
	case 0:
		utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
	case 1:
		log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
	default:
		log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
		if absDest, err := filepath.Abs(args[1]); err == nil {
			dest = absDest
		} else {
			utils.Fatalf("could not get download path: %v", err)
		}
	}

	var (
		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
		isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
		client      = swarm.NewClient(bzzapi)
	)

	if fi, err := os.Stat(dest); err == nil {
		if isRecursive && !fi.Mode().IsDir() {
			utils.Fatalf("destination path is not a directory!")
		}
	} else {
		if !os.IsNotExist(err) {
			utils.Fatalf("could not stat path: %v", err)
		}
	}

	uri, err := api.Parse(args[0])
	if err != nil {
		utils.Fatalf("could not parse uri argument: %v", err)
	}

	// assume behaviour according to --recursive switch
	if isRecursive {
		if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
			utils.Fatalf("encountered an error while downloading directory: %v", err)
		}
	} else {
		// we are downloading a file
		log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))

		err := client.DownloadFile(uri.Addr, uri.Path, dest)
		if err != nil {
			utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
		}
	}
}
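Illustrative usage of the new command (hash and paths are placeholders, not taken from the PR): a single file inside a manifest downloads to the working directory by default, while --recursive fetches a whole manifest into a target directory:

    swarm down bzz:/<hash>/index.html
    swarm down --recursive bzz:/<hash> ./site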
139
cmd/swarm/export_test.go
Normal file
@@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm"
)

// TestCLISwarmExportImport performs the following test:
// 1. runs a swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
	cluster := newTestCluster(t, 1)

	// generate random 10mb file
	f, cleanup := generateRandomFile(t, 10000000)
	defer cleanup()

	// upload the file with 'swarm up' and expect a hash
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
	up.ExpectExit()
	hash := matches[0]

	var info swarm.Info
	if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	cluster.Stop()
	defer cluster.Cleanup()

	// generate an export.tar
	exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
	exportCmd.ExpectExit()

	// start second cluster
	cluster2 := newTestCluster(t, 1)

	var info2 swarm.Info
	if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
		t.Fatal(err)
	}

	// stop second cluster, so that we close LevelDB
	cluster2.Stop()
	defer cluster2.Cleanup()

	// import the export.tar
	importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
	importCmd.ExpectExit()

	// spin second cluster back up
	cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))

	// try to fetch imported file
	res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
	if err != nil {
		t.Fatal(err)
	}

	if res.StatusCode != 200 {
		t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
	}

	// compare downloaded file with the generated random file
	mustEqualFiles(t, f, res.Body)
}

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
	h := md5.New()
	upLen, err := io.Copy(h, up)
	if err != nil {
		t.Fatal(err)
	}
	upHash := h.Sum(nil)
	h.Reset()
	downLen, err := io.Copy(h, down)
	if err != nil {
		t.Fatal(err)
	}
	downHash := h.Sum(nil)

	if !bytes.Equal(upHash, downHash) || upLen != downLen {
		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
	}
}

func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
	// create a tmp file
	tmp, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}

	// callback for tmp file cleanup
	teardown = func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}

	// write `size` bytes of random data to the file
	buf := make([]byte, size)
	_, err = rand.Read(buf)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile(tmp.Name(), buf, 0755)

	return tmp, teardown
}
127
cmd/swarm/fs.go
Normal file
@@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
	"gopkg.in/urfave/cli.v1"
)

func mount(cliContext *cli.Context) {
	args := cliContext.Args()
	if len(args) < 2 {
		utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
	}

	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := &fuse.MountInfo{}
	mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
	if err != nil {
		utils.Fatalf("error expanding path for mount point: %v", err)
	}
	err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
	if err != nil {
		utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
	}
}

func unmount(cliContext *cli.Context) {
	args := cliContext.Args()

	if len(args) < 1 {
		utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
	}
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
	}
	fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
}

func listMounts(cliContext *cli.Context) {
	client, err := dialRPC(cliContext)
	if err != nil {
		utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	mf := []fuse.MountInfo{}
	err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
	if err != nil {
		utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
	}
	if len(mf) == 0 {
		fmt.Print("Could not find any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
	} else {
		fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
		for i, mountInfo := range mf {
			fmt.Printf("%d:\n", i)
			fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
			fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
			fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
		}
	}
}

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
	var endpoint string

	if ctx.IsSet(utils.IPCPathFlag.Name) {
		endpoint = ctx.String(utils.IPCPathFlag.Name)
	} else {
		utils.Fatalf("swarm ipc endpoint not specified")
	}

	if endpoint == "" {
		endpoint = node.DefaultIPCEndpoint(clientIdentifier)
	} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		// Backwards compatibility with geth < 1.5 which required
		// these prefixes.
		endpoint = endpoint[4:]
	}
	return rpc.Dial(endpoint)
}
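A typical session against a locally running node, with placeholder hash and paths (not from the PR); note that unmount prints the latest manifest hash, which captures any writes made while the volume was mounted:

    swarm fs mount --ipcpath ~/.ethereum/bzzd.ipc <manifest hash> /tmp/swarmmount
    swarm fs list --ipcpath ~/.ethereum/bzzd.ipc
    swarm fs unmount --ipcpath ~/.ethereum/bzzd.ipc /tmp/swarmmount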
234
cmd/swarm/fs_test.go
Normal file
@@ -0,0 +1,234 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	colorable "github.com/mattn/go-colorable"
)

func init() {
	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

type testFile struct {
	filePath string
	content  string
}

// TestCLISwarmFs is a high-level test of swarmfs
func TestCLISwarmFs(t *testing.T) {
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	// create a tmp dir
	mountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "1st mount", mountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(mountPoint)

	handlingNode := cluster.Nodes[0]
	mhash := doUploadEmptyDir(t, handlingNode)
	log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	mount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mhash,
		mountPoint,
	}...)
	mount.ExpectExit()

	filesToAssert := []*testFile{}

	dirPath, err := createDirInDir(mountPoint, "testSubDir")
	if err != nil {
		t.Fatal(err)
	}
	dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
	if err != nil {
		t.Fatal(err)
	}

	dummyContent := "somerandomtestcontentthatshouldbeasserted"
	dirs := []string{
		mountPoint,
		dirPath,
		dirPath2,
	}
	files := []string{"f1.tmp", "f2.tmp"}
	for _, d := range dirs {
		for _, entry := range files {
			tFile, err := createTestFileInPath(d, entry, dummyContent)
			if err != nil {
				t.Fatal(err)
			}
			filesToAssert = append(filesToAssert, tFile)
		}
	}
	if len(filesToAssert) != len(dirs)*len(files) {
		t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
	}
	hashRegexp := `[a-f\d]{64}`
	log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmount := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		mountPoint,
	}...)
	_, matches := unmount.ExpectRegexp(hashRegexp)
	unmount.ExpectExit()

	hash := matches[0]
	if hash == mhash {
		t.Fatal("this should not be equal")
	}
	log.Debug("swarmfs cli test: asserting no files in mount point")

	//check that there's nothing in the mount folder
	filesInDir, err := ioutil.ReadDir(mountPoint)
	if err != nil {
		t.Fatalf("had an error reading the directory: %v", err)
	}

	if len(filesInDir) != 0 {
		t.Fatal("there shouldn't be anything here")
	}

	secondMountPoint, err := ioutil.TempDir("", "swarm-test")
	log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(secondMountPoint)

	log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	//remount, check files
	newMount := runSwarm(t, []string{
		"fs",
		"mount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		hash, // the latest hash
		secondMountPoint,
	}...)

	newMount.ExpectExit()
	time.Sleep(1 * time.Second)

	filesInDir, err = ioutil.ReadDir(secondMountPoint)
	if err != nil {
		t.Fatal(err)
	}

	if len(filesInDir) == 0 {
		t.Fatal("there should be something here")
	}

	log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")

	for _, file := range filesToAssert {
		file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
		fileBytes, err := ioutil.ReadFile(file.filePath)

		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
			t.Fatal("this should be equal")
		}
	}

	log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

	unmountSec := runSwarm(t, []string{
		"fs",
		"unmount",
		"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
		secondMountPoint,
	}...)

	_, matches = unmountSec.ExpectRegexp(hashRegexp)
	unmountSec.ExpectExit()

	if matches[0] != hash {
		t.Fatal("these should be equal - no changes made")
	}
}

func doUploadEmptyDir(t *testing.T, node *testNode) string {
	// create a tmp dir
	tmpDir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	hashRegexp := `[a-f\d]{64}`

	flags := []string{
		"--bzzapi", node.URL,
		"--recursive",
		"up",
		tmpDir}

	log.Info("swarmfs cli test: uploading dir with 'swarm up'")
	up := runSwarm(t, flags...)
	_, matches := up.ExpectRegexp(hashRegexp)
	up.ExpectExit()
	hash := matches[0]
	log.Info("swarmfs cli test: dir uploaded", "hash", hash)
	return hash
}

func createDirInDir(createInDir string, dirToCreate string) (string, error) {
	fullpath := filepath.Join(createInDir, dirToCreate)
	err := os.MkdirAll(fullpath, 0777)
	if err != nil {
		return "", err
	}
	return fullpath, nil
}

func createTestFileInPath(dir, filename, content string) (*testFile, error) {
	tFile := &testFile{}
	filePath := filepath.Join(dir, filename)
	if file, err := os.Create(filePath); err == nil {
		tFile.content = content
		tFile.filePath = filePath

		_, err = io.WriteString(file, content)
		if err != nil {
			return nil, err
		}
		file.Close()
	}

	return tFile, nil
}
@@ -38,11 +38,11 @@ func hash(ctx *cli.Context) {
 	defer f.Close()

 	stat, _ := f.Stat()
-	chunker := storage.NewTreeChunker(storage.NewChunkerParams())
-	key, err := chunker.Split(f, stat.Size(), nil, nil, nil)
+	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
+	addr, _, err := fileStore.Store(f, stat.Size(), false)
 	if err != nil {
 		utils.Fatalf("%v\n", err)
 	} else {
-		fmt.Printf("%v\n", key)
+		fmt.Printf("%v\n", addr)
 	}
 }
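The hash command now goes through the FileStore API shown above: Store chunks the reader into the given chunk store and returns the root address, a second value the command discards, and an error. A minimal sketch hashing an in-memory string with the same calls (assuming the storage package exactly as used in this hunk):

    package main

    import (
        "fmt"
        "strings"

        "github.com/ethereum/go-ethereum/swarm/storage"
    )

    func main() {
        data := "hello swarm"
        fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
        // false: compute the plain (unencrypted) swarm hash
        addr, _, err := fileStore.Store(strings.NewReader(data), int64(len(data)), false)
        if err != nil {
            panic(err)
        }
        fmt.Println(addr)
    }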
@@ -34,7 +34,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/internal/debug"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
@@ -49,6 +48,22 @@ import (
 )

 const clientIdentifier = "swarm"
+const helpTemplate = `NAME:
+{{.HelpName}} - {{.Usage}}
+
+USAGE:
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+{{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+{{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+	{{range .VisibleFlags}}{{.}}
+	{{end}}{{end}}
+`

 var (
 	gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@@ -87,10 +102,6 @@ var (
 		Usage:  "Network identifier (integer, default 3=swarm testnet)",
 		EnvVar: SWARM_ENV_NETWORK_ID,
 	}
-	SwarmConfigPathFlag = cli.StringFlag{
-		Name:  "bzzconfig",
-		Usage: "DEPRECATED: please use --config path/to/TOML-file",
-	}
 	SwarmSwapEnabledFlag = cli.BoolFlag{
 		Name:   "swap",
 		Usage:  "Swarm SWAP enabled (default false)",
@@ -101,10 +112,20 @@ var (
 		Usage:  "URL of the Ethereum API provider to use to settle SWAP payments",
 		EnvVar: SWARM_ENV_SWAP_API,
 	}
-	SwarmSyncEnabledFlag = cli.BoolTFlag{
-		Name:   "sync",
-		Usage:  "Swarm Syncing enabled (default true)",
-		EnvVar: SWARM_ENV_SYNC_ENABLE,
+	SwarmSyncDisabledFlag = cli.BoolTFlag{
+		Name:   "nosync",
+		Usage:  "Disable swarm syncing",
+		EnvVar: SWARM_ENV_SYNC_DISABLE,
 	}
+	SwarmSyncUpdateDelay = cli.DurationFlag{
+		Name:   "sync-update-delay",
+		Usage:  "Duration for sync subscriptions update after no new peers are added (default 15s)",
+		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
+	}
+	SwarmDeliverySkipCheckFlag = cli.BoolFlag{
+		Name:   "delivery-skip-check",
+		Usage:  "Skip chunk delivery check (default false)",
+		EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
+	}
 	EnsAPIFlag = cli.StringSliceFlag{
 		Name:   "ens-api",
@@ -116,7 +137,7 @@ var (
 		Usage: "Swarm HTTP endpoint",
 		Value: "http://127.0.0.1:8500",
 	}
-	SwarmRecursiveUploadFlag = cli.BoolFlag{
+	SwarmRecursiveFlag = cli.BoolFlag{
 		Name:  "recursive",
 		Usage: "Upload directories recursively",
 	}
@@ -136,20 +157,29 @@ var (
 		Name:  "mime",
 		Usage: "force mime type",
 	}
+	SwarmEncryptedFlag = cli.BoolFlag{
+		Name:  "encrypt",
+		Usage: "use encrypted upload",
+	}
 	CorsStringFlag = cli.StringFlag{
 		Name:   "corsdomain",
 		Usage:  "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
 		EnvVar: SWARM_ENV_CORS,
 	}
-
-	// the following flags are deprecated and should be removed in the future
-	DeprecatedEthAPIFlag = cli.StringFlag{
-		Name:  "ethapi",
-		Usage: "DEPRECATED: please use --ens-api and --swap-api",
+	SwarmStorePath = cli.StringFlag{
+		Name:   "store.path",
+		Usage:  "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
+		EnvVar: SWARM_ENV_STORE_PATH,
 	}
-	DeprecatedEnsAddrFlag = cli.StringFlag{
-		Name:  "ens-addr",
-		Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
+	SwarmStoreCapacity = cli.Uint64Flag{
+		Name:   "store.size",
+		Usage:  "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
+		EnvVar: SWARM_ENV_STORE_CAPACITY,
 	}
+	SwarmStoreCacheCapacity = cli.UintFlag{
+		Name:   "store.cache.size",
+		Usage:  "Number of recent chunks cached in memory (default 5000)",
+		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
+	}
 )
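Each of the new flags carries an EnvVar field, so the same setting can come from the command line or the environment; for example (values illustrative):

    swarm --nosync --store.size 10000000 --store.cache.size 10000

Note that SWARM_ENV_SYNC_DISABLE and the other SWARM_ENV_* identifiers above are Go constants holding the variable names; the literal environment variable names are defined in config.go and not shown in this diff.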
@@ -180,91 +210,130 @@ func init() {
 	app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
 	app.Commands = []cli.Command{
 		{
-			Action:    version,
-			Name:      "version",
-			Usage:     "Print version numbers",
-			ArgsUsage: " ",
-			Description: `
-The output of this command is supposed to be machine-readable.
-`,
+			Action:             version,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "version",
+			Usage:              "Print version numbers",
+			Description:        "The output of this command is supposed to be machine-readable",
 		},
 		{
-			Action:    upload,
-			Name:      "up",
-			Usage:     "upload a file or directory to swarm using the HTTP API",
-			ArgsUsage: " <file>",
-			Description: `
-"upload a file or directory to swarm using the HTTP API and prints the root hash",
-`,
+			Action:             upload,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "up",
+			Usage:              "uploads a file or directory to swarm using the HTTP API",
+			ArgsUsage:          "<file>",
+			Flags:              []cli.Flag{SwarmEncryptedFlag},
+			Description:        "uploads a file or directory to swarm using the HTTP API and prints the root hash",
 		},
 		{
-			Action:    list,
-			Name:      "ls",
-			Usage:     "list files and directories contained in a manifest",
-			ArgsUsage: " <manifest> [<prefix>]",
-			Description: `
-Lists files and directories contained in a manifest.
-`,
+			Action:             list,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "ls",
+			Usage:              "list files and directories contained in a manifest",
+			ArgsUsage:          "<manifest> [<prefix>]",
+			Description:        "Lists files and directories contained in a manifest",
 		},
 		{
-			Action:    hash,
-			Name:      "hash",
-			Usage:     "print the swarm hash of a file or directory",
-			ArgsUsage: " <file>",
-			Description: `
-Prints the swarm hash of file or directory.
-`,
+			Action:             hash,
+			CustomHelpTemplate: helpTemplate,
+			Name:               "hash",
+			Usage:              "print the swarm hash of a file or directory",
+			ArgsUsage:          "<file>",
+			Description:        "Prints the swarm hash of file or directory",
 		},
 		{
-			Name:      "manifest",
-			Usage:     "update a MANIFEST",
-			ArgsUsage: "manifest COMMAND",
+			Action:    download,
+			Name:      "down",
+			Flags:     []cli.Flag{SwarmRecursiveFlag},
+			Usage:     "downloads a swarm manifest or a file inside a manifest",
+			ArgsUsage: " <uri> [<dir>]",
 			Description: `
-Updates a MANIFEST by adding/removing/updating the hash of a path.
+Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
 `,
 		},
+
 		{
+			Name:               "manifest",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "perform operations on swarm manifests",
+			ArgsUsage:          "COMMAND",
+			Description:        "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
 			Subcommands: []cli.Command{
 				{
-					Action:    add,
-					Name:      "add",
-					Usage:     "add a new path to the manifest",
-					ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
-					Description: `
-Adds a new path to the manifest
-`,
+					Action:             add,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "add",
+					Usage:              "add a new path to the manifest",
+					ArgsUsage:          "<MANIFEST> <path> <hash> [<content-type>]",
+					Description:        "Adds a new path to the manifest",
 				},
 				{
-					Action:    update,
-					Name:      "update",
-					Usage:     "update the hash for an already existing path in the manifest",
-					ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
-					Description: `
-Update the hash for an already existing path in the manifest
-`,
+					Action:             update,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "update",
+					Usage:              "update the hash for an already existing path in the manifest",
+					ArgsUsage:          "<MANIFEST> <path> <newhash> [<newcontent-type>]",
+					Description:        "Update the hash for an already existing path in the manifest",
 				},
 				{
-					Action:    remove,
-					Name:      "remove",
-					Usage:     "removes a path from the manifest",
-					ArgsUsage: "<MANIFEST> <path>",
-					Description: `
-Removes a path from the manifest
-`,
+					Action:             remove,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "remove",
+					Usage:              "removes a path from the manifest",
+					ArgsUsage:          "<MANIFEST> <path>",
+					Description:        "Removes a path from the manifest",
 				},
 			},
 		},
 		{
-			Name:      "db",
-			Usage:     "manage the local chunk database",
-			ArgsUsage: "db COMMAND",
-			Description: `
-Manage the local chunk database.
-`,
+			Name:               "fs",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "perform FUSE operations",
+			ArgsUsage:          "fs COMMAND",
+			Description:        "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operations you must reference the correct path to bzzd.ipc in order to communicate with the node",
 			Subcommands: []cli.Command{
 				{
-					Action:    dbExport,
-					Name:      "export",
-					Usage:     "export a local chunk database as a tar archive (use - to send to stdout)",
-					ArgsUsage: "<chunkdb> <file>",
+					Action:             mount,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "mount",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "mount a swarm hash to a mount point",
+					ArgsUsage:          "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
+					Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+				{
+					Action:             unmount,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "unmount",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "unmount a swarmfs mount",
+					ArgsUsage:          "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
+					Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+				{
+					Action:             listMounts,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "list",
+					Flags:              []cli.Flag{utils.IPCPathFlag},
+					Usage:              "list swarmfs mounts",
+					ArgsUsage:          "swarm fs list --ipcpath <path to bzzd.ipc>",
+					Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+				},
+			},
+		},
+		{
+			Name:               "db",
+			CustomHelpTemplate: helpTemplate,
+			Usage:              "manage the local chunk database",
+			ArgsUsage:          "db COMMAND",
+			Description:        "Manage the local chunk database",
+			Subcommands: []cli.Command{
+				{
+					Action:             dbExport,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "export",
+					Usage:              "export a local chunk database as a tar archive (use - to send to stdout)",
+					ArgsUsage:          "<chunkdb> <file>",
 					Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
@@ -277,10 +346,11 @@ pv(1) tool to get a progress bar:
 `,
 				},
 				{
-					Action:    dbImport,
-					Name:      "import",
-					Usage:     "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
-					ArgsUsage: "<chunkdb> <file>",
+					Action:             dbImport,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "import",
+					Usage:              "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+					ArgsUsage:          "<chunkdb> <file>",
 					Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
@@ -293,27 +363,16 @@ pv(1) tool to get a progress bar:
 `,
 				},
 				{
-					Action:    dbClean,
-					Name:      "clean",
-					Usage:     "remove corrupt entries from a local chunk database",
-					ArgsUsage: "<chunkdb>",
-					Description: `
-Remove corrupt entries from a local chunk database.
-`,
+					Action:             dbClean,
+					CustomHelpTemplate: helpTemplate,
+					Name:               "clean",
+					Usage:              "remove corrupt entries from a local chunk database",
+					ArgsUsage:          "<chunkdb>",
+					Description:        "Remove corrupt entries from a local chunk database",
 				},
 			},
 		},
-		{
-			Action: func(ctx *cli.Context) {
-				utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
-			},
-			Name:      "cleandb",
-			Usage:     "DEPRECATED: use 'swarm db clean'",
-			ArgsUsage: " ",
-			Description: `
-DEPRECATED: use 'swarm db clean'.
-`,
-		},

 		// See config.go
 		DumpConfigCommand,
 	}
@@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'.
 		CorsStringFlag,
 		EnsAPIFlag,
 		SwarmTomlConfigPathFlag,
-		SwarmConfigPathFlag,
 		SwarmSwapEnabledFlag,
 		SwarmSwapAPIFlag,
-		SwarmSyncEnabledFlag,
+		SwarmSyncDisabledFlag,
+		SwarmSyncUpdateDelay,
+		SwarmDeliverySkipCheckFlag,
 		SwarmListenAddrFlag,
 		SwarmPortFlag,
 		SwarmAccountFlag,
@@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'.
 		ChequebookAddrFlag,
 		// upload flags
 		SwarmApiFlag,
-		SwarmRecursiveUploadFlag,
+		SwarmRecursiveFlag,
 		SwarmWantManifestFlag,
 		SwarmUploadDefaultPath,
 		SwarmUpFromStdinFlag,
 		SwarmUploadMimeType,
-		//deprecated flags
-		DeprecatedEthAPIFlag,
-		DeprecatedEnsAddrFlag,
+		// storage flags
+		SwarmStorePath,
+		SwarmStoreCapacity,
+		SwarmStoreCacheCapacity,
 	}
+	rpcFlags := []cli.Flag{
+		utils.WSEnabledFlag,
+		utils.WSListenAddrFlag,
+		utils.WSPortFlag,
+		utils.WSApiFlag,
+		utils.WSAllowedOriginsFlag,
+	}
+	app.Flags = append(app.Flags, rpcFlags...)
 	app.Flags = append(app.Flags, debug.Flags...)
 	app.Flags = append(app.Flags, swarmmetrics.Flags...)
 	app.Before = func(ctx *cli.Context) error {
@@ -383,16 +452,12 @@ func main() {
 }

 func version(ctx *cli.Context) error {
 	fmt.Println(strings.Title(clientIdentifier))
-	fmt.Println("Version:", params.Version)
+	fmt.Println("Version:", SWARM_VERSION)
 	if gitCommit != "" {
 		fmt.Println("Git Commit:", gitCommit)
 	}
-	fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
 	fmt.Println("Go Version:", runtime.Version())
 	fmt.Println("OS:", runtime.GOOS)
-	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
-	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
 	return nil
 }
@@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error {
 	}

 	cfg := defaultNodeConfig
+
+	//pss operates on ws
+	cfg.WSModules = append(cfg.WSModules, "pss")
+
 	//geth only supports --datadir via command line
 	//in order to be consistent within swarm, if we pass --datadir via environment variable
 	//or via config file, we get the same directory for geth and swarm
@@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error {
 	//due to overriding behavior
 	initSwarmNode(bzzconfig, stack, ctx)
 	//register BZZ as node.Service in the ethereum node
-	registerBzzService(bzzconfig, ctx, stack)
+	registerBzzService(bzzconfig, stack)
 	//start the node
 	utils.StartNode(stack)

@@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error {
 		bootnodes := strings.Split(bzzconfig.BootNodes, ",")
 		injectBootnodes(stack.Server(), bootnodes)
 	} else {
-		if bzzconfig.NetworkId == 3 {
+		if bzzconfig.NetworkID == 3 {
 			injectBootnodes(stack.Server(), testbetBootNodes)
 		}
 	}
@@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error {
 	return nil
 }

-func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
-
+func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
 	//define the swarm service boot function
-	boot := func(ctx *node.ServiceContext) (node.Service, error) {
-		var swapClient *ethclient.Client
-		var err error
-		if bzzconfig.SwapApi != "" {
-			log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
-			swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
-			if err != nil {
-				return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
-			}
-		}
-
-		return swarm.NewSwarm(ctx, swapClient, bzzconfig)
+	boot := func(_ *node.ServiceContext) (node.Service, error) {
+		// In production, mockStore must be always nil.
+		return swarm.NewSwarm(bzzconfig, nil)
 	}
 	//register within the ethereum node
 	if err := stack.Register(boot); err != nil {
@@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
longestPathEntry = api.ManifestEntry{}
)

mroot, err := client.DownloadManifest(mhash)
mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}

//TODO: check if the "hash" to add is valid and present in swarm
_, err = client.DownloadManifest(hash)
_, _, err = client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Hash to add is not present: %v", err)
}
@@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
mroot.Entries = append(mroot.Entries, newEntry)
}

newManifestHash, err := client.UploadManifest(mroot)
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
@@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
longestPathEntry = api.ManifestEntry{}
)

mroot, err := client.DownloadManifest(mhash)
mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
@@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
mroot = newMRoot
}

newManifestHash, err := client.UploadManifest(mroot)
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
@@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
longestPathEntry = api.ManifestEntry{}
)

mroot, err := client.DownloadManifest(mhash)
mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
@@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
mroot = newMRoot
}

newManifestHash, err := client.UploadManifest(mroot)
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
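All three manifest commands now thread an `isEncrypted` flag from `DownloadManifest` back into `UploadManifest`, so an encrypted manifest stays encrypted across edits. A condensed sketch of that round trip, assuming the `swarm/api` and `swarm/api/client` packages used above (the `updateManifest` helper and the manifest hash are hypothetical):

```go
package main

import (
	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

// updateManifest appends an entry to a manifest and re-uploads it,
// preserving the manifest's encryption state.
func updateManifest(client *swarm.Client, mhash string, entry api.ManifestEntry) (string, error) {
	// DownloadManifest now also reports whether the manifest is encrypted.
	mroot, isEncrypted, err := client.DownloadManifest(mhash)
	if err != nil {
		return "", err
	}
	mroot.Entries = append(mroot.Entries, entry)
	// The flag goes straight back into UploadManifest.
	return client.UploadManifest(mroot, isEncrypted)
}

func main() {
	client := swarm.NewClient("http://localhost:8500")
	// Dummy manifest hash purely for illustration.
	_, _ = updateManifest(client, "feed...beef", api.ManifestEntry{Path: "readme", ContentType: "text/plain"})
}
```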
@@ -81,6 +81,7 @@ type testCluster struct {
//
// When starting more than one node, they are connected together using the
// admin SetPeer RPC method.

func newTestCluster(t *testing.T, size int) *testCluster {
cluster := &testCluster{}
defer func() {
@@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster {
cluster.TmpDir = tmpdir

// start the nodes
cluster.Nodes = make([]*testNode, 0, size)
for i := 0; i < size; i++ {
dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
if err := os.Mkdir(dir, 0700); err != nil {
t.Fatal(err)
}

node := newTestNode(t, dir)
node.Name = fmt.Sprintf("swarm%02d", i)

cluster.Nodes = append(cluster.Nodes, node)
}
cluster.StartNewNodes(t, size)

if size == 1 {
return cluster
@@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() {
os.RemoveAll(c.TmpDir)
}

func (c *testCluster) Stop() {
for _, node := range c.Nodes {
node.Shutdown()
}
}

func (c *testCluster) StartNewNodes(t *testing.T, size int) {
c.Nodes = make([]*testNode, 0, size)
for i := 0; i < size; i++ {
dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
if err := os.Mkdir(dir, 0700); err != nil {
t.Fatal(err)
}

node := newTestNode(t, dir)
node.Name = fmt.Sprintf("swarm%02d", i)

c.Nodes = append(c.Nodes, node)
}
}

func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
c.Nodes = make([]*testNode, 0, size)
for i := 0; i < size; i++ {
dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
node := existingTestNode(t, dir, bzzaccount)
node.Name = fmt.Sprintf("swarm%02d", i)

c.Nodes = append(c.Nodes, node)
}
}

func (c *testCluster) Cleanup() {
os.RemoveAll(c.TmpDir)
}

type testNode struct {
Name string
Addr string
URL string
Enode string
Dir string
Client *rpc.Client
Cmd *cmdtest.TestCmd
Name string
Addr string
URL string
Enode string
Dir string
IpcPath string
Client *rpc.Client
Cmd *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"
@@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accoun
return conf, account
}

func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
conf, _ := getTestAccount(t, dir)
node := &testNode{Dir: dir}

// use a unique IPCPath when running tests on Windows
if runtime.GOOS == "windows" {
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
}

// assign ports
httpPort, err := assignTCPPort()
if err != nil {
t.Fatal(err)
}
p2pPort, err := assignTCPPort()
if err != nil {
t.Fatal(err)
}

// start the node
node.Cmd = runSwarm(t,
"--port", p2pPort,
"--nodiscover",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
"--ens-api", "",
"--bzzaccount", bzzaccount,
"--bzznetworkid", "321",
"--bzzport", httpPort,
"--verbosity", "6",
)
node.Cmd.InputLine(testPassphrase)
defer func() {
if t.Failed() {
node.Shutdown()
}
}()

// wait for the node to start
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
node.Client, err = rpc.Dial(conf.IPCEndpoint())
if err == nil {
break
}
}
if node.Client == nil {
t.Fatal(err)
}

// load info
var info swarm.Info
if err := node.Client.Call(&info, "bzz_info"); err != nil {
t.Fatal(err)
}
node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
node.URL = "http://" + node.Addr

var nodeInfo p2p.NodeInfo
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)

return node
}

func newTestNode(t *testing.T, dir string) *testNode {

conf, account := getTestAccount(t, dir)
@@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
node.IpcPath = conf.IPCPath

return node
}

101 cmd/swarm/swarm-smoke/main.go Normal file
@@ -0,0 +1,101 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
"os"
"sort"

"github.com/ethereum/go-ethereum/log"
colorable "github.com/mattn/go-colorable"

cli "gopkg.in/urfave/cli.v1"
)

var (
endpoints []string
includeLocalhost bool
cluster string
scheme string
filesize int
from int
to int
)

func main() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))

app := cli.NewApp()
app.Name = "smoke-test"
app.Usage = ""

app.Flags = []cli.Flag{
cli.StringFlag{
Name: "cluster-endpoint",
Value: "testing",
Usage: "cluster to point to (open, or testing)",
Destination: &cluster,
},
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
Usage: "swarm node (from)",
Destination: &from,
},
cli.IntFlag{
Name: "cluster-to",
Value: 8512,
Usage: "swarm node (to)",
Destination: &to,
},
cli.StringFlag{
Name: "cluster-scheme",
Value: "http",
Usage: "http or https",
Destination: &scheme,
},
cli.BoolFlag{
Name: "include-localhost",
Usage: "whether to include localhost:8500 as an endpoint",
Destination: &includeLocalhost,
},
cli.IntFlag{
Name: "filesize",
Value: 1,
Usage: "file size for generated random file in MB",
Destination: &filesize,
},
}

app.Commands = []cli.Command{
{
Name: "upload_and_sync",
Aliases: []string{"c"},
Usage: "upload and sync",
Action: cliUploadAndSync,
},
}

sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.CommandsByName(app.Commands))

err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
}
}

184 cmd/swarm/swarm-smoke/upload_and_sync.go Normal file
@@ -0,0 +1,184 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
"bytes"
"crypto/md5"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"sync"
"time"

"github.com/ethereum/go-ethereum/log"
"github.com/pborman/uuid"

cli "gopkg.in/urfave/cli.v1"
)

func generateEndpoints(scheme string, cluster string, from int, to int) {
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
}

if includeLocalhost {
endpoints = append(endpoints, "http://localhost:8500")
}
}

func cliUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())

generateEndpoints(scheme, cluster, from, to)

log.Info("uploading to " + endpoints[0] + " and syncing")

f, cleanup := generateRandomFile(filesize * 1000000)
defer cleanup()

hash, err := upload(f, endpoints[0])
if err != nil {
log.Error(err.Error())
return err
}

fhash, err := digest(f)
if err != nil {
log.Error(err.Error())
return err
}

log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))

if filesize < 10 {
time.Sleep(15 * time.Second)
} else {
time.Sleep(2 * time.Duration(filesize) * time.Second)
}

wg := sync.WaitGroup{}
for _, endpoint := range endpoints {
endpoint := endpoint
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
err := fetch(hash, endpoint, fhash, ruid)
if err != nil {
continue
}

wg.Done()
return
}
}(endpoint, ruid)
}
wg.Wait()
log.Info("all endpoints synced random file successfully")

return nil
}

// fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
log.Trace("sleeping", "ruid", ruid)
time.Sleep(1 * time.Second)

log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)

if res.StatusCode != 200 {
err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
log.Warn(err.Error(), "ruid", ruid)
return err
}

defer res.Body.Close()

rdigest, err := digest(res.Body)
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
return err
}

if !bytes.Equal(rdigest, original) {
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
log.Warn(err.Error(), "ruid", ruid)
return err
}

log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

return nil
}

// upload is uploading a file `f` to `endpoint` via the `swarm up` cmd
func upload(f *os.File, endpoint string) (string, error) {
var out bytes.Buffer
cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", err
}
hash := strings.TrimRight(out.String(), "\r\n")
return hash, nil
}

func digest(r io.Reader) ([]byte, error) {
h := md5.New()
_, err := io.Copy(h, r)
if err != nil {
return nil, err
}
return h.Sum(nil), nil
}

// generateRandomFile is creating a temporary file with the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
panic(err)
}

// callback for tmp file cleanup
teardown = func() {
tmp.Close()
os.Remove(tmp.Name())
}

buf := make([]byte, size)
_, err = rand.Read(buf)
if err != nil {
panic(err)
}
ioutil.WriteFile(tmp.Name(), buf, 0755)

return tmp, teardown
}
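The smoke test decides success by comparing MD5 digests of the generated file and of each HTTP response body. The helper is reproduced standalone below as a quick sketch of that check:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
)

// digest hashes an arbitrary reader, mirroring the helper above.
func digest(r io.Reader) ([]byte, error) {
	h := md5.New()
	if _, err := io.Copy(h, r); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

func main() {
	want, _ := digest(bytes.NewReader([]byte("payload")))
	got, _ := digest(bytes.NewReader([]byte("payload")))
	fmt.Println(bytes.Equal(got, want)) // true: same bytes, same digest
}
```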
@@ -40,12 +40,13 @@ func upload(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name)
recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name)
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name)
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name)
client = swarm.NewClient(bzzapi)
toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name)
file string
)

@@ -76,7 +77,7 @@ func upload(ctx *cli.Context) {
utils.Fatalf("Error opening file: %s", err)
}
defer f.Close()
hash, err := client.UploadRaw(f, f.Size)
hash, err := client.UploadRaw(f, f.Size, toEncrypt)
if err != nil {
utils.Fatalf("Upload failed: %s", err)
}
@@ -97,7 +98,7 @@ func upload(ctx *cli.Context) {
if !recursive {
return "", errors.New("Argument is a directory and recursive upload is disabled")
}
return client.UploadDirectory(file, defaultPath, "")
return client.UploadDirectory(file, defaultPath, "", toEncrypt)
}
} else {
doUpload = func() (string, error) {
@@ -110,7 +111,7 @@ func upload(ctx *cli.Context) {
mimeType = detectMimeType(file)
}
f.ContentType = mimeType
return client.Upload(f, "")
return client.Upload(f, "", toEncrypt)
}
}
hash, err := doUpload()
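Every upload path in the CLI now takes a trailing `toEncrypt` flag, wired to the new `--encrypt` option. A sketch of an encrypted upload through the same client API (the local file path is hypothetical):

```go
package main

import (
	"fmt"

	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	client := swarm.NewClient("http://localhost:8500")
	f, err := swarm.Open("/tmp/hello.txt") // hypothetical local file
	if err != nil {
		panic(err)
	}
	// The trailing bool is the new toEncrypt flag; judging by the test
	// regexps below, an encrypted upload yields a 128-hex-character
	// reference instead of the usual 64.
	hash, err := client.Upload(f, "", true)
	if err != nil {
		panic(err)
	}
	fmt.Println("reference:", hash)
}
```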
@@ -17,60 +17,259 @@
package main

import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"

"github.com/ethereum/go-ethereum/log"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
colorable "github.com/mattn/go-colorable"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
// start 3 node cluster
t.Log("starting 3 node cluster")
testCLISwarmUp(false, t)
}
func TestCLISwarmUpRecursive(t *testing.T) {
testCLISwarmUpRecursive(false, t)
}

// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUpEncrypted(t *testing.T) {
testCLISwarmUp(true, t)
}
func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
testCLISwarmUpRecursive(true, t)
}

func testCLISwarmUp(toEncrypt bool, t *testing.T) {
log.Info("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()

// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
assertNil(t, err)
defer tmp.Close()
defer os.Remove(tmp.Name())
_, err = io.WriteString(tmp, "data")
assertNil(t, err)

// upload the file with 'swarm up' and expect a hash
t.Log("uploading file with 'swarm up'")
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
up.ExpectExit()
hash := matches[0]
t.Logf("file uploaded with hash %s", hash)

// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
t.Logf("getting file from %s", node.Name)
res, err := http.Get(node.URL + "/bzz:/" + hash)
assertNil(t, err)
assertHTTPResponse(t, res, http.StatusOK, "data")
}
}

func assertNil(t *testing.T, err error) {
if err != nil {
t.Fatal(err)
}
defer tmp.Close()
defer os.Remove(tmp.Name())

// write data to file
data := "notsorandomdata"
_, err = io.WriteString(tmp, data)
if err != nil {
t.Fatal(err)
}

hashRegexp := `[a-f\d]{64}`
flags := []string{
"--bzzapi", cluster.Nodes[0].URL,
"up",
tmp.Name()}
if toEncrypt {
hashRegexp = `[a-f\d]{128}`
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"up",
"--encrypt",
tmp.Name()}
}
// upload the file with 'swarm up' and expect a hash
log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
up := runSwarm(t, flags...)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
log.Info("file uploaded", "hash", hash)

// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
log.Info("getting file from node", "node", node.Name)

res, err := http.Get(node.URL + "/bzz:/" + hash)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()

reply, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("expected HTTP status 200, got %s", res.Status)
}
if string(reply) != data {
t.Fatalf("expected HTTP body %q, got %q", data, reply)
}
log.Debug("verifying uploaded file using `swarm down`")
//try to get the content with `swarm down`
tmpDownload, err := ioutil.TempDir("", "swarm-test")
tmpDownload = path.Join(tmpDownload, "tmpfile.tmp")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDownload)

bzzLocator := "bzz:/" + hash
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"down",
bzzLocator,
tmpDownload,
}

down := runSwarm(t, flags...)
down.ExpectExit()

fi, err := os.Stat(tmpDownload)
if err != nil {
t.Fatalf("could not stat path: %v", err)
}

switch mode := fi.Mode(); {
case mode.IsRegular():
downloadedBytes, err := ioutil.ReadFile(tmpDownload)
if err != nil {
t.Fatalf("had an error reading the downloaded file: %v", err)
}
if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
t.Fatalf("retrieved data and posted data not equal!")
}

default:
t.Fatalf("expected to download regular file, got %s", fi.Mode())
}
}

timeout := time.Duration(2 * time.Second)
httpClient := http.Client{
Timeout: timeout,
}

// try to squeeze a timeout by getting an non-existent hash from each node
for _, node := range cluster.Nodes {
_, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
// we're speeding up the timeout here since netstore has a 60 seconds timeout on a request
if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
t.Fatal(err)
}
// this is disabled since it takes 60s due to netstore timeout
// if res.StatusCode != 404 {
// t.Fatalf("expected HTTP status 404, got %s", res.Status)
// }
}
}

func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
defer res.Body.Close()
if res.StatusCode != expectedStatus {
t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
fmt.Println("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()

tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(res.Body)
assertNil(t, err)
if string(data) != expectedBody {
t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
defer os.RemoveAll(tmpUploadDir)
// create tmp files
data := "notsorandomdata"
for _, path := range []string{"tmp1", "tmp2"} {
if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
t.Fatal(err)
}
}

hashRegexp := `[a-f\d]{64}`
flags := []string{
"--bzzapi", cluster.Nodes[0].URL,
"--recursive",
"up",
tmpUploadDir}
if toEncrypt {
hashRegexp = `[a-f\d]{128}`
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"--recursive",
"up",
"--encrypt",
tmpUploadDir}
}
// upload the file with 'swarm up' and expect a hash
log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
up := runSwarm(t, flags...)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
log.Info("dir uploaded", "hash", hash)

// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
log.Info("getting file from node", "node", node.Name)
//try to get the content with `swarm down`
tmpDownload, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDownload)
bzzLocator := "bzz:/" + hash
flagss := []string{}
flagss = []string{
"--bzzapi", cluster.Nodes[0].URL,
"down",
"--recursive",
bzzLocator,
tmpDownload,
}

fmt.Println("downloading from swarm with recursive")
down := runSwarm(t, flagss...)
down.ExpectExit()

files, err := ioutil.ReadDir(tmpDownload)
for _, v := range files {
fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
if err != nil {
t.Fatalf("got an error: %v", err)
}

switch mode := fi.Mode(); {
case mode.IsRegular():
if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
} else {
ff := make([]byte, len(data))
io.ReadFull(file, ff)
buf := bytes.NewBufferString(data)

if !bytes.Equal(ff, buf.Bytes()) {
t.Fatalf("retrieved data and posted data not equal!")
}
}
default:
t.Fatalf("this shouldnt happen")
}
}
if err != nil {
t.Fatalf("could not list files at: %v", files)
}
}
}
@@ -27,6 +27,7 @@ import (
"runtime"
"strconv"
"strings"
"time"

"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -48,6 +49,7 @@ import (
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/influxdb"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
@@ -360,10 +362,6 @@ var (
Name: "ethstats",
Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)",
}
MetricsEnabledFlag = cli.BoolFlag{
Name: metrics.MetricsEnabledFlag,
Usage: "Enable metrics collection and reporting",
}
FakePoWFlag = cli.BoolFlag{
Name: "fakepow",
Usage: "Disables proof-of-work verification",
@@ -532,6 +530,45 @@ var (
Usage: "Minimum POW accepted",
Value: whisper.DefaultMinimumPoW,
}

// Metrics flags
MetricsEnabledFlag = cli.BoolFlag{
Name: metrics.MetricsEnabledFlag,
Usage: "Enable metrics collection and reporting",
}
MetricsEnableInfluxDBFlag = cli.BoolFlag{
Name: "metrics.influxdb",
Usage: "Enable metrics export/push to an external InfluxDB database",
}
MetricsInfluxDBEndpointFlag = cli.StringFlag{
Name: "metrics.influxdb.endpoint",
Usage: "InfluxDB API endpoint to report metrics to",
Value: "http://localhost:8086",
}
MetricsInfluxDBDatabaseFlag = cli.StringFlag{
Name: "metrics.influxdb.database",
Usage: "InfluxDB database name to push reported metrics to",
Value: "geth",
}
MetricsInfluxDBUsernameFlag = cli.StringFlag{
Name: "metrics.influxdb.username",
Usage: "Username to authorize access to the database",
Value: "test",
}
MetricsInfluxDBPasswordFlag = cli.StringFlag{
Name: "metrics.influxdb.password",
Usage: "Password to authorize access to the database",
Value: "test",
}
// The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
// It is used so that we can group all nodes and average a measurement across all of them, but also so
// that we can select a specific node and inspect its measurements.
// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
MetricsInfluxDBHostTagFlag = cli.StringFlag{
Name: "metrics.influxdb.host.tag",
Usage: "InfluxDB `host` tag attached to all measurements",
Value: "localhost",
}
)

// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1084,6 +1121,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
}
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337
}
// Create new developer account or reuse existing one
var (
developer accounts.Account
@@ -1181,6 +1221,27 @@ func SetupNetwork(ctx *cli.Context) {
params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
}

func SetupMetrics(ctx *cli.Context) {
if metrics.Enabled {
log.Info("Enabling metrics collection")
var (
enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name)
endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
hosttag = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
)

if enableExport {
log.Info("Enabling metrics export to InfluxDB")
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", map[string]string{
"host": hosttag,
})
}
}
}

// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
var (
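The new flags feed straight into `influxdb.InfluxDBWithTags`, which polls a metrics registry and pushes each measurement with the configured `host` tag. A minimal sketch of the same exporter wiring outside geth, assuming the vendored `metrics` and `metrics/influxdb` packages imported above (the counter name and host tag are made up for illustration):

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/metrics/influxdb"
)

func main() {
	// Register a sample counter so the exporter has something to push.
	counter := metrics.NewRegisteredCounter("demo/requests", metrics.DefaultRegistry)
	counter.Inc(1)

	// Push the default registry every 10s, tagging each point with
	// host=node-01 so per-node queries stay cheap (tags are indexed in
	// InfluxDB). Endpoint and credentials mirror the flag defaults above.
	go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second,
		"http://localhost:8086", "geth", "test", "test", "geth.",
		map[string]string{"host": "node-01"})

	select {} // keep the process alive so the reporter can run
}
```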
@@ -39,6 +39,7 @@ import (

const uintBits = 32 << (uint64(^uint(0)) >> 63)

// Errors
var (
ErrEmptyString = &decError{"empty hex string"}
ErrSyntax = &decError{"invalid hex string"}
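The `uintBits` constant computes the platform word size without build tags: `^uint(0)` is an all-ones `uint`, shifting it right by 63 bits yields 1 on 64-bit targets and 0 on 32-bit ones, so the expression evaluates to 64 or 32. A quick self-contained check:

```go
package main

import "fmt"

func main() {
	// The top bit of the all-ones uint survives the 63-bit shift
	// only when uint is 64 bits wide.
	const uintBits = 32 << (uint64(^uint(0)) >> 63)
	fmt.Println(uintBits) // 64 on amd64/arm64, 32 on 386/arm
}
```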
@@ -22,12 +22,13 @@ import (
"math/big"
)

// Various big integer limit values.
var (
tt255 = BigPow(2, 255)
tt256 = BigPow(2, 256)
tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
MaxBig256 = new(big.Int).Set(tt256m1)
tt63 = BigPow(2, 63)
MaxBig256 = new(big.Int).Set(tt256m1)
MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
)

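The new `MaxBig63` (2^63−1) gives the package a ready-made bound for big integers that must fit a 64-bit word. A sketch of the kind of overflow guard it enables:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	v := new(big.Int).Lsh(big.NewInt(1), 70) // deliberately larger than 2^63-1
	if v.Cmp(math.MaxBig63) > 0 {
		fmt.Println("value overflows int64; clamp or reject it")
	}
}
```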
@@ -21,8 +21,8 @@ import (
"strconv"
)

// Integer limit values.
const (
// Integer limit values.
MaxInt8 = 1<<7 - 1
MinInt8 = -1 << 7
MaxInt16 = 1<<15 - 1
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// package mclock is a wrapper for a monotonic clock source
// Package mclock is a wrapper for a monotonic clock source
package mclock

import (
@@ -23,8 +23,10 @@ import (
"github.com/aristanetworks/goarista/monotime"
)

type AbsTime time.Duration // absolute monotonic time
// AbsTime represents absolute monotonic time.
type AbsTime time.Duration

// Now returns the current absolute monotonic time.
func Now() AbsTime {
return AbsTime(monotime.Now())
}
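Monotonic time is immune to wall-clock adjustments, which is why protocol timeouts are measured with it. A sketch of timing an interval with the package; since `AbsTime` is a `time.Duration` under the hood, the difference converts directly:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	start := mclock.Now()
	time.Sleep(100 * time.Millisecond)
	// Subtracting two AbsTime values yields a monotonic elapsed duration,
	// unaffected by NTP steps or manual clock changes.
	elapsed := time.Duration(mclock.Now() - start)
	fmt.Println("elapsed:", elapsed)
}
```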
@@ -1,196 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package number

import (
"math/big"

"github.com/ethereum/go-ethereum/common"
)

var tt256 = new(big.Int).Lsh(big.NewInt(1), 256)
var tt256m1 = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
var tt255 = new(big.Int).Lsh(big.NewInt(1), 255)

func limitUnsigned256(x *Number) *Number {
x.num.And(x.num, tt256m1)
return x
}

func limitSigned256(x *Number) *Number {
if x.num.Cmp(tt255) < 0 {
return x
}
x.num.Sub(x.num, tt256)
return x
}

// Initialiser is a Number function
type Initialiser func(n int64) *Number

// A Number represents a generic integer with a bounding function limiter. Limit is called after each operations
// to give "fake" bounded integers. New types of Number can be created through NewInitialiser returning a lambda
// with the new Initialiser.
type Number struct {
num *big.Int
limit func(n *Number) *Number
}

// NewInitialiser returns a new initialiser for a new *Number without having to expose certain fields
func NewInitialiser(limiter func(*Number) *Number) Initialiser {
return func(n int64) *Number {
return &Number{big.NewInt(n), limiter}
}
}

// Uint256 returns a Number with a UNSIGNED limiter up to 256 bits
func Uint256(n int64) *Number {
return &Number{big.NewInt(n), limitUnsigned256}
}

// Int256 returns Number with a SIGNED limiter up to 256 bits
func Int256(n int64) *Number {
return &Number{big.NewInt(n), limitSigned256}
}

// Big returns a Number with a SIGNED unlimited size
func Big(n int64) *Number {
return &Number{big.NewInt(n), func(x *Number) *Number { return x }}
}

// Add sets i to sum of x+y
func (i *Number) Add(x, y *Number) *Number {
i.num.Add(x.num, y.num)
return i.limit(i)
}

// Sub sets i to difference of x-y
func (i *Number) Sub(x, y *Number) *Number {
i.num.Sub(x.num, y.num)
return i.limit(i)
}

// Mul sets i to product of x*y
func (i *Number) Mul(x, y *Number) *Number {
i.num.Mul(x.num, y.num)
return i.limit(i)
}

// Div sets i to the quotient prodject of x/y
func (i *Number) Div(x, y *Number) *Number {
i.num.Div(x.num, y.num)
return i.limit(i)
}

// Mod sets i to x % y
func (i *Number) Mod(x, y *Number) *Number {
i.num.Mod(x.num, y.num)
return i.limit(i)
}

// Lsh sets i to x << s
func (i *Number) Lsh(x *Number, s uint) *Number {
i.num.Lsh(x.num, s)
return i.limit(i)
}

// Pow sets i to x^y
func (i *Number) Pow(x, y *Number) *Number {
i.num.Exp(x.num, y.num, big.NewInt(0))
return i.limit(i)
}

// Setters

// Set sets x to i
func (i *Number) Set(x *Number) *Number {
i.num.Set(x.num)
return i.limit(i)
}

// SetBytes sets x bytes to i
func (i *Number) SetBytes(x []byte) *Number {
i.num.SetBytes(x)
return i.limit(i)
}

// Cmp compares x and y and returns:
//
// -1 if x < y
// 0 if x == y
// +1 if x > y
func (i *Number) Cmp(x *Number) int {
return i.num.Cmp(x.num)
}

// Getters

// String returns the string representation of i
func (i *Number) String() string {
return i.num.String()
}

// Bytes returns the byte representation of i
func (i *Number) Bytes() []byte {
return i.num.Bytes()
}

// Uint64 returns the Uint64 representation of x. If x cannot be represented in an int64, the result is undefined.
func (i *Number) Uint64() uint64 {
return i.num.Uint64()
}

// Int64 returns the int64 representation of x. If x cannot be represented in an int64, the result is undefined.
func (i *Number) Int64() int64 {
return i.num.Int64()
}

// Int256 returns the signed version of i
func (i *Number) Int256() *Number {
return Int(0).Set(i)
}

// Uint256 returns the unsigned version of i
func (i *Number) Uint256() *Number {
return Uint(0).Set(i)
}

// FirstBitSet returns the index of the first bit that's set to 1
func (i *Number) FirstBitSet() int {
for j := 0; j < i.num.BitLen(); j++ {
if i.num.Bit(j) > 0 {
return j
}
}

return i.num.BitLen()
}

// Variables

var (
Zero = Uint(0)
One = Uint(1)
Two = Uint(2)
MaxUint256 = Uint(0).SetBytes(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))

MinOne = Int(-1)

// "typedefs"
Uint = Uint256
Int = Int256
)
@@ -1,108 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package number

import (
"math/big"
"testing"

"github.com/ethereum/go-ethereum/common"
)

func TestSet(t *testing.T) {
a := Uint(0)
b := Uint(10)
a.Set(b)
if a.num.Cmp(b.num) != 0 {
t.Error("didn't compare", a, b)
}

c := Uint(0).SetBytes(common.Hex2Bytes("0a"))
if c.num.Cmp(big.NewInt(10)) != 0 {
t.Error("c set bytes failed.")
}
}

func TestInitialiser(t *testing.T) {
check := false
init := NewInitialiser(func(x *Number) *Number {
check = true
return x
})
a := init(0).Add(init(1), init(2))
if a.Cmp(init(3)) != 0 {
t.Error("expected 3. got", a)
}
if !check {
t.Error("expected limiter to be called")
}
}

func TestGet(t *testing.T) {
a := Uint(10)
if a.Uint64() != 10 {
t.Error("expected to get 10. got", a.Uint64())
}

a = Uint(10)
if a.Int64() != 10 {
t.Error("expected to get 10. got", a.Int64())
}
}

func TestCmp(t *testing.T) {
a := Uint(10)
b := Uint(10)
c := Uint(11)

if a.Cmp(b) != 0 {
t.Error("a b == 0 failed", a, b)
}

if a.Cmp(c) >= 0 {
t.Error("a c < 0 failed", a, c)
}

if c.Cmp(b) <= 0 {
t.Error("c b > 0 failed", c, b)
}
}

func TestMaxArith(t *testing.T) {
a := Uint(0).Add(MaxUint256, One)
if a.Cmp(Zero) != 0 {
t.Error("expected max256 + 1 = 0 got", a)
}

a = Uint(0).Sub(Uint(0), One)
if a.Cmp(MaxUint256) != 0 {
t.Error("expected 0 - 1 = max256 got", a)
}

a = Int(0).Sub(Int(0), One)
if a.Cmp(MinOne) != 0 {
t.Error("expected 0 - 1 = -1 got", a)
}
}

func TestConversion(t *testing.T) {
a := Int(-1)
b := a.Uint256()
if b.Cmp(MaxUint256) != 0 {
t.Error("expected -1 => unsigned to return max. got", b)
}
}
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/crypto/sha3"
)

// Lengths of hashes and addresses in bytes.
const (
HashLength = 32
AddressLength = 20
@@ -389,7 +389,7 @@ type Config struct {
PowMode Mode
}

// Ethash is a consensus engine based on proot-of-work implementing the ethash
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
config Config
@@ -60,7 +60,7 @@ type Config struct {
Preload []string // Absolute paths to JavaScript files to preload
}

// Console is a JavaScript interpreted runtime environment. It is a fully fleged
// Console is a JavaScript interpreted runtime environment. It is a fully fledged
// JavaScript console attached to a running node via an external or in-process RPC
// client.
type Console struct {
@@ -95,7 +95,7 @@ func ensParentNode(name string) (common.Hash, common.Hash) {
}
}

func ensNode(name string) common.Hash {
func EnsNode(name string) common.Hash {
parentNode, parentLabel := ensParentNode(name)
return crypto.Keccak256Hash(parentNode[:], parentLabel[:])
}
@@ -136,7 +136,7 @@ func (self *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, er

// Resolve is a non-transactional call that returns the content hash associated with a name.
func (self *ENS) Resolve(name string) (common.Hash, error) {
node := ensNode(name)
node := EnsNode(name)

resolver, err := self.getResolver(node)
if err != nil {
@@ -165,7 +165,7 @@ func (self *ENS) Register(name string) (*types.Transaction, error) {
// SetContentHash sets the content hash associated with a name. Only works if the caller
// owns the name, and the associated resolver implements a `setContent` function.
func (self *ENS) SetContentHash(name string, hash common.Hash) (*types.Transaction, error) {
node := ensNode(name)
node := EnsNode(name)

resolver, err := self.getResolver(node)
if err != nil {
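Exporting `EnsNode` lets code outside the package compute the ENS node (namehash) of a name, the recursive keccak over the name's labels that keys the registry and resolver contracts. A quick sketch of a caller:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/contracts/ens"
)

func main() {
	// The namehash of "swarm.eth", usable as a registry/resolver lookup key.
	node := ens.EnsNode("swarm.eth")
	fmt.Printf("node: %x\n", node)
}
```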
@@ -55,7 +55,7 @@ func TestENS(t *testing.T) {
if err != nil {
t.Fatalf("can't deploy resolver: %v", err)
}
if _, err := ens.SetResolver(ensNode(name), resolverAddr); err != nil {
if _, err := ens.SetResolver(EnsNode(name), resolverAddr); err != nil {
t.Fatalf("can't set resolver: %v", err)
}
contractBackend.Commit()
@@ -51,7 +51,7 @@ func NewCompiler(debug bool) *Compiler {
// the compiler.
//
// feed is the first pass in the compile stage as it
// collect the used labels in the program and keeps a
// collects the used labels in the program and keeps a
// program counter which is used to determine the locations
// of the jump dests. The labels can than be used in the
// second stage to push labels and determine the right
@@ -120,7 +120,7 @@ func (c *Compiler) next() token {
return token
}

// compile line compiles a single line instruction e.g.
// compileLine compiles a single line instruction e.g.
// "push 1", "jump @label".
func (c *Compiler) compileLine() error {
n := c.next()

@@ -242,7 +242,7 @@ func lexLabel(l *lexer) stateFn {
}

// lexInsideString lexes the inside of a string until
// until the state function finds the closing quote.
// the state function finds the closing quote.
// It returns the lex text state function.
func lexInsideString(l *lexer) stateFn {
if l.acceptRunUntil('"') {
@@ -269,8 +269,8 @@ func (bc *BlockChain) SetHead(head uint64) error {
defer bc.mu.Unlock()

// Rewind the header chain, deleting all block bodies until then
delFn := func(hash common.Hash, num uint64) {
rawdb.DeleteBody(bc.db, hash, num)
delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
rawdb.DeleteBody(db, hash, num)
}
bc.hc.SetHead(head, delFn)
currentHeader := bc.hc.CurrentHeader()
@@ -672,7 +672,7 @@ func (bc *BlockChain) Stop() {
}
}
for !bc.triegc.Empty() {
triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
triedb.Dereference(bc.triegc.PopItem().(common.Hash))
}
if size, _ := triedb.Size(); size != 0 {
log.Error("Dangling trie nodes after full cleanup")
@@ -947,7 +947,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
bc.triegc.Push(root, number)
break
}
triedb.Dereference(root.(common.Hash), common.Hash{})
triedb.Dereference(root.(common.Hash))
}
}
}
@@ -1340,9 +1340,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
diff := types.TxDifference(deletedTxs, addedTxs)
// When transactions get deleted from the database that means the
// receipts that were created in the fork must also be deleted
batch := bc.db.NewBatch()
for _, tx := range diff {
rawdb.DeleteTxLookupEntry(bc.db, tx.Hash())
rawdb.DeleteTxLookupEntry(batch, tx.Hash())
}
batch.Write()

if len(deletedLogs) > 0 {
go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
}
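The reorg path now collects its lookup-entry deletions into a single database batch, turning many small disk writes into one flush. The pattern in isolation, as a sketch against the ethdb/rawdb interfaces used above:

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// deleteLookups removes a set of tx lookup entries in one batched write,
// mirroring the reorg change above.
func deleteLookups(db ethdb.Database, hashes []common.Hash) error {
	batch := db.NewBatch()
	for _, h := range hashes {
		// A batch satisfies the deleter interface the rawdb helpers expect,
		// so deletions are staged in memory rather than hitting disk one by one.
		rawdb.DeleteTxLookupEntry(batch, h)
	}
	// Nothing reaches the database until the batch is flushed.
	return batch.Write()
}

func main() {
	db := ethdb.NewMemDatabase()
	_ = deleteLookups(db, []common.Hash{{}})
}
```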
@@ -25,6 +25,7 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@@ -35,6 +36,39 @@ import (
"github.com/ethereum/go-ethereum/params"
)

// So we can deterministically seed different blockchains
var (
canonicalSeed = 1
forkSeed = 2
)

// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, if creates either a full block chain or a
// header only chain.
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
var (
db = ethdb.NewMemDatabase()
genesis = new(Genesis).MustCommit(db)
)

// Initialize a fresh chain with only a genesis block
blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
}
if full {
// Full block-chain requested
blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed)
_, err := blockchain.InsertChain(blocks)
return db, blockchain, err
}
// Header-only chain requested
headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed)
_, err := blockchain.InsertHeaderChain(headers, 1)
return db, blockchain, err
}

// Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
// Copy old chain up to #i into a new db
@@ -1279,8 +1313,8 @@ func TestTrieForkGC(t *testing.T) {
}
// Dereference all the recent tries and ensure no past trie is left in
for i := 0; i < triesInMemory; i++ {
chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root(), common.Hash{})
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root(), common.Hash{})
chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
}
if len(chain.stateCache.TrieDB().Nodes()) > 0 {
t.Fatalf("stale tries still alive after garbase collection")

@@ -30,12 +30,6 @@ import (
"github.com/ethereum/go-ethereum/params"
)

// So we can deterministically seed different blockchains
var (
canonicalSeed = 1
forkSeed = 2
)

// BlockGen creates blocks for testing.
// See GenerateChain for a detailed explanation.
type BlockGen struct {
@@ -252,33 +246,6 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
}
}

// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, if creates either a full block chain or a
// header only chain.
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
var (
db = ethdb.NewMemDatabase()
genesis = new(Genesis).MustCommit(db)
)

// Initialize a fresh chain with only a genesis block
blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
}
if full {
// Full block-chain requested
blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed)
_, err := blockchain.InsertChain(blocks)
return db, blockchain, err
}
// Header-only chain requested
headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed)
_, err := blockchain.InsertHeaderChain(headers, 1)
return db, blockchain, err
}

// makeHeaderChain creates a deterministic chain of headers rooted at parent.
func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header {
blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed)
@@ -156,13 +156,16 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
// Delete any canonical number assignments above the new head
batch := hc.chainDb.NewBatch()
for i := number + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(hc.chainDb, i)
rawdb.DeleteCanonicalHash(batch, i)
}
batch.Write()

// Overwrite any stale canonical number assignments
var (
headHash = header.ParentHash
@@ -438,7 +441,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {

// DeleteCallback is a callback function that is called by SetHead before
// each header is deleted.
type DeleteCallback func(common.Hash, uint64)
type DeleteCallback func(rawdb.DatabaseDeleter, common.Hash, uint64)

// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
@@ -448,22 +451,24 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
if hdr := hc.CurrentHeader(); hdr != nil {
height = hdr.Number.Uint64()
}

batch := hc.chainDb.NewBatch()
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
hash := hdr.Hash()
num := hdr.Number.Uint64()
if delFn != nil {
delFn(hash, num)
delFn(batch, hash, num)
}
rawdb.DeleteHeader(hc.chainDb, hash, num)
rawdb.DeleteTd(hc.chainDb, hash, num)
rawdb.DeleteHeader(batch, hash, num)
rawdb.DeleteTd(batch, hash, num)

hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1))
}
// Roll back the canonical chain numbering
for i := height; i > head; i-- {
rawdb.DeleteCanonicalHash(hc.chainDb, i)
rawdb.DeleteCanonicalHash(batch, i)
}
batch.Write()

// Clear out any stale content from the caches
hc.headerCache.Purge()
hc.tdCache.Purge()
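With the widened `DeleteCallback` signature, callers stage their own deletions into whatever deleter `SetHead` passes in, normally its write batch, so header, td and caller data are flushed together. A sketch of a callback under the new signature (the `DeleteReceipts` helper is an assumption about rawdb's API, mirroring `DeleteBody` above):

```go
package core

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// bodyAndReceiptDeleter builds a SetHead callback that drops receipts
// alongside block bodies. Because the deleter it receives is SetHead's
// own batch, all deletions for a rewound header land atomically.
func bodyAndReceiptDeleter() DeleteCallback {
	return func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
		rawdb.DeleteReceipts(db, hash, num)
	}
}
```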
@@ -596,7 +596,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
case isDirty:
// Write any contract code associated with the state object
if stateObject.code != nil && stateObject.dirtyCode {
s.db.TrieDB().Insert(common.BytesToHash(stateObject.CodeHash()), stateObject.code)
s.db.TrieDB().InsertBlob(common.BytesToHash(stateObject.CodeHash()), stateObject.code)
stateObject.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie.

@@ -815,11 +815,9 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {

for i, tx := range txs {
var replace bool
if replace, errs[i] = pool.add(tx, local); errs[i] == nil {
if !replace {
from, _ := types.Sender(pool.signer, tx) // already validated
dirty[from] = struct{}{}
}
if replace, errs[i] = pool.add(tx, local); errs[i] == nil && !replace {
from, _ := types.Sender(pool.signer, tx) // already validated
dirty[from] = struct{}{}
}
}
// Only reprocess the internal state if something was actually added
@@ -1107,7 +1105,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Demoting pending transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
// If there's a gap in front, warn (should never happen) and postpone all transactions
// If there's a gap in front, alert (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil {
for _, tx := range list.Cap(0) {
hash := tx.Hash()

@@ -35,15 +35,6 @@ var (
ErrInvalidSig = errors.New("invalid transaction v, r, s values")
)

// deriveSigner makes a *best* guess about which signer to use.
func deriveSigner(V *big.Int) Signer {
if V.Sign() != 0 && isProtectedV(V) {
return NewEIP155Signer(deriveChainId(V))
} else {
return HomesteadSigner{}
}
}

type Transaction struct {
data txdata
// caches

@@ -18,6 +18,7 @@ package vm

import "errors"

// List execution errors
var (
ErrOutOfGas = errors.New("out of gas")
ErrCodeStoreOutOfGas = errors.New("contract creation code storage out of gas")
@@ -31,8 +31,10 @@ import (
|
||||
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
||||
|
||||
type (
|
||||
// CanTransferFunc is the signature of a transfer guard function
|
||||
CanTransferFunc func(StateDB, common.Address, *big.Int) bool
|
||||
TransferFunc func(StateDB, common.Address, common.Address, *big.Int)
|
||||
// TransferFunc is the signature of a transfer function
|
||||
TransferFunc func(StateDB, common.Address, common.Address, *big.Int)
|
||||
// GetHashFunc returns the nth block hash in the blockchain
|
||||
// and is used by the BLOCKHASH EVM op code.
|
||||
GetHashFunc func(uint64) common.Hash
|
||||
|
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// Gas costs
|
||||
const (
|
||||
GasQuickStep uint64 = 2
|
||||
GasFastestStep uint64 = 3
|
||||
|
@@ -556,7 +556,7 @@ func opMload(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *St
func opMstore(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
	// pop value of the stack
	mStart, val := stack.pop(), stack.pop()
-	memory.Set(mStart.Uint64(), 32, math.PaddedBigBytes(val, 32))
+	memory.Set32(mStart.Uint64(), val)

	evm.interpreter.intPool.put(mStart, val)
	return nil, nil
@@ -570,9 +570,9 @@ func opMstore8(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *
}

func opSload(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
-	loc := common.BigToHash(stack.pop())
-	val := evm.StateDB.GetState(contract.Address(), loc).Big()
-	stack.push(val)
+	loc := stack.peek()
+	val := evm.StateDB.GetState(contract.Address(), common.BigToHash(loc))
+	loc.SetBytes(val.Bytes())
	return nil, nil
}

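The new opSload mutates the stack slot in place: peek returns a pointer to the top big.Int, so writing the loaded word into it replaces a pop/push round-trip and saves an allocation per SLOAD. A minimal sketch of the same peek-and-overwrite pattern on a slice-backed stack (illustrative names, not the geth types):

	package main

	import (
		"fmt"
		"math/big"
	)

	type stack struct{ data []*big.Int }

	func (s *stack) push(v *big.Int) { s.data = append(s.data, v) }
	func (s *stack) peek() *big.Int  { return s.data[len(s.data)-1] }

	func main() {
		st := &stack{}
		st.push(big.NewInt(7)) // pretend 7 is the storage key
		loaded := []byte{0x13, 0x37}

		// Overwrite the top-of-stack value in place instead of pop+push.
		st.peek().SetBytes(loaded)
		fmt.Println(st.peek()) // 4919
	}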
@@ -861,7 +861,7 @@ func makeDup(size int64) executionFunc {
// make swap instruction function
func makeSwap(size int64) executionFunc {
	// switch n + 1 otherwise n would be swapped with n
-	size += 1
+	size++
	return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
		stack.swap(int(size))
		return nil, nil
@@ -36,6 +36,7 @@ func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64
		stack = newstack()
		pc    = uint64(0)
	)
+	env.interpreter.intPool = poolOfIntPools.get()
	for i, test := range tests {
		x := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
		shift := new(big.Int).SetBytes(common.Hex2Bytes(test.y))
@@ -64,6 +65,7 @@ func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64
			}
		}
	}
+	poolOfIntPools.put(env.interpreter.intPool)
}

func TestByteOp(t *testing.T) {
@@ -71,6 +73,7 @@ func TestByteOp(t *testing.T) {
		env   = NewEVM(Context{}, nil, params.TestChainConfig, Config{})
		stack = newstack()
	)
+	env.interpreter.intPool = poolOfIntPools.get()
	tests := []struct {
		v        string
		th       uint64
@@ -97,6 +100,7 @@ func TestByteOp(t *testing.T) {
			t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.v, test.th, test.expected, actual)
		}
	}
+	poolOfIntPools.put(env.interpreter.intPool)
}

func TestSHL(t *testing.T) {
@@ -425,3 +429,44 @@ func BenchmarkOpIsZero(b *testing.B) {
	x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
	opBenchmark(b, opIszero, x)
}
+
+func TestOpMstore(t *testing.T) {
+	var (
+		env   = NewEVM(Context{}, nil, params.TestChainConfig, Config{})
+		stack = newstack()
+		mem   = NewMemory()
+	)
+	env.interpreter.intPool = poolOfIntPools.get()
+	mem.Resize(64)
+	pc := uint64(0)
+	v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
+	stack.pushN(new(big.Int).SetBytes(common.Hex2Bytes(v)), big.NewInt(0))
+	opMstore(&pc, env, nil, mem, stack)
+	if got := common.Bytes2Hex(mem.Get(0, 32)); got != v {
+		t.Fatalf("Mstore fail, got %v, expected %v", got, v)
+	}
+	stack.pushN(big.NewInt(0x1), big.NewInt(0))
+	opMstore(&pc, env, nil, mem, stack)
+	if common.Bytes2Hex(mem.Get(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
+		t.Fatalf("Mstore failed to overwrite previous value")
+	}
+	poolOfIntPools.put(env.interpreter.intPool)
+}
+
+func BenchmarkOpMstore(bench *testing.B) {
+	var (
+		env   = NewEVM(Context{}, nil, params.TestChainConfig, Config{})
+		stack = newstack()
+		mem   = NewMemory()
+	)
+	mem.Resize(64)
+	pc := uint64(0)
+	memStart := big.NewInt(0)
+	value := big.NewInt(0x1337)
+
+	bench.ResetTimer()
+	for i := 0; i < bench.N; i++ {
+		stack.pushN(value, memStart)
+		opMstore(&pc, env, nil, mem, stack)
+	}
+}
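In TestOpMstore above, stack.pushN(value, big.NewInt(0)) pushes the value first and leaves the offset on top, which matches opMstore popping the offset before the value. A small LIFO model of why that argument order works out (a sketch, not the geth stack type):

	package main

	import "fmt"

	func main() {
		var stack []int
		push := func(vals ...int) { stack = append(stack, vals...) }
		pop := func() int {
			v := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			return v
		}

		push(0x1337, 0) // value first, then the memory offset
		mStart, val := pop(), pop()
		fmt.Println(mStart, val) // 0 4919: the offset comes off first
	}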
@@ -77,7 +77,6 @@ func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
		evm:      evm,
		cfg:      cfg,
		gasTable: evm.ChainConfig().GasTable(evm.BlockNumber),
-		intPool:  newIntPool(),
	}
}

@@ -104,6 +103,14 @@ func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack
// considered a revert-and-consume-all-gas operation except for
// errExecutionReverted which means revert-and-keep-gas-left.
func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err error) {
+	if in.intPool == nil {
+		in.intPool = poolOfIntPools.get()
+		defer func() {
+			poolOfIntPools.put(in.intPool)
+			in.intPool = nil
+		}()
+	}
+
	// Increment the call depth which is restricted to 1024
	in.evm.depth++
	defer func() { in.evm.depth-- }()
@@ -133,6 +140,9 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er
	)
	contract.Input = input

+	// Reclaim the stack as an int pool when the execution stops
+	defer func() { in.intPool.put(stack.data...) }()
+
	if in.cfg.Debug {
		defer func() {
			if err != nil {
@@ -16,7 +16,10 @@

package vm

-import "math/big"
+import (
+	"math/big"
+	"sync"
+)

var checkVal = big.NewInt(-42)

@@ -65,3 +68,39 @@ func (p *intPool) put(is ...*big.Int) {
		p.pool.push(i)
	}
}
+
+// The intPool pool's default capacity
+const poolDefaultCap = 25
+
+// intPoolPool manages a pool of intPools.
+type intPoolPool struct {
+	pools []*intPool
+	lock  sync.Mutex
+}
+
+var poolOfIntPools = &intPoolPool{
+	pools: make([]*intPool, 0, poolDefaultCap),
+}
+
+// get is looking for an available pool to return.
+func (ipp *intPoolPool) get() *intPool {
+	ipp.lock.Lock()
+	defer ipp.lock.Unlock()
+
+	if len(poolOfIntPools.pools) > 0 {
+		ip := ipp.pools[len(ipp.pools)-1]
+		ipp.pools = ipp.pools[:len(ipp.pools)-1]
+		return ip
+	}
+	return newIntPool()
+}
+
+// put a pool that has been allocated with get.
+func (ipp *intPoolPool) put(ip *intPool) {
+	ipp.lock.Lock()
+	defer ipp.lock.Unlock()
+
+	if len(ipp.pools) < cap(ipp.pools) {
+		ipp.pools = append(ipp.pools, ip)
+	}
+}
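intPoolPool is a bounded free list: get pops a cached intPool or allocates a fresh one, while put re-caches it only while there is spare capacity, so the cache never grows past poolDefaultCap. (Note that get checks len(poolOfIntPools.pools) rather than len(ipp.pools); the two only coincide because a single global instance exists.) A hedged sketch of the same bounded-pool idea with illustrative names:

	package main

	import (
		"fmt"
		"sync"
	)

	// boundedPool caches up to cap(items) values; beyond that, put drops them.
	type boundedPool struct {
		mu    sync.Mutex
		items []*[]byte
	}

	func (p *boundedPool) get() *[]byte {
		p.mu.Lock()
		defer p.mu.Unlock()
		if n := len(p.items); n > 0 {
			it := p.items[n-1]
			p.items = p.items[:n-1]
			return it
		}
		buf := make([]byte, 0, 1024)
		return &buf
	}

	func (p *boundedPool) put(it *[]byte) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if len(p.items) < cap(p.items) {
			p.items = append(p.items, it) // only re-cache while under capacity
		}
	}

	func main() {
		pool := &boundedPool{items: make([]*[]byte, 0, 4)}
		b := pool.get()
		pool.put(b)
		fmt.Println(len(pool.items)) // 1
	}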
55 core/vm/intpool_test.go Normal file
@@ -0,0 +1,55 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package vm
+
+import (
+	"testing"
+)
+
+func TestIntPoolPoolGet(t *testing.T) {
+	poolOfIntPools.pools = make([]*intPool, 0, poolDefaultCap)
+
+	nip := poolOfIntPools.get()
+	if nip == nil {
+		t.Fatalf("Invalid pool allocation")
+	}
+}
+
+func TestIntPoolPoolPut(t *testing.T) {
+	poolOfIntPools.pools = make([]*intPool, 0, poolDefaultCap)
+
+	nip := poolOfIntPools.get()
+	if len(poolOfIntPools.pools) != 0 {
+		t.Fatalf("Pool got added to list when none should have been")
+	}
+
+	poolOfIntPools.put(nip)
+	if len(poolOfIntPools.pools) == 0 {
+		t.Fatalf("Pool did not get added to list when one should have been")
+	}
+}
+
+func TestIntPoolPoolReUse(t *testing.T) {
+	poolOfIntPools.pools = make([]*intPool, 0, poolDefaultCap)
+	nip := poolOfIntPools.get()
+	poolOfIntPools.put(nip)
+	poolOfIntPools.get()
+
+	if len(poolOfIntPools.pools) != 0 {
+		t.Fatalf("Invalid number of pools. Got %d, expected %d", len(poolOfIntPools.pools), 0)
+	}
+}
@@ -51,17 +51,17 @@ type operation struct {
}

var (
-	frontierInstructionSet       = NewFrontierInstructionSet()
-	homesteadInstructionSet      = NewHomesteadInstructionSet()
-	byzantiumInstructionSet      = NewByzantiumInstructionSet()
-	constantinopleInstructionSet = NewConstantinopleInstructionSet()
+	frontierInstructionSet       = newFrontierInstructionSet()
+	homesteadInstructionSet      = newHomesteadInstructionSet()
+	byzantiumInstructionSet      = newByzantiumInstructionSet()
+	constantinopleInstructionSet = newConstantinopleInstructionSet()
)

// NewConstantinopleInstructionSet returns the frontier, homestead
// byzantium and contantinople instructions.
-func NewConstantinopleInstructionSet() [256]operation {
+func newConstantinopleInstructionSet() [256]operation {
	// instructions that can be executed during the byzantium phase.
-	instructionSet := NewByzantiumInstructionSet()
+	instructionSet := newByzantiumInstructionSet()
	instructionSet[SHL] = operation{
		execute: opSHL,
		gasCost: constGasFunc(GasFastestStep),
@@ -85,9 +85,9 @@ func NewConstantinopleInstructionSet() [256]operation {

// NewByzantiumInstructionSet returns the frontier, homestead and
// byzantium instructions.
-func NewByzantiumInstructionSet() [256]operation {
+func newByzantiumInstructionSet() [256]operation {
	// instructions that can be executed during the homestead phase.
-	instructionSet := NewHomesteadInstructionSet()
+	instructionSet := newHomesteadInstructionSet()
	instructionSet[STATICCALL] = operation{
		execute: opStaticCall,
		gasCost: gasStaticCall,
@@ -123,8 +123,8 @@ func NewByzantiumInstructionSet() [256]operation {

// NewHomesteadInstructionSet returns the frontier and homestead
// instructions that can be executed during the homestead phase.
-func NewHomesteadInstructionSet() [256]operation {
-	instructionSet := NewFrontierInstructionSet()
+func newHomesteadInstructionSet() [256]operation {
+	instructionSet := newFrontierInstructionSet()
	instructionSet[DELEGATECALL] = operation{
		execute: opDelegateCall,
		gasCost: gasDelegateCall,
@@ -138,7 +138,7 @@ func NewHomesteadInstructionSet() [256]operation {

// NewFrontierInstructionSet returns the frontier instructions
// that can be executed during the frontier phase.
-func NewFrontierInstructionSet() [256]operation {
+func newFrontierInstructionSet() [256]operation {
	return [256]operation{
		STOP: {
			execute: opStop,
@@ -29,8 +29,10 @@ import (
	"github.com/ethereum/go-ethereum/core/types"
)

+// Storage represents a contract's storage.
type Storage map[common.Hash]common.Hash

+// Copy duplicates the current storage.
func (s Storage) Copy() Storage {
	cpy := make(Storage)
	for key, value := range s {
@@ -76,10 +78,12 @@ type structLogMarshaling struct {
	ErrorString string `json:"error"` // adds call to ErrorString() in MarshalJSON
}

+// OpName formats the operand name in a human-readable format.
func (s *StructLog) OpName() string {
	return s.Op.String()
}

+// ErrorString formats the log's error as a string.
func (s *StructLog) ErrorString() string {
	if s.Err != nil {
		return s.Err.Error()
@@ -124,6 +128,7 @@ func NewStructLogger(cfg *LogConfig) *StructLogger {
	return logger
}

+// CaptureStart implements the Tracer interface to initialize the tracing operation.
func (l *StructLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
	return nil
}
@@ -178,10 +183,13 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
	return nil
}

+// CaptureFault implements the Tracer interface to trace an execution fault
+// while running an opcode.
func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
	return nil
}

+// CaptureEnd is called after the call finishes to finalize the tracing.
func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
	l.output = output
	l.err = err
@@ -16,7 +16,12 @@

package vm

-import "fmt"
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common/math"
+)

// Memory implements a simple memory model for the ethereum virtual machine.
type Memory struct {
@@ -24,25 +29,39 @@ type Memory struct {
	lastGasCost uint64
}

+// NewMemory returns a new memory memory model.
func NewMemory() *Memory {
	return &Memory{}
}

// Set sets offset + size to value
func (m *Memory) Set(offset, size uint64, value []byte) {
-	// length of store may never be less than offset + size.
-	// The store should be resized PRIOR to setting the memory
-	if size > uint64(len(m.store)) {
-		panic("INVALID memory: store empty")
-	}
-
	// It's possible the offset is greater than 0 and size equals 0. This is because
	// the calcMemSize (common.go) could potentially return 0 when size is zero (NO-OP)
	if size > 0 {
+		// length of store may never be less than offset + size.
+		// The store should be resized PRIOR to setting the memory
+		if offset+size > uint64(len(m.store)) {
+			panic("invalid memory: store empty")
+		}
		copy(m.store[offset:offset+size], value)
	}
}

+// Set32 sets the 32 bytes starting at offset to the value of val, left-padded with zeroes to
+// 32 bytes.
+func (m *Memory) Set32(offset uint64, val *big.Int) {
+	// length of store may never be less than offset + size.
+	// The store should be resized PRIOR to setting the memory
+	if offset+32 > uint64(len(m.store)) {
+		panic("invalid memory: store empty")
+	}
+	// Zero the memory area
+	copy(m.store[offset:offset+32], []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+	// Fill in relevant bits
+	math.ReadBits(val, m.store[offset:offset+32])
+}
+
// Resize resizes the memory to size
func (m *Memory) Resize(size uint64) {
	if uint64(m.Len()) < size {
@@ -89,6 +108,7 @@ func (m *Memory) Data() []byte {
	return m.store
}

+// Print dumps the content of the memory.
func (m *Memory) Print() {
	fmt.Printf("### mem %d bytes ###\n", len(m.store))
	if len(m.store) > 0 {
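Set32 zeroes the 32-byte word first and then writes the big-endian bytes of val right-aligned, so shorter values come out left-padded, which is exactly what MSTORE needs. A self-contained sketch of the padding semantics using only the standard library (it assumes val fits in 32 bytes, as EVM words do):

	package main

	import (
		"fmt"
		"math/big"
	)

	// set32 writes val into buf[offset:offset+32], left-padded with zeroes.
	func set32(buf []byte, offset int, val *big.Int) {
		word := buf[offset : offset+32]
		for i := range word {
			word[i] = 0 // clear the slot, like the 32-zero-byte copy above
		}
		b := val.Bytes()          // big-endian, no leading zeroes
		copy(word[32-len(b):], b) // right-align, i.e. left padding
	}

	func main() {
		mem := make([]byte, 64)
		set32(mem, 0, big.NewInt(0x1337))
		fmt.Printf("%x\n", mem[:32]) // 30 zero bytes followed by 1337
	}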
@@ -65,12 +65,6 @@ func memoryCall(stack *Stack) *big.Int {

	return math.BigMax(x, y)
}
-
-func memoryCallCode(stack *Stack) *big.Int {
-	x := calcMemSize(stack.Back(5), stack.Back(6))
-	y := calcMemSize(stack.Back(3), stack.Back(4))
-
-	return math.BigMax(x, y)
-}
func memoryDelegateCall(stack *Stack) *big.Int {
	x := calcMemSize(stack.Back(4), stack.Back(5))
	y := calcMemSize(stack.Back(2), stack.Back(3))
@@ -23,6 +23,7 @@ import (
// OpCode is an EVM opcode
type OpCode byte

+// IsPush specifies if an opcode is a PUSH opcode.
func (op OpCode) IsPush() bool {
	switch op {
	case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
@@ -31,12 +32,13 @@ func (op OpCode) IsPush() bool {
	return false
}

+// IsStaticJump specifies if an opcode is JUMP.
func (op OpCode) IsStaticJump() bool {
	return op == JUMP
}

+// 0x0 range - arithmetic ops.
const (
-	// 0x0 range - arithmetic ops
	STOP OpCode = iota
	ADD
	MUL
@@ -51,6 +53,7 @@ const (
	SIGNEXTEND
)

+// 0x10 range - comparison ops.
const (
	LT OpCode = iota + 0x10
	GT
@@ -70,8 +73,8 @@ const (
	SHA3 = 0x20
)

+// 0x30 range - closure state.
const (
-	// 0x30 range - closure state
	ADDRESS OpCode = 0x30 + iota
	BALANCE
	ORIGIN
@@ -89,8 +92,8 @@ const (
	RETURNDATACOPY
)

+// 0x40 range - block operations.
const (
-	// 0x40 range - block operations
	BLOCKHASH OpCode = 0x40 + iota
	COINBASE
	TIMESTAMP
@@ -99,8 +102,8 @@ const (
	GASLIMIT
)

+// 0x50 range - 'storage' and execution.
const (
-	// 0x50 range - 'storage' and execution
	POP OpCode = 0x50 + iota
	MLOAD
	MSTORE
@@ -115,8 +118,8 @@ const (
	JUMPDEST
)

+// 0x60 range.
const (
-	// 0x60 range
	PUSH1 OpCode = 0x60 + iota
	PUSH2
	PUSH3
@@ -183,6 +186,7 @@ const (
	SWAP16
)

+// 0xa0 range - logging ops.
const (
	LOG0 OpCode = 0xa0 + iota
	LOG1
@@ -191,15 +195,15 @@ const (
	LOG4
)

-// unofficial opcodes used for parsing
+// unofficial opcodes used for parsing.
const (
	PUSH OpCode = 0xb0 + iota
	DUP
	SWAP
)

+// 0xf0 range - closures.
const (
-	// 0xf0 range - closures
	CREATE OpCode = 0xf0 + iota
	CALL
	CALLCODE
@@ -211,9 +215,9 @@ const (
	SELFDESTRUCT = 0xff
)

-// Since the opcodes aren't all in order we can't use a regular slice
+// Since the opcodes aren't all in order we can't use a regular slice.
var opCodeToString = map[OpCode]string{
-	// 0x0 range - arithmetic ops
+	// 0x0 range - arithmetic ops.
	STOP: "STOP",
	ADD:  "ADD",
	MUL:  "MUL",
@@ -232,7 +236,7 @@ var opCodeToString = map[OpCode]string{
	ISZERO:     "ISZERO",
	SIGNEXTEND: "SIGNEXTEND",

-	// 0x10 range - bit ops
+	// 0x10 range - bit ops.
	AND: "AND",
	OR:  "OR",
	XOR: "XOR",
@@ -243,10 +247,10 @@ var opCodeToString = map[OpCode]string{
	ADDMOD: "ADDMOD",
	MULMOD: "MULMOD",

-	// 0x20 range - crypto
+	// 0x20 range - crypto.
	SHA3: "SHA3",

-	// 0x30 range - closure state
+	// 0x30 range - closure state.
	ADDRESS: "ADDRESS",
	BALANCE: "BALANCE",
	ORIGIN:  "ORIGIN",
@@ -263,7 +267,7 @@ var opCodeToString = map[OpCode]string{
	RETURNDATASIZE: "RETURNDATASIZE",
	RETURNDATACOPY: "RETURNDATACOPY",

-	// 0x40 range - block operations
+	// 0x40 range - block operations.
	BLOCKHASH: "BLOCKHASH",
	COINBASE:  "COINBASE",
	TIMESTAMP: "TIMESTAMP",
@@ -271,7 +275,7 @@ var opCodeToString = map[OpCode]string{
	DIFFICULTY: "DIFFICULTY",
	GASLIMIT:   "GASLIMIT",

-	// 0x50 range - 'storage' and execution
+	// 0x50 range - 'storage' and execution.
	POP: "POP",
	//DUP:     "DUP",
	//SWAP:    "SWAP",
@@ -287,7 +291,7 @@ var opCodeToString = map[OpCode]string{
	GAS:      "GAS",
	JUMPDEST: "JUMPDEST",

-	// 0x60 range - push
+	// 0x60 range - push.
	PUSH1: "PUSH1",
	PUSH2: "PUSH2",
	PUSH3: "PUSH3",
@@ -360,7 +364,7 @@ var opCodeToString = map[OpCode]string{
	LOG3: "LOG3",
	LOG4: "LOG4",

-	// 0xf0 range
+	// 0xf0 range.
	CREATE: "CREATE",
	CALL:   "CALL",
	RETURN: "RETURN",
@@ -524,6 +528,7 @@ var stringToOp = map[string]OpCode{
	"SELFDESTRUCT": SELFDESTRUCT,
}

+// StringToOp finds the opcode whose name is stored in `str`.
func StringToOp(str string) OpCode {
	return stringToOp[str]
}
@@ -21,7 +21,7 @@ import (
	"math/big"
)

-// stack is an object for basic stack operations. Items popped to the stack are
+// Stack is an object for basic stack operations. Items popped to the stack are
// expected to be changed and modified. stack does not take care of adding newly
// initialised objects.
type Stack struct {
@@ -32,6 +32,7 @@ func newstack() *Stack {
	return &Stack{data: make([]*big.Int, 0, 1024)}
}

+// Data returns the underlying big.Int array.
func (st *Stack) Data() []*big.Int {
	return st.data
}
@@ -80,6 +81,7 @@ func (st *Stack) require(n int) error {
	return nil
}

+// Print dumps the content of the stack
func (st *Stack) Print() {
	fmt.Println("### stack ###")
	if len(st.data) > 0 {
@@ -359,7 +359,7 @@ type BadBlockArgs struct {
	RLP string `json:"rlp"`
}

-// GetBadBLocks returns a list of the last 'bad blocks' that the client has seen on the network
+// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
// and returns them as a JSON list of block-hashes
func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) {
	blocks := api.eth.BlockChain().BadBlocks()
@@ -431,7 +431,7 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeRes
	return result, nil
}

-// GetModifiedAccountsByumber returns all accounts that have changed between the
+// GetModifiedAccountsByNumber returns all accounts that have changed between the
// two blocks specified. A change is defined as a difference in nonce, balance,
// code hash, or storage hash.
//
@@ -43,6 +43,7 @@ type EthAPIBackend struct {
	gpo *gasprice.Oracle
}

+// ChainConfig returns the active chain configuration.
func (b *EthAPIBackend) ChainConfig() *params.ChainConfig {
	return b.eth.chainConfig
}

@@ -297,7 +297,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
			database.TrieDB().Reference(root, common.Hash{})
		}
		// Dereference all past tries we ourselves are done working with
-		database.TrieDB().Dereference(proot, common.Hash{})
+		database.TrieDB().Dereference(proot)
		proot = root

		// TODO(karalabe): Do we need the preimages? Won't they accumulate too much?
@@ -320,7 +320,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
				done[uint64(result.Block)] = result

				// Dereference any paret tries held in memory by this task
-				database.TrieDB().Dereference(res.rootref, common.Hash{})
+				database.TrieDB().Dereference(res.rootref)

				// Stream completed traces to the user, aborting on the first error
				for result, ok := done[next]; ok; result, ok = done[next] {
@@ -526,7 +526,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
			return nil, err
		}
		database.TrieDB().Reference(root, common.Hash{})
-		database.TrieDB().Dereference(proot, common.Hash{})
+		database.TrieDB().Dereference(proot)
		proot = root
	}
	nodes, imgs := database.TrieDB().Size()
@@ -88,7 +88,7 @@ type Ethereum struct {
	gasPrice  *big.Int
	etherbase common.Address

-	networkId     uint64
+	networkID     uint64
	netRPCService *ethapi.PublicNetAPI

	lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
@@ -126,7 +126,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
		accountManager: ctx.AccountManager,
		engine:         CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
		shutdownChan:   make(chan bool),
-		networkId:      config.NetworkId,
+		networkID:      config.NetworkId,
		gasPrice:       config.GasPrice,
		etherbase:      config.Etherbase,
		bloomRequests:  make(chan chan *bloombits.Retrieval),
@@ -369,7 +369,7 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
-func (s *Ethereum) NetVersion() uint64 { return s.networkId }
+func (s *Ethereum) NetVersion() uint64 { return s.networkID }
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }

// Protocols implements node.Service, returning all the currently configured
@@ -900,7 +900,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
-			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
+			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false }
@@ -930,7 +930,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*bodyPack)
-			return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
+			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
		}
		expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
		fetch  = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
@@ -954,7 +954,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*receiptPack)
-			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
+			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
		}
		expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
		fetch  = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
@@ -596,21 +596,21 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m
// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
-func (q *queue) Revoke(peerId string) {
+func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

-	if request, ok := q.blockPendPool[peerId]; ok {
+	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
-		delete(q.blockPendPool, peerId)
+		delete(q.blockPendPool, peerID)
	}
-	if request, ok := q.receiptPendPool[peerId]; ok {
+	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
		}
-		delete(q.receiptPendPool, peerId)
+		delete(q.receiptPendPool, peerID)
	}
}

@@ -34,22 +34,22 @@ type dataPack interface {

// headerPack is a batch of block headers returned by a peer.
type headerPack struct {
-	peerId  string
+	peerID  string
	headers []*types.Header
}

-func (p *headerPack) PeerId() string { return p.peerId }
+func (p *headerPack) PeerId() string { return p.peerID }
func (p *headerPack) Items() int     { return len(p.headers) }
func (p *headerPack) Stats() string  { return fmt.Sprintf("%d", len(p.headers)) }

// bodyPack is a batch of block bodies returned by a peer.
type bodyPack struct {
-	peerId       string
+	peerID       string
	transactions [][]*types.Transaction
	uncles       [][]*types.Header
}

-func (p *bodyPack) PeerId() string { return p.peerId }
+func (p *bodyPack) PeerId() string { return p.peerID }
func (p *bodyPack) Items() int {
	if len(p.transactions) <= len(p.uncles) {
		return len(p.transactions)
@@ -60,20 +60,20 @@ func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactio

// receiptPack is a batch of receipts returned by a peer.
type receiptPack struct {
-	peerId   string
+	peerID   string
	receipts [][]*types.Receipt
}

-func (p *receiptPack) PeerId() string { return p.peerId }
+func (p *receiptPack) PeerId() string { return p.peerID }
func (p *receiptPack) Items() int     { return len(p.receipts) }
func (p *receiptPack) Stats() string  { return fmt.Sprintf("%d", len(p.receipts)) }

// statePack is a batch of states returned by a peer.
type statePack struct {
-	peerId string
+	peerID string
	states [][]byte
}

-func (p *statePack) PeerId() string { return p.peerId }
+func (p *statePack) PeerId() string { return p.peerID }
func (p *statePack) Items() int     { return len(p.states) }
func (p *statePack) Stats() string  { return fmt.Sprintf("%d", len(p.states)) }
@@ -88,7 +88,7 @@ type headerFilterTask struct {
	time time.Time // Arrival time of the headers
}

-// headerFilterTask represents a batch of block bodies (transactions and uncles)
+// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
	peer string // The source peer of block bodies
@@ -258,9 +258,9 @@ Logs:
		if len(topics) > len(log.Topics) {
			continue Logs
		}
-		for i, topics := range topics {
-			match := len(topics) == 0 // empty rule set == wildcard
-			for _, topic := range topics {
+		for i, sub := range topics {
+			match := len(sub) == 0 // empty rule set == wildcard
+			for _, topic := range sub {
				if log.Topics[i] == topic {
					match = true
					break
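The old loop shadowed the outer topics slice with its own element (for i, topics := range topics), so every use inside the body silently referred to the inner rule set; renaming it to sub keeps the two levels distinct. The shadowing itself is legal Go, which is what made the original easy to misread:

	package main

	import "fmt"

	func main() {
		topics := [][]string{{"transfer"}, {}}
		// Legal but confusing: the range variable reuses the slice's name,
		// so inside the body "topics" is a []string, not the [][]string.
		for i, topics := range topics {
			fmt.Println(i, len(topics) == 0) // inner []string, e.g. the wildcard check
		}
	}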
@@ -64,7 +64,7 @@ func errResp(code errCode, format string, v ...interface{}) error {
}

type ProtocolManager struct {
-	networkId uint64
+	networkID uint64

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
@@ -98,10 +98,10 @@ type ProtocolManager struct {

// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the Ethereum network.
-func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	manager := &ProtocolManager{
-		networkId:  networkId,
+		networkID:  networkID,
		eventMux:   mux,
		txpool:     txpool,
		blockchain: blockchain,
@@ -263,7 +263,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
-	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
+	if err := p.Handshake(pm.networkID, td, hash, genesis.Hash()); err != nil {
		p.Log().Debug("Ethereum handshake failed", "err", err)
		return err
	}
@@ -779,7 +779,7 @@ type NodeInfo struct {
func (pm *ProtocolManager) NodeInfo() *NodeInfo {
	currentBlock := pm.blockchain.CurrentBlock()
	return &NodeInfo{
-		Network:    pm.networkId,
+		Network:    pm.networkID,
		Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
		Genesis:    pm.blockchain.Genesis().Hash(),
		Config:     pm.blockchain.Config(),
@@ -60,7 +60,7 @@
			return;
		}
		// Skip any pre-compile invocations, those are just fancy opcodes
-		if (isPrecompiled(toAddress(log.stack.peek(1)))) {
+		if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) {
			return;
		}
		// Gather internal call details
File diff suppressed because one or more lines are too long
47 eth/tracers/internal/tracers/bigram_tracer.js Normal file
@@ -0,0 +1,47 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+{
+	// hist is the counters of opcode bigrams
+	hist: {},
+	// lastOp is last operation
+	lastOp: '',
+	// execution depth of last op
+	lastDepth: 0,
+	// step is invoked for every opcode that the VM executes.
+	step: function(log, db) {
+		var op = log.op.toString();
+		var depth = log.getDepth();
+		if (depth == this.lastDepth){
+			var key = this.lastOp+'-'+op;
+			if (this.hist[key]){
+				this.hist[key]++;
+			}
+			else {
+				this.hist[key] = 1;
+			}
+		}
+		this.lastOp = op;
+		this.lastDepth = depth;
+	},
+	// fault is invoked when the actual execution of an opcode fails.
+	fault: function(log, db) {},
+	// result is invoked when all the opcodes have been iterated over and returns
+	// the final result of the tracing.
+	result: function(ctx) {
+		return this.hist;
+	},
+}
49 eth/tracers/internal/tracers/trigram_tracer.js Normal file
@@ -0,0 +1,49 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+{
+	// hist is the map of trigram counters
+	hist: {},
+	// lastOp is last operation
+	lastOps: ['',''],
+	lastDepth: 0,
+	// step is invoked for every opcode that the VM executes.
+	step: function(log, db) {
+		var depth = log.getDepth();
+		if (depth != this.lastDepth){
+			this.lastOps = ['',''];
+			this.lastDepth = depth;
+			return;
+		}
+		var op = log.op.toString();
+		var key = this.lastOps[0]+'-'+this.lastOps[1]+'-'+op;
+		if (this.hist[key]){
+			this.hist[key]++;
+		}
+		else {
+			this.hist[key] = 1;
+		}
+		this.lastOps[0] = this.lastOps[1];
+		this.lastOps[1] = op;
+	},
+	// fault is invoked when the actual execution of an opcode fails.
+	fault: function(log, db) {},
+	// result is invoked when all the opcodes have been iterated over and returns
+	// the final result of the tracing.
+	result: function(ctx) {
+		return this.hist;
+	},
+}
43 eth/tracers/internal/tracers/unigram_tracer.js Normal file
@@ -0,0 +1,43 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+{
+	// hist is the map of opcodes to counters
+	hist: {},
+	// nops counts number of ops
+	nops: 0,
+	// step is invoked for every opcode that the VM executes.
+	step: function(log, db) {
+		var op = log.op.toString();
+		if (this.hist[op]){
+			this.hist[op]++;
+		}
+		else {
+			this.hist[op] = 1;
+		}
+		this.nops++;
+	},
+	// fault is invoked when the actual execution of an opcode fails.
+	fault: function(log, db) {},
+
+	// result is invoked when all the opcodes have been iterated over and returns
+	// the final result of the tracing.
+	result: function(ctx) {
+		if(this.nops > 0){
+			return this.hist;
+		}
+	},
+}
@@ -388,6 +388,12 @@ func (b *ldbBatch) Put(key, value []byte) error {
	return nil
}

+func (b *ldbBatch) Delete(key []byte) error {
+	b.b.Delete(key)
+	b.size += 1
+	return nil
+}
+
func (b *ldbBatch) Write() error {
	return b.db.Write(b.b, nil)
}
@@ -453,6 +459,10 @@ func (tb *tableBatch) Put(key, value []byte) error {
	return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}

+func (tb *tableBatch) Delete(key []byte) error {
+	return tb.batch.Delete(append([]byte(tb.prefix), key...))
+}
+
func (tb *tableBatch) Write() error {
	return tb.batch.Write()
}

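With Delete now available on batches, deletions can be coalesced the same way puts already were: queue tombstones on the batch and flush once with Write. A short usage sketch against the interfaces in this diff (error handling condensed):

	// removeAll batches the deletion of many keys into a single database write.
	func removeAll(db ethdb.Database, keys [][]byte) error {
		batch := db.NewBatch()
		for _, k := range keys {
			if err := batch.Delete(k); err != nil {
				return err
			}
		}
		return batch.Write() // one write applies every queued tombstone
	}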
@@ -25,12 +25,17 @@ type Putter interface {
	Put(key []byte, value []byte) error
}

+// Deleter wraps the database delete operation supported by both batches and regular databases.
+type Deleter interface {
+	Delete(key []byte) error
+}
+
// Database wraps all database operations. All methods are safe for concurrent use.
type Database interface {
	Putter
+	Deleter
	Get(key []byte) ([]byte, error)
	Has(key []byte) (bool, error)
-	Delete(key []byte) error
	Close()
	NewBatch() Batch
}
@@ -39,6 +44,7 @@ type Database interface {
// when Write is called. Batch cannot be used concurrently.
type Batch interface {
	Putter
+	Deleter
	ValueSize() int // amount of data in the batch
	Write() error
	// Reset resets the batch for reuse
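Factoring Delete into its own Deleter interface, embedded by both Database and Batch, lets helpers accept "anything that can delete" without caring whether writes are live or batched; the rawdb.DeleteTxLookupEntry(batch, ...) call later in this diff relies on exactly that. A sketch of the kind of code this enables (a hypothetical helper, not from the PR):

	// prune works against a live database and a batch alike, because both
	// satisfy the Deleter interface introduced above.
	func prune(db ethdb.Deleter, keys [][]byte) error {
		for _, k := range keys {
			if err := db.Delete(k); err != nil {
				return err
			}
		}
		return nil
	}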
@@ -110,11 +110,20 @@ func (b *memBatch) Put(key, value []byte) error {
	return nil
}

+func (b *memBatch) Delete(key []byte) error {
+	b.writes = append(b.writes, kv{common.CopyBytes(key), nil})
+	return nil
+}
+
func (b *memBatch) Write() error {
	b.db.lock.Lock()
	defer b.db.lock.Unlock()

	for _, kv := range b.writes {
+		if kv.v == nil {
+			delete(b.db.db, string(kv.k))
+			continue
+		}
		b.db.db[string(kv.k)] = kv.v
	}
	return nil
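memBatch encodes deletions as log entries with a nil value, and Write replays the log, treating nil as "remove the key". One consequence worth noting: under this scheme a Put with a nil value would be indistinguishable from a Delete. A condensed model of the replay:

	type kv struct {
		k, v []byte // v == nil marks a tombstone (pending delete)
	}

	// replay applies a batch's write log to a map-backed store.
	func replay(store map[string][]byte, writes []kv) {
		for _, w := range writes {
			if w.v == nil {
				delete(store, string(w.k)) // tombstone: drop the key
				continue
			}
			store[string(w.k)] = w.v
		}
	}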
@@ -499,7 +499,7 @@ func (s uncleStats) MarshalJSON() ([]byte, error) {
	return []byte("[]"), nil
}

-// reportBlock retrieves the current chain head and repors it to the stats server.
+// reportBlock retrieves the current chain head and reports it to the stats server.
func (s *Service) reportBlock(conn *websocket.Conn, block *types.Block) error {
	// Gather the block details from the header or block chain
	details := s.assembleBlockStats(block)
@@ -21,6 +21,7 @@
package debug

import (
+	"bytes"
	"errors"
	"io"
	"os"
@@ -190,9 +191,9 @@ func (*HandlerT) WriteMemProfile(file string) error {

// Stacks returns a printed representation of the stacks of all goroutines.
func (*HandlerT) Stacks() string {
-	buf := make([]byte, 1024*1024)
-	buf = buf[:runtime.Stack(buf, true)]
-	return string(buf)
+	buf := new(bytes.Buffer)
+	pprof.Lookup("goroutine").WriteTo(buf, 2)
+	return buf.String()
}

// FreeOSMemory returns unused memory to the OS.
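The rewritten Stacks drops the fixed 1 MB buffer, which silently truncated large dumps, in favour of the pprof goroutine profile; debug level 2 prints one panic-style stack trace per goroutine and the buffer grows as needed. Equivalent standalone usage:

	package main

	import (
		"bytes"
		"fmt"
		"runtime/pprof"
	)

	func main() {
		var buf bytes.Buffer
		// debug=2: panic-style goroutine stacks, untruncated.
		pprof.Lookup("goroutine").WriteTo(&buf, 2)
		fmt.Println(buf.String())
	}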
@@ -313,8 +313,8 @@ web3._extend({
			params: 2
		}),
		new web3._extend.Method({
-			name: 'setMutexProfileRate',
-			call: 'debug_setMutexProfileRate',
+			name: 'setMutexProfileFraction',
+			call: 'debug_setMutexProfileFraction',
			params: 1
		}),
		new web3._extend.Method({
@@ -267,9 +267,16 @@ func (f *lightFetcher) announce(p *peer, head *announceData) {
			}
			n = n.parent
		}
-		// n is now the reorg common ancestor, add a new branch of nodes
+		if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
+			// if announced head block height is lower or same as n or too far from it to add
+			// intermediate nodes then discard previous announcement info and trigger a resync
+			n = nil
+			fp.nodeCnt = 0
+			fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
+		}
		if n != nil {
-			// check if the node count is too high to add new nodes
+			// n is now the reorg common ancestor, add a new branch of nodes
+			// check if the node count is too high to add new nodes, discard oldest ones if necessary
			locked := false
			for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
				if !locked {
@@ -87,6 +87,27 @@ const (
	initStatsWeight = 1
)

+// connReq represents a request for peer connection.
+type connReq struct {
+	p      *peer
+	ip     net.IP
+	port   uint16
+	result chan *poolEntry
+}
+
+// disconnReq represents a request for peer disconnection.
+type disconnReq struct {
+	entry   *poolEntry
+	stopped bool
+	done    chan struct{}
+}
+
+// registerReq represents a request for peer registration.
+type registerReq struct {
+	entry *poolEntry
+	done  chan struct{}
+}
+
// serverPool implements a pool for storing and selecting newly discovered and already
// known light server nodes. It received discovered nodes, stores statistics about
// known nodes and takes care of always having enough good quality servers connected.
@@ -105,10 +126,13 @@ type serverPool struct {
	discLookups chan bool

	entries              map[discover.NodeID]*poolEntry
-	lock                 sync.Mutex
	timeout, enableRetry chan *poolEntry
	adjustStats          chan poolStatAdjust

+	connCh     chan *connReq
+	disconnCh  chan *disconnReq
+	registerCh chan *registerReq
+
	knownQueue, newQueue       poolEntryQueue
	knownSelect, newSelect     *weightedRandomSelect
	knownSelected, newSelected int
@@ -125,6 +149,9 @@ func newServerPool(db ethdb.Database, quit chan struct{}, wg *sync.WaitGroup) *s
		timeout:      make(chan *poolEntry, 1),
		adjustStats:  make(chan poolStatAdjust, 100),
		enableRetry:  make(chan *poolEntry, 1),
+		connCh:       make(chan *connReq),
+		disconnCh:    make(chan *disconnReq),
+		registerCh:   make(chan *registerReq),
		knownSelect:  newWeightedRandomSelect(),
		newSelect:    newWeightedRandomSelect(),
		fastDiscover: true,
@@ -147,9 +174,8 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
		pool.discLookups = make(chan bool, 100)
		go pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, pool.discNodes, pool.discLookups)
	}
-
-	go pool.eventLoop()
	pool.checkDial()
+	go pool.eventLoop()
}

// connect should be called upon any incoming connection. If the connection has been
@@ -158,83 +184,44 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
// Note that whenever a connection has been accepted and a pool entry has been returned,
// disconnect should also always be called.
func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
-	pool.lock.Lock()
-	defer pool.lock.Unlock()
-	entry := pool.entries[p.ID()]
-	if entry == nil {
-		entry = pool.findOrNewNode(p.ID(), ip, port)
-	}
-	p.Log().Debug("Connecting to new peer", "state", entry.state)
-	if entry.state == psConnected || entry.state == psRegistered {
+	log.Debug("Connect new entry", "enode", p.id)
+	req := &connReq{p: p, ip: ip, port: port, result: make(chan *poolEntry, 1)}
+	select {
+	case pool.connCh <- req:
+	case <-pool.quit:
		return nil
	}
-	pool.connWg.Add(1)
-	entry.peer = p
-	entry.state = psConnected
-	addr := &poolEntryAddress{
-		ip:       ip,
-		port:     port,
-		lastSeen: mclock.Now(),
-	}
-	entry.lastConnected = addr
-	entry.addr = make(map[string]*poolEntryAddress)
-	entry.addr[addr.strKey()] = addr
-	entry.addrSelect = *newWeightedRandomSelect()
-	entry.addrSelect.update(addr)
-	return entry
+	return <-req.result
}

// registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) {
	log.Debug("Registered new entry", "enode", entry.id)
-	pool.lock.Lock()
-	defer pool.lock.Unlock()
-
-	entry.state = psRegistered
-	entry.regTime = mclock.Now()
-	if !entry.known {
-		pool.newQueue.remove(entry)
-		entry.known = true
+	req := &registerReq{entry: entry, done: make(chan struct{})}
+	select {
+	case pool.registerCh <- req:
+	case <-pool.quit:
+		return
	}
-	pool.knownQueue.setLatest(entry)
-	entry.shortRetry = shortRetryCnt
+	<-req.done
}

// disconnect should be called when ending a connection. Service quality statistics
// can be updated optionally (not updated if no registration happened, in this case
// only connection statistics are updated, just like in case of timeout)
func (pool *serverPool) disconnect(entry *poolEntry) {
+	stopped := false
+	select {
+	case <-pool.quit:
+		stopped = true
+	default:
+	}
	log.Debug("Disconnected old entry", "enode", entry.id)
-	pool.lock.Lock()
-	defer pool.lock.Unlock()
+	req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})}

-	if entry.state == psRegistered {
-		connTime := mclock.Now() - entry.regTime
-		connAdjust := float64(connTime) / float64(targetConnTime)
-		if connAdjust > 1 {
-			connAdjust = 1
-		}
-		stopped := false
-		select {
-		case <-pool.quit:
-			stopped = true
-		default:
-		}
-		if stopped {
-			entry.connectStats.add(1, connAdjust)
-		} else {
-			entry.connectStats.add(connAdjust, 1)
-		}
-	}
-
-	entry.state = psNotConnected
-	if entry.knownSelected {
-		pool.knownSelected--
-	} else {
-		pool.newSelected--
-	}
-	pool.setRetryDial(entry)
-	pool.connWg.Done()
+	// Block until disconnection request is served.
+	pool.disconnCh <- req
+	<-req.done
}

const (
@@ -277,25 +264,51 @@ func (pool *serverPool) eventLoop() {
	if pool.discSetPeriod != nil {
		pool.discSetPeriod <- time.Millisecond * 100
	}

+	// disconnect updates service quality statistics depending on the connection time
+	// and disconnection initiator.
+	disconnect := func(req *disconnReq, stopped bool) {
+		// Handle peer disconnection requests.
+		entry := req.entry
+		if entry.state == psRegistered {
+			connAdjust := float64(mclock.Now()-entry.regTime) / float64(targetConnTime)
+			if connAdjust > 1 {
+				connAdjust = 1
+			}
+			if stopped {
+				// disconnect requested by ourselves.
+				entry.connectStats.add(1, connAdjust)
+			} else {
+				// disconnect requested by server side.
+				entry.connectStats.add(connAdjust, 1)
+			}
+		}
+		entry.state = psNotConnected
+
+		if entry.knownSelected {
+			pool.knownSelected--
+		} else {
+			pool.newSelected--
+		}
+		pool.setRetryDial(entry)
+		pool.connWg.Done()
+		close(req.done)
+	}
+
	for {
		select {
		case entry := <-pool.timeout:
-			pool.lock.Lock()
			if !entry.removed {
				pool.checkDialTimeout(entry)
			}
-			pool.lock.Unlock()

		case entry := <-pool.enableRetry:
-			pool.lock.Lock()
			if !entry.removed {
				entry.delayedRetry = false
				pool.updateCheckDial(entry)
			}
-			pool.lock.Unlock()

		case adj := <-pool.adjustStats:
-			pool.lock.Lock()
			switch adj.adjustType {
			case pseBlockDelay:
				adj.entry.delayStats.add(float64(adj.time), 1)
@@ -305,13 +318,10 @@ func (pool *serverPool) eventLoop() {
			case pseResponseTimeout:
				adj.entry.timeoutStats.add(1, 1)
			}
-			pool.lock.Unlock()

		case node := <-pool.discNodes:
-			pool.lock.Lock()
			entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP)
			pool.updateCheckDial(entry)
-			pool.lock.Unlock()

		case conv := <-pool.discLookups:
			if conv {
@@ -327,15 +337,66 @@ func (pool *serverPool) eventLoop() {
			}
		}

+		case req := <-pool.connCh:
+			// Handle peer connection requests.
+			entry := pool.entries[req.p.ID()]
+			if entry == nil {
+				entry = pool.findOrNewNode(req.p.ID(), req.ip, req.port)
+			}
+			if entry.state == psConnected || entry.state == psRegistered {
+				req.result <- nil
+				continue
+			}
+			pool.connWg.Add(1)
+			entry.peer = req.p
+			entry.state = psConnected
+			addr := &poolEntryAddress{
+				ip:       req.ip,
+				port:     req.port,
+				lastSeen: mclock.Now(),
+			}
+			entry.lastConnected = addr
+			entry.addr = make(map[string]*poolEntryAddress)
+			entry.addr[addr.strKey()] = addr
+			entry.addrSelect = *newWeightedRandomSelect()
+			entry.addrSelect.update(addr)
+			req.result <- entry
+
+		case req := <-pool.registerCh:
+			// Handle peer registration requests.
+			entry := req.entry
+			entry.state = psRegistered
+			entry.regTime = mclock.Now()
+			if !entry.known {
+				pool.newQueue.remove(entry)
+				entry.known = true
+			}
+			pool.knownQueue.setLatest(entry)
+			entry.shortRetry = shortRetryCnt
+			close(req.done)
+
+		case req := <-pool.disconnCh:
+			// Handle peer disconnection requests.
+			disconnect(req, req.stopped)
+
		case <-pool.quit:
			if pool.discSetPeriod != nil {
				close(pool.discSetPeriod)
			}
-			pool.connWg.Wait()
+
+			// Spawn a goroutine to close the disconnCh after all connections are disconnected.
+			go func() {
+				pool.connWg.Wait()
+				close(pool.disconnCh)
+			}()
+
+			// Handle all remaining disconnection requests before exit.
+			for req := range pool.disconnCh {
+				disconnect(req, true)
+			}
			pool.saveNodes()
			pool.wg.Done()
			return

		}
	}
}
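The serverPool refactor above replaces the shared mutex with message passing: connect, registered and disconnect now post request structs on channels and block on a reply, while eventLoop becomes the only goroutine that touches pool state. A stripped-down sketch of that ownership pattern (illustrative types, not the les code):

	package main

	import "fmt"

	type connReq struct {
		id     string
		result chan bool
	}

	// eventLoop exclusively owns `connected`; callers never touch it directly.
	func eventLoop(reqs <-chan connReq, quit <-chan struct{}) {
		connected := make(map[string]bool)
		for {
			select {
			case req := <-reqs:
				ok := !connected[req.id]
				if ok {
					connected[req.id] = true
				}
				req.result <- ok
			case <-quit:
				return
			}
		}
	}

	func main() {
		reqs := make(chan connReq)
		quit := make(chan struct{})
		go eventLoop(reqs, quit)

		req := connReq{id: "peer-1", result: make(chan bool, 1)}
		reqs <- req
		fmt.Println(<-req.result) // true: first connection wins
		close(quit)
	}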
@@ -59,18 +59,18 @@ type trustedCheckpoint struct {
var (
	mainnetCheckpoint = trustedCheckpoint{
		name:          "mainnet",
-		sectionIdx:    174,
-		sectionHead:   common.HexToHash("a3ef48cd8f1c3a08419f0237fc7763491fe89497b3144b17adf87c1c43664613"),
-		chtRoot:       common.HexToHash("dcbeed9f4dea1b3cb75601bb27c51b9960c28e5850275402ac49a150a667296e"),
-		bloomTrieRoot: common.HexToHash("6b7497a4a03e33870a2383cb6f5e70570f12b1bf5699063baf8c71d02ca90b02"),
+		sectionIdx:    179,
+		sectionHead:   common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"),
+		chtRoot:       common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"),
+		bloomTrieRoot: common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"),
	}

	ropstenCheckpoint = trustedCheckpoint{
		name:          "ropsten",
-		sectionIdx:    102,
-		sectionHead:   common.HexToHash("9017ab08465cb2b2dee035ee5b817bbd7fa28e2c8d2cd903e0aed1cccb249e89"),
-		chtRoot:       common.HexToHash("f61c10a7a787a5ef15f0ae1ae6c13c64331e57e79d0466d2bd9b0c06833fe956"),
-		bloomTrieRoot: common.HexToHash("69f2ad19aa46d5213a90137b3d2c9bff8a7c9483f7170f0125096ff450c9a873"),
+		sectionIdx:    107,
+		sectionHead:   common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"),
+		chtRoot:       common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"),
+		bloomTrieRoot: common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"),
	}
)

@@ -199,15 +199,17 @@ func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number

// rollbackTxs marks the transactions contained in recently rolled back blocks
// as rolled back. It also removes any positional lookup entries.
func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
    batch := pool.chainDb.NewBatch()
    if list, ok := pool.mined[hash]; ok {
        for _, tx := range list {
            txHash := tx.Hash()
            rawdb.DeleteTxLookupEntry(pool.chainDb, txHash)
            rawdb.DeleteTxLookupEntry(batch, txHash)
            pool.pending[txHash] = tx
            txc.setState(txHash, false)
        }
        delete(pool.mined, hash)
    }
    batch.Write()
}

// reorgOnNewHead sets a new head header, processing (and rolling back if necessary)

@@ -504,14 +506,16 @@ func (self *TxPool) Content() (map[common.Address]types.Transactions, map[common

func (self *TxPool) RemoveTransactions(txs types.Transactions) {
    self.mu.Lock()
    defer self.mu.Unlock()

    var hashes []common.Hash
    batch := self.chainDb.NewBatch()
    for _, tx := range txs {
        //self.RemoveTx(tx.Hash())
        hash := tx.Hash()
        delete(self.pending, hash)
        self.chainDb.Delete(hash[:])
        batch.Delete(hash.Bytes())
        hashes = append(hashes, hash)
    }
    batch.Write()
    self.relay.Discard(hashes)
}
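Both hunks above swap per-key database writes for a batch that is flushed once. A minimal sketch of that idiom against a toy store (the real code uses ethdb batches; the types here are illustrative):

```go
package main

import "fmt"

// batch queues operations in memory until Write commits them.
type batch struct {
	deletions [][]byte
}

// Delete queues a key for removal; nothing touches the store yet.
func (b *batch) Delete(key []byte) {
	b.deletions = append(b.deletions, key)
}

// Write flushes every queued operation in one commit instead of issuing
// one write per key, which is the point of the change above.
func (b *batch) Write() {
	fmt.Printf("committing %d deletions atomically\n", len(b.deletions))
}

func main() {
	b := &batch{}
	for _, key := range [][]byte{[]byte("tx1"), []byte("tx2")} {
		b.Delete(key) // queued, not yet on disk
	}
	b.Write() // single commit
}
```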
@@ -15,7 +15,7 @@ import (

const (
    timeFormat     = "2006-01-02T15:04:05-0700"
    termTimeFormat = "01-02|15:04:05"
    termTimeFormat = "01-02|15:04:05.000"
    floatFormat    = 'f'
    termMsgJust    = 40
)
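The new layout suffix is Go reference-time syntax: `.000` appends zero-padded milliseconds to each terminal log timestamp. A quick check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A fixed instant 60ms past the second, for a deterministic example.
	ts := time.Date(2018, 6, 12, 15, 4, 5, 60e6, time.UTC)
	fmt.Println(ts.Format("01-02|15:04:05"))     // 06-12|15:04:05
	fmt.Println(ts.Format("01-02|15:04:05.000")) // 06-12|15:04:05.060
}
```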
@@ -12,6 +12,7 @@ const timeKey = "t"
const lvlKey = "lvl"
const msgKey = "msg"
const errorKey = "LOG15_ERROR"
const skipLevel = 2

type Lvl int

@@ -127,13 +128,13 @@ type logger struct {
    h *swapHandler
}

func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
func (l *logger) write(msg string, lvl Lvl, ctx []interface{}, skip int) {
    l.h.Log(&Record{
        Time: time.Now(),
        Lvl:  lvl,
        Msg:  msg,
        Ctx:  newContext(l.ctx, ctx),
        Call: stack.Caller(2),
        Call: stack.Caller(skip),
        KeyNames: RecordKeyNames{
            Time: timeKey,
            Msg:  msgKey,

@@ -157,27 +158,27 @@ func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
}

func (l *logger) Trace(msg string, ctx ...interface{}) {
    l.write(msg, LvlTrace, ctx)
    l.write(msg, LvlTrace, ctx, skipLevel)
}

func (l *logger) Debug(msg string, ctx ...interface{}) {
    l.write(msg, LvlDebug, ctx)
    l.write(msg, LvlDebug, ctx, skipLevel)
}

func (l *logger) Info(msg string, ctx ...interface{}) {
    l.write(msg, LvlInfo, ctx)
    l.write(msg, LvlInfo, ctx, skipLevel)
}

func (l *logger) Warn(msg string, ctx ...interface{}) {
    l.write(msg, LvlWarn, ctx)
    l.write(msg, LvlWarn, ctx, skipLevel)
}

func (l *logger) Error(msg string, ctx ...interface{}) {
    l.write(msg, LvlError, ctx)
    l.write(msg, LvlError, ctx, skipLevel)
}

func (l *logger) Crit(msg string, ctx ...interface{}) {
    l.write(msg, LvlCrit, ctx)
    l.write(msg, LvlCrit, ctx, skipLevel)
    os.Exit(1)
}
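The skip parameter threaded through write controls which stack frame a log record blames as the call site. A self-contained illustration with runtime.Caller (function names hypothetical), mirroring why the leveled helpers pass a fixed skipLevel of 2:

```go
package main

import (
	"fmt"
	"runtime"
)

// report mimics logger.write: skip chooses which stack frame is blamed.
func report(msg string, skip int) {
	_, file, line, _ := runtime.Caller(skip)
	fmt.Printf("%s:%d: %s\n", file, line, msg)
}

// info mimics a leveled helper such as Info: it adds one frame of its
// own, so it passes skip=2 to point past itself and past report.
func info(msg string) {
	report(msg, 2)
}

func main() {
	info("hello") // reports this line's location, not info's or report's
}
```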
21
log/root.go
@@ -31,31 +31,40 @@ func Root() Logger {

// Trace is a convenient alias for Root().Trace
func Trace(msg string, ctx ...interface{}) {
    root.write(msg, LvlTrace, ctx)
    root.write(msg, LvlTrace, ctx, skipLevel)
}

// Debug is a convenient alias for Root().Debug
func Debug(msg string, ctx ...interface{}) {
    root.write(msg, LvlDebug, ctx)
    root.write(msg, LvlDebug, ctx, skipLevel)
}

// Info is a convenient alias for Root().Info
func Info(msg string, ctx ...interface{}) {
    root.write(msg, LvlInfo, ctx)
    root.write(msg, LvlInfo, ctx, skipLevel)
}

// Warn is a convenient alias for Root().Warn
func Warn(msg string, ctx ...interface{}) {
    root.write(msg, LvlWarn, ctx)
    root.write(msg, LvlWarn, ctx, skipLevel)
}

// Error is a convenient alias for Root().Error
func Error(msg string, ctx ...interface{}) {
    root.write(msg, LvlError, ctx)
    root.write(msg, LvlError, ctx, skipLevel)
}

// Crit is a convenient alias for Root().Crit
func Crit(msg string, ctx ...interface{}) {
    root.write(msg, LvlCrit, ctx)
    root.write(msg, LvlCrit, ctx, skipLevel)
    os.Exit(1)
}

// Output is a convenient alias for write, allowing for the modification of
// the calldepth (number of stack frames to skip).
// calldepth influences the reported line number of the log message.
// A calldepth of zero reports the immediate caller of Output.
// Non-zero calldepth skips as many stack frames.
func Output(msg string, lvl Lvl, calldepth int, ctx ...interface{}) {
    root.write(msg, lvl, ctx, calldepth+skipLevel)
}
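A sketch of how a caller might use the new Output alias; warnf is a hypothetical wrapper, and a calldepth of 1 skips warnf's own frame so the record points at warnf's caller:

```go
package main

import "github.com/ethereum/go-ethereum/log"

// warnf is a hypothetical helper around the package-level Output alias.
// calldepth 1 makes the reported call site the line that called warnf.
func warnf(msg string) {
	log.Output(msg, log.LvlWarn, 1)
}

func main() {
	warnf("disk nearly full") // reported call site: this line
}
```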
@@ -2,10 +2,10 @@ package influxdb

import (
    "fmt"
    "log"
    uurl "net/url"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/influxdata/influxdb/client"
)

@@ -35,7 +35,7 @@ func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, pass
func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
    u, err := uurl.Parse(url)
    if err != nil {
        log.Printf("unable to parse InfluxDB url %s. err=%v", url, err)
        log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
        return
    }

@@ -51,7 +51,7 @@ func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, userna
        cache: make(map[string]int64),
    }
    if err := rep.makeClient(); err != nil {
        log.Printf("unable to make InfluxDB client. err=%v", err)
        log.Warn("Unable to make InfluxDB client", "err", err)
        return
    }

@@ -76,15 +76,15 @@ func (r *reporter) run() {
    select {
    case <-intervalTicker:
        if err := r.send(); err != nil {
            log.Printf("unable to send to InfluxDB. err=%v", err)
            log.Warn("Unable to send to InfluxDB", "err", err)
        }
    case <-pingTicker:
        _, _, err := r.client.Ping()
        if err != nil {
            log.Printf("got error while sending a ping to InfluxDB, trying to recreate client. err=%v", err)
            log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)

            if err = r.makeClient(); err != nil {
                log.Printf("unable to make InfluxDB client. err=%v", err)
                log.Warn("Unable to make InfluxDB client", "err", err)
            }
        }
    }
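The log.Printf to log.Warn migration above also changes the calling convention: a fixed message plus alternating key/value pairs replaces a format string, which keeps the fields machine-readable. Side by side:

```go
package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	err := errors.New("connection refused")
	url := "http://localhost:8086"

	// Printf-style (before): message and data fused into one string.
	//   log.Printf("unable to parse InfluxDB url %s. err=%v", url, err)

	// Structured style (after): constant message, tagged fields.
	log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
}
```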
@@ -47,8 +47,8 @@ func TestTimerStop(t *testing.T) {

func TestTimerFunc(t *testing.T) {
    tm := NewTimer()
    tm.Time(func() { time.Sleep(50e6) })
    if max := tm.Max(); 35e6 > max || max > 95e6 {
        t.Errorf("tm.Max(): 35e6 > %v || %v > 95e6\n", max, max)
    if max := tm.Max(); 35e6 > max || max > 145e6 {
        t.Errorf("tm.Max(): 35e6 > %v || %v > 145e6\n", max, max)
    }
}
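The widened upper bound (95ms to 145ms) reflects that a sleep-based timer test measures wall clock, so the assertion needs generous slack on slow or loaded CI machines. The same tolerance check in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(50 * time.Millisecond)
	elapsed := time.Since(start)

	// The lower bound guards against a broken timer; the loose upper
	// bound tolerates scheduler jitter rather than asserting exact timing.
	if elapsed < 35*time.Millisecond || elapsed > 145*time.Millisecond {
		fmt.Println("unexpected duration:", elapsed)
	} else {
		fmt.Println("within tolerance:", elapsed)
	}
}
```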