Compare commits


33 Commits

Author SHA1 Message Date
1db4ecdc0b params: bump to 1.7.2 stable 2017-10-14 15:58:53 +03:00
fdb3bd287e Merge pull request #15298 from karalabe/stack-then-readonly
core/vm: check opcode stack before readonly enforcement
2017-10-14 15:57:08 +03:00
a91e682234 core/vm: check opcode stack before readonly enforcement 2017-10-14 15:42:48 +03:00
41b7745529 Merge pull request #15288 from karalabe/trie-hash-benchmark
trie: make hasher benchmark meaningful post-caches
2017-10-13 15:50:05 +03:00
a5fcaa55ac trie: make hasher benchmark meaningful post-caches 2017-10-13 11:22:38 +03:00
0ed4d76c79 Merge pull request #15275 from mcdee/master
core/types: fix test for TransactionsByPriceAndNonce
2017-10-13 10:51:22 +03:00
4b5e797288 Merge pull request #15287 from ernestodeltoro/typo_thoretical
ethash: fix typo
2017-10-13 10:47:48 +03:00
2e83c82f80 ethash: fix typo 2017-10-13 01:13:52 -04:00
8d8034fe59 Merge pull request #15269 from karalabe/puppeth-dumb-ip-filtering
cmd/puppeth: use dumb textual IP filtering
2017-10-12 13:46:57 +03:00
fd0e7b1c67 Merge pull request #15280 from terasum/master
miner: fix typo
2017-10-12 13:46:29 +03:00
e9382c6e9f miner: fix typo 2017-10-12 10:19:23 +08:00
c599b78f62 core/types: fix test for TransactionsByPriceAndNonce 2017-10-11 11:35:44 +01:00
ad44475231 Merge pull request #14785 from Arachnid/downloaddb
cmd: Added support for downloading to another DB instance
2017-10-11 13:10:18 +03:00
35767dfd0c cmd, eth: separate out FakePeer for future reuse 2017-10-10 15:52:11 +03:00
345332906c cmd: Added support for copying data to another DB instance 2017-10-10 15:52:10 +03:00
cefeb58598 event: fix typo (#15270) 2017-10-10 14:11:15 +02:00
b45cc0c9e8 cmd/puppeth: use dumb textual IP filtering 2017-10-10 12:35:09 +03:00
3680cd5926 params: explain EIP150Hash (#15237) 2017-10-10 10:56:33 +02:00
d3beff7e20 consensus/clique: add fork hash enforcement (#15236) 2017-10-10 10:54:47 +02:00
40a3856af9 eth/fetcher: check the origin of filter tasks (#14975)
* eth/fetcher: check the origin of filter task

* eth/fetcher: add some details to fetcher logs
2017-10-10 11:53:05 +03:00
89860f4197 swarm/fuse: return amount of data written if the file exists (#15261)
If the file already existed, the WriteResponse.Size was being set
as the length of the entire file, not just the amount that was
written to the existing file.

Fixes #15216
2017-10-09 12:45:30 +02:00
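A minimal sketch of the behaviour the commit above describes, using illustrative names rather than the actual swarm/fuse types: the write path should report the number of bytes accepted by this request, not the length of the pre-existing file.

// Illustrative only, not the swarm/fuse implementation.
// writeAt overwrites/extends buf at offset off with data and returns the
// updated buffer plus the number of bytes written by THIS call.
func writeAt(buf []byte, off int, data []byte) ([]byte, int) {
	if end := off + len(data); end > len(buf) {
		grown := make([]byte, end)
		copy(grown, buf)
		buf = grown
	}
	written := copy(buf[off:], data)
	return buf, written // len(data), not len(buf)
}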
88b1db7288 accounts/keystore: scan key directory without locks held (#15171)
The accountCache contains a file cache, and remembers from
scan to scan what files were present earlier. Thus, whenever
there's a change, the scan phase only bothers processing new
and removed files.
2017-10-09 12:40:50 +02:00
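The mechanism described above amounts to diffing a fresh directory listing against the file set remembered from the previous scan. A rough standalone sketch of that idea follows (plain maps with per-file mtimes; the actual change in the account_cache.go hunk below uses gopkg.in/fatih/set.v0 sets and a single newest-mtime watermark):

import (
	"io/ioutil"
	"path/filepath"
	"time"
)

// scanDiff lists dir and compares it against prev (path -> mtime from the last
// scan), returning the paths that are new, missing or modified. The caller then
// only re-parses the returned key files instead of the whole directory.
func scanDiff(dir string, prev map[string]time.Time) (added, missing, modified []string, err error) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, nil, nil, err
	}
	current := make(map[string]time.Time, len(entries))
	for _, fi := range entries {
		path := filepath.Join(dir, fi.Name())
		current[path] = fi.ModTime()
		if old, seen := prev[path]; !seen {
			added = append(added, path)
		} else if fi.ModTime().After(old) {
			modified = append(modified, path)
		}
	}
	for path := range prev {
		if _, ok := current[path]; !ok {
			missing = append(missing, path)
		}
	}
	return added, missing, modified, nil
}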
7a045af05b whisper/whisperv5: set filter SymKeyHash on creation (#15165) 2017-10-06 16:04:21 +02:00
36243c7ed8 internal/web3ext: make whisper v5 methods work (#15111) 2017-10-06 15:53:29 +02:00
1ae0411d41 swarm/api: fixed 404 handling on missing default entry (#15139) 2017-10-06 15:45:54 +02:00
d54e3539d4 p2p/nat: delete port mapping before adding (#15222)
Fixes #1024
2017-10-06 13:39:47 +02:00
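The fix above (its p2p/nat hunk appears near the end of this diff) is a delete-then-add: clear any stale UPnP mapping before registering the new one, so a leftover entry from an earlier run does not conflict with the fresh lease. A hedged sketch against a hypothetical client interface:

// portMapper is a hypothetical stand-in for the UPnP client used by p2p/nat.
type portMapper interface {
	AddPortMapping(proto string, extport, intport uint16, lifetime uint32) error
	DeletePortMapping(proto string, extport, intport uint16) error
}

// addMapping deletes best-effort before adding: if no stale mapping exists the
// delete is a harmless no-op, otherwise it clears the way for the new mapping.
func addMapping(c portMapper, proto string, extport, intport uint16, lifetime uint32) error {
	_ = c.DeletePortMapping(proto, extport, intport) // ignore "not found" style errors
	return c.AddPortMapping(proto, extport, intport, lifetime)
}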
5df0b240ae eth: fix typo (#15252) 2017-10-06 12:55:18 +02:00
605c2b261f mobile: fix variadic argument expansion 2017-10-05 21:08:14 +03:00
4bc60e3aa8 Merge pull request #15241 from karalabe/puppeth-fork-management
cmd/puppeth: support managing fork block in the chain config
2017-10-05 19:40:25 +03:00
eb9abbd3f2 Merge pull request #15248 from karalabe/update-liner
vendor: update liner to fix docker and mips bugs
2017-10-05 16:45:13 +03:00
41d361565b vendor: update liner to fix docker and mips bugs 2017-10-05 15:57:33 +03:00
edba5e9854 cmd/puppeth: support managing fork block in the chain config 2017-10-04 12:15:58 +03:00
c0a1f1c907 params, VERSION: v1.7.2 unstable 2017-10-03 19:19:37 +02:00
49 changed files with 1860 additions and 3281 deletions


@ -1 +1 @@
1.7.1
1.7.2


@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gopkg.in/fatih/set.v0"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
@ -71,6 +72,14 @@ type accountCache struct {
byAddr map[common.Address][]accounts.Account
throttle *time.Timer
notify chan struct{}
fileC fileCache
}
// fileCache is a cache of files seen during scan of keystore
type fileCache struct {
all *set.SetNonTS // list of all files
mtime time.Time // latest mtime seen
mu sync.RWMutex
}
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
@ -78,6 +87,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
keydir: keydir,
byAddr: make(map[common.Address][]accounts.Account),
notify: make(chan struct{}, 1),
fileC: fileCache{all: set.NewNonTS()},
}
ac.watcher = newWatcher(ac)
return ac, ac.notify
@ -127,6 +137,23 @@ func (ac *accountCache) delete(removed accounts.Account) {
}
}
// deleteByFile removes an account referenced by the given path.
func (ac *accountCache) deleteByFile(path string) {
ac.mu.Lock()
defer ac.mu.Unlock()
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path })
if i < len(ac.all) && ac.all[i].URL.Path == path {
removed := ac.all[i]
ac.all = append(ac.all[:i], ac.all[i+1:]...)
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
delete(ac.byAddr, removed.Address)
} else {
ac.byAddr[removed.Address] = ba
}
}
}
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
for i := range slice {
if slice[i] == elem {
@ -167,15 +194,16 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
default:
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
copy(err.Matches, matches)
sort.Sort(accountsByURL(err.Matches))
return accounts.Account{}, err
}
}
func (ac *accountCache) maybeReload() {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.watcher.running {
ac.mu.Unlock()
return // A watcher is running and will keep the cache up-to-date.
}
if ac.throttle == nil {
@ -184,12 +212,15 @@ func (ac *accountCache) maybeReload() {
select {
case <-ac.throttle.C:
default:
ac.mu.Unlock()
return // The cache was reloaded recently.
}
}
// No watcher running, start it.
ac.watcher.start()
ac.reload()
ac.throttle.Reset(minReloadInterval)
ac.mu.Unlock()
ac.scanAccounts()
}
func (ac *accountCache) close() {
@ -205,54 +236,76 @@ func (ac *accountCache) close() {
ac.mu.Unlock()
}
// reload caches addresses of existing accounts.
// Callers must hold ac.mu.
func (ac *accountCache) reload() {
accounts, err := ac.scan()
// scanFiles performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: new, missing , modified
func (fc *fileCache) scanFiles(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
t0 := time.Now()
files, err := ioutil.ReadDir(keyDir)
t1 := time.Now()
if err != nil {
log.Debug("Failed to reload keystore contents", "err", err)
return nil, nil, nil, err
}
ac.all = accounts
sort.Sort(ac.all)
for k := range ac.byAddr {
delete(ac.byAddr, k)
}
for _, a := range accounts {
ac.byAddr[a.Address] = append(ac.byAddr[a.Address], a)
}
select {
case ac.notify <- struct{}{}:
default:
}
log.Debug("Reloaded keystore contents", "accounts", len(ac.all))
}
fc.mu.RLock()
prevMtime := fc.mtime
fc.mu.RUnlock()
func (ac *accountCache) scan() ([]accounts.Account, error) {
files, err := ioutil.ReadDir(ac.keydir)
if err != nil {
return nil, err
}
var (
buf = new(bufio.Reader)
addrs []accounts.Account
keyJSON struct {
Address string `json:"address"`
}
)
filesNow := set.NewNonTS()
moddedFiles := set.NewNonTS()
var newMtime time.Time
for _, fi := range files {
path := filepath.Join(ac.keydir, fi.Name())
modTime := fi.ModTime()
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
logger := log.New("path", path)
filesNow.Add(path)
if modTime.After(prevMtime) {
moddedFiles.Add(path)
}
if modTime.After(newMtime) {
newMtime = modTime
}
}
t2 := time.Now()
fc.mu.Lock()
// Missing = previous - current
missing := set.Difference(fc.all, filesNow)
// New = current - previous
newFiles := set.Difference(filesNow, fc.all)
// Modified = modified - new
modified := set.Difference(moddedFiles, newFiles)
fc.all = filesNow
fc.mtime = newMtime
fc.mu.Unlock()
t3 := time.Now()
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
return newFiles, missing, modified, nil
}
// scanAccounts checks if any changes have occurred on the filesystem, and
// updates the account cache accordingly
func (ac *accountCache) scanAccounts() error {
newFiles, missingFiles, modified, err := ac.fileC.scanFiles(ac.keydir)
t1 := time.Now()
if err != nil {
log.Debug("Failed to reload keystore contents", "err", err)
return err
}
var (
buf = new(bufio.Reader)
keyJSON struct {
Address string `json:"address"`
}
)
readAccount := func(path string) *accounts.Account {
fd, err := os.Open(path)
if err != nil {
logger.Trace("Failed to open keystore file", "err", err)
continue
log.Trace("Failed to open keystore file", "path", path, "err", err)
return nil
}
defer fd.Close()
buf.Reset(fd)
// Parse the address.
keyJSON.Address = ""
@ -260,15 +313,45 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
addr := common.HexToAddress(keyJSON.Address)
switch {
case err != nil:
logger.Debug("Failed to decode keystore key", "err", err)
log.Debug("Failed to decode keystore key", "path", path, "err", err)
case (addr == common.Address{}):
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
}
fd.Close()
return nil
}
return addrs, err
for _, p := range newFiles.List() {
path, _ := p.(string)
a := readAccount(path)
if a != nil {
ac.add(*a)
}
}
for _, p := range missingFiles.List() {
path, _ := p.(string)
ac.deleteByFile(path)
}
for _, p := range modified.List() {
path, _ := p.(string)
a := readAccount(path)
ac.deleteByFile(path)
if a != nil {
ac.add(*a)
}
}
t2 := time.Now()
select {
case ac.notify <- struct{}{}:
default:
}
log.Trace("Handled keystore changes", "time", t2.Sub(t1))
return nil
}
func skipKeyFile(fi os.FileInfo) bool {


@ -18,6 +18,7 @@ package keystore
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
@ -295,3 +296,101 @@ func TestCacheFind(t *testing.T) {
}
}
}
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
return fmt.Errorf("wasn't notified of new accounts")
}
return nil
}
time.Sleep(d)
}
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
}
// TestUpdatedKeyfileContents tests that updating the contents of a keystore file
// is noticed by the watcher, and the account cache is updated accordingly
func TestUpdatedKeyfileContents(t *testing.T) {
t.Parallel()
// Create a temporary kesytore to test with
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts()
if len(list) > 0 {
t.Error("initial account list not empty:", list)
}
time.Sleep(100 * time.Millisecond)
// Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa")
// Place one of our testfiles in there
if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
t.Fatal(err)
}
// ks should see the account.
wantAccounts := []accounts.Account{cachetestAccounts[0]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
if err := waitForAccounts(wantAccounts, ks); err != nil {
t.Error(err)
return
}
// Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
t.Fatal(err)
return
}
wantAccounts = []accounts.Account{cachetestAccounts[1]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
if err := waitForAccounts(wantAccounts, ks); err != nil {
t.Errorf("First replacement failed")
t.Error(err)
return
}
// Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
t.Fatal(err)
return
}
wantAccounts = []accounts.Account{cachetestAccounts[2]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
if err := waitForAccounts(wantAccounts, ks); err != nil {
t.Errorf("Second replacement failed")
t.Error(err)
return
}
// Now replace file contents with crap
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
t.Fatal(err)
return
}
if err := waitForAccounts([]accounts.Account{}, ks); err != nil {
t.Errorf("Emptying account file failed")
t.Error(err)
return
}
}
// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
func forceCopyFile(dst, src string) error {
data, err := ioutil.ReadFile(src)
if err != nil {
return err
}
return ioutil.WriteFile(dst, data, 0644)
}


@ -272,82 +272,104 @@ func TestWalletNotifierLifecycle(t *testing.T) {
t.Errorf("wallet notifier didn't terminate after unsubscribe")
}
type walletEvent struct {
accounts.WalletEvent
a accounts.Account
}
// Tests that wallet notifications and correctly fired when accounts are added
// or deleted from the keystore.
func TestWalletNotifications(t *testing.T) {
// Create a temporary kesytore to test with
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
// Subscribe to the wallet feed
updates := make(chan accounts.WalletEvent, 1)
sub := ks.Subscribe(updates)
// Subscribe to the wallet feed and collect events.
var (
events []walletEvent
updates = make(chan accounts.WalletEvent)
sub = ks.Subscribe(updates)
)
defer sub.Unsubscribe()
go func() {
for {
select {
case ev := <-updates:
events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
case <-sub.Err():
close(updates)
return
}
}
}()
// Randomly add and remove account and make sure events and wallets are in sync
live := make(map[common.Address]accounts.Account)
// Randomly add and remove accounts.
var (
live = make(map[common.Address]accounts.Account)
wantEvents []walletEvent
)
for i := 0; i < 1024; i++ {
// Execute a creation or deletion and ensure event arrival
if create := len(live) == 0 || rand.Int()%4 > 0; create {
// Add a new account and ensure wallet notifications arrives
account, err := ks.NewAccount("")
if err != nil {
t.Fatalf("failed to create test account: %v", err)
}
select {
case event := <-updates:
if event.Kind != accounts.WalletArrived {
t.Errorf("non-arrival event on account creation")
}
if event.Wallet.Accounts()[0] != account {
t.Errorf("account mismatch on created wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
}
default:
t.Errorf("wallet arrival event not fired on account creation")
}
live[account.Address] = account
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletArrived}, account})
} else {
// Select a random account to delete (crude, but works)
// Delete a random account.
var account accounts.Account
for _, a := range live {
account = a
break
}
// Remove an account and ensure wallet notifiaction arrives
if err := ks.Delete(account, ""); err != nil {
t.Fatalf("failed to delete test account: %v", err)
}
select {
case event := <-updates:
if event.Kind != accounts.WalletDropped {
t.Errorf("non-drop event on account deletion")
}
if event.Wallet.Accounts()[0] != account {
t.Errorf("account mismatch on deleted wallet: have %v, want %v", event.Wallet.Accounts()[0], account)
}
default:
t.Errorf("wallet departure event not fired on account creation")
}
delete(live, account.Address)
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletDropped}, account})
}
// Retrieve the list of wallets and ensure it matches with our required live set
liveList := make([]accounts.Account, 0, len(live))
for _, account := range live {
liveList = append(liveList, account)
}
sort.Sort(accountsByURL(liveList))
}
wallets := ks.Wallets()
if len(liveList) != len(wallets) {
t.Errorf("wallet list doesn't match required accounts: have %v, want %v", wallets, liveList)
} else {
for j, wallet := range wallets {
if accs := wallet.Accounts(); len(accs) != 1 {
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
} else if accs[0] != liveList[j] {
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
}
// Shut down the event collector and check events.
sub.Unsubscribe()
<-updates
checkAccounts(t, live, ks.Wallets())
checkEvents(t, wantEvents, events)
}
// checkAccounts checks that all known live accounts are present in the wallet list.
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
if len(live) != len(wallets) {
t.Errorf("wallet list doesn't match required accounts: have %d, want %d", len(wallets), len(live))
return
}
liveList := make([]accounts.Account, 0, len(live))
for _, account := range live {
liveList = append(liveList, account)
}
sort.Sort(accountsByURL(liveList))
for j, wallet := range wallets {
if accs := wallet.Accounts(); len(accs) != 1 {
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
} else if accs[0] != liveList[j] {
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
}
}
}
// checkEvents checks that all events in 'want' are present in 'have'. Events may be present multiple times.
func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
for _, wantEv := range want {
nmatch := 0
for ; len(have) > 0; nmatch++ {
if have[0].Kind != wantEv.Kind || have[0].a != wantEv.a {
break
}
have = have[1:]
}
if nmatch == 0 {
t.Fatalf("can't find event with Kind=%v for %x", wantEv.Kind, wantEv.a.Address)
}
}
}


@ -70,7 +70,6 @@ func (w *watcher) loop() {
return
}
defer notify.Stop(w.ev)
logger.Trace("Started watching keystore folder")
defer logger.Trace("Stopped watching keystore folder")
@ -82,9 +81,9 @@ func (w *watcher) loop() {
// When an event occurs, the reload call is delayed a bit so that
// multiple events arriving quickly only cause a single reload.
var (
debounce = time.NewTimer(0)
debounceDuration = 500 * time.Millisecond
inCycle, hadEvent bool
debounce = time.NewTimer(0)
debounceDuration = 500 * time.Millisecond
rescanTriggered = false
)
defer debounce.Stop()
for {
@ -92,22 +91,14 @@ func (w *watcher) loop() {
case <-w.quit:
return
case <-w.ev:
if !inCycle {
// Trigger the scan (with delay), if not already triggered
if !rescanTriggered {
debounce.Reset(debounceDuration)
inCycle = true
} else {
hadEvent = true
rescanTriggered = true
}
case <-debounce.C:
w.ac.mu.Lock()
w.ac.reload()
w.ac.mu.Unlock()
if hadEvent {
debounce.Reset(debounceDuration)
inCycle, hadEvent = true, false
} else {
inCycle, hadEvent = false, false
}
w.ac.scanAccounts()
rescanTriggered = false
}
}
}


@ -31,7 +31,9 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/syndtr/goleveldb/leveldb/util"
@ -71,7 +73,7 @@ It expects the genesis file as argument.`,
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.
If only one file is used, import error will result in failure. If several files are used,
If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
}
exportCommand = cli.Command{
@ -90,6 +92,23 @@ Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
}
copydbCommand = cli.Command{
Action: utils.MigrateFlags(copyDb),
Name: "copydb",
Usage: "Create a local chain from a target chaindata folder",
ArgsUsage: "<sourceChaindataDir>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.SyncModeFlag,
utils.FakePoWFlag,
utils.TestnetFlag,
utils.RinkebyFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The first argument must be the directory containing the blockchain to download from`,
}
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
@ -268,6 +287,54 @@ func exportChain(ctx *cli.Context) error {
return nil
}
func copyDb(ctx *cli.Context) error {
// Ensure we have a source chain directory to copy
if len(ctx.Args()) != 1 {
utils.Fatalf("Source chaindata directory path argument missing")
}
// Initialize a new chain for the running node to sync into
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
// Create a source peer to satisfy downloader requests from
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
if err != nil {
return err
}
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
if err != nil {
return err
}
peer := downloader.NewFakePeer("local", db, hc, dl)
if err = dl.RegisterPeer("local", 63, peer); err != nil {
return err
}
// Synchronise with the simulated peer
start := time.Now()
currentHeader := hc.CurrentHeader()
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
return err
}
for dl.Synchronising() {
time.Sleep(10 * time.Millisecond)
}
fmt.Printf("Database copy done in %v\n", time.Since(start))
// Compact the entire database to remove any sync overhead
start = time.Now()
fmt.Println("Compacting entire database...")
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
return nil
}
func removeDB(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)


@ -146,6 +146,7 @@ func init() {
initCommand,
importCommand,
exportCommand,
copydbCommand,
removedbCommand,
dumpCommand,
// See monitorcmd.go:


@ -161,6 +161,28 @@ func (w *wizard) readDefaultInt(def int) int {
}
}
// readDefaultBigInt reads a single line from stdin, trimming if from spaces,
// enforcing it to parse into a big integer. If an empty line is entered, the
// default value is returned.
func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text == "" {
return def
}
val, ok := new(big.Int).SetString(text, 0)
if !ok {
log.Error("Invalid input, expected big integer")
continue
}
return val
}
}
/*
// readFloat reads a single line from stdin, trimming if from spaces, enforcing it
// to parse into a float.
@ -280,8 +302,10 @@ func (w *wizard) readJSON() string {
}
// readIPAddress reads a single line from stdin, trimming if from spaces and
// converts it to a network IP address.
func (w *wizard) readIPAddress() net.IP {
// returning it if it's convertible to an IP address. The reason for keeping
// the user input format instead of returning a Go net.IP is to match with
// weird formats used by ethstats, which compares IPs textually, not by value.
func (w *wizard) readIPAddress() string {
for {
// Read the IP address from the user
fmt.Printf("> ")
@ -290,14 +314,13 @@ func (w *wizard) readIPAddress() net.IP {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text == "" {
return nil
return ""
}
// Make sure it looks ok and return it if so
ip := net.ParseIP(text)
if ip == nil {
if ip := net.ParseIP(text); ip == nil {
log.Error("Invalid IP address, please retry")
continue
}
return ip
return text
}
}


@ -18,6 +18,7 @@ package main
import (
"fmt"
"sort"
"github.com/ethereum/go-ethereum/log"
)
@ -64,17 +65,37 @@ func (w *wizard) deployEthstats() {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
if w.readDefaultString("y") != "y" {
infos.banned = nil
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Println("Which IP addresses should be blacklisted?")
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
if w.readDefaultString("n") != "n" {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
fmt.Println()
fmt.Println("Which additional IP addresses should be blacklisted?")
for {
if ip := w.readIPAddress(); ip != nil {
infos.banned = append(infos.banned, ip.String())
if ip := w.readIPAddress(); ip != "" {
infos.banned = append(infos.banned, ip)
continue
}
break
}
fmt.Println()
fmt.Println("Which IP addresses should not be blacklisted?")
for {
if ip := w.readIPAddress(); ip != "" {
for i, addr := range infos.banned {
if ip == addr {
infos.banned = append(infos.banned[:i], infos.banned[i+1:]...)
break
}
}
continue
}
break
}
sort.Strings(infos.banned)
}
// Try to deploy the ethstats server on the host
trusted := make([]string, 0, len(w.servers))


@ -18,7 +18,9 @@ package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"time"
@ -135,3 +137,53 @@ func (w *wizard) makeGenesis() {
// All done, store the genesis and flush to disk
w.conf.genesis = genesis
}
// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
fmt.Println(" 2. Export genesis configuration")
choice := w.read()
switch {
case choice == "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.genesis.Config.HomesteadBlock)
w.conf.genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.genesis.Config.HomesteadBlock)
fmt.Println()
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP150Block)
w.conf.genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP150Block)
fmt.Println()
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP155Block)
w.conf.genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP155Block)
fmt.Println()
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP158Block)
w.conf.genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP158Block)
fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.genesis.Config.ByzantiumBlock)
w.conf.genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.genesis.Config.ByzantiumBlock)
out, _ := json.MarshalIndent(w.conf.genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
case choice == "2":
// Save whatever genesis configuration we currently have
fmt.Println()
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
}
log.Info("Exported existing genesis block")
default:
log.Error("That's not something I can do")
}
}


@ -98,7 +98,7 @@ func (w *wizard) run() {
if w.conf.genesis == nil {
fmt.Println(" 2. Configure new genesis")
} else {
fmt.Println(" 2. Save existing genesis")
fmt.Println(" 2. Manage existing genesis")
}
if len(w.servers) == 0 {
fmt.Println(" 3. Track new remote server")
@ -118,18 +118,10 @@ func (w *wizard) run() {
w.networkStats(false)
case choice == "2":
// If we don't have a genesis, make one
if w.conf.genesis == nil {
w.makeGenesis()
} else {
// Otherwise just save whatever we currently have
fmt.Println()
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
}
log.Info("Exported existing genesis block")
w.manageGenesis()
}
case choice == "3":
if len(w.servers) == 0 {


@ -129,7 +129,7 @@ func (w *wizard) networkStats(tips bool) {
}
}
// If a genesis block was found, load it into our configs
if protips.genesis != "" {
if protips.genesis != "" && w.conf.genesis == nil {
genesis := new(core.Genesis)
if err := json.Unmarshal([]byte(protips.genesis), genesis); err != nil {
log.Error("Failed to parse remote genesis", "err", err)


@ -31,6 +31,8 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
@ -1086,17 +1088,22 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
var err error
chainDb = MakeChainDatabase(ctx, stack)
engine := ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
var engine consensus.Engine
if config.Clique != nil {
engine = clique.New(config.Clique, chainDb)
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
if err != nil {


@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@ -313,6 +314,10 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
return errInvalidDifficulty
}
}
// If all checks passed, validate any special fields for hard forks
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
return err
}
// All basic checks passed, verify cascading fields
return c.verifyCascadingFields(chain, header, parents)
}


@ -74,7 +74,7 @@ type testerChainReader struct {
db ethdb.Database
}
func (r *testerChainReader) Config() *params.ChainConfig { panic("not supported") }
func (r *testerChainReader) Config() *params.ChainConfig { return params.AllProtocolChanges }
func (r *testerChainReader) CurrentHeader() *types.Header { panic("not supported") }
func (r *testerChainReader) GetHeader(common.Hash, uint64) *types.Header { panic("not supported") }
func (r *testerChainReader) GetBlock(common.Hash, uint64) *types.Block { panic("not supported") }


@ -102,7 +102,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
header.Cap *= 4
cache := *(*[]byte)(unsafe.Pointer(&header))
// Calculate the number of thoretical rows (we'll store in one buffer nonetheless)
// Calculate the number of theoretical rows (we'll store in one buffer nonetheless)
size := uint64(len(cache))
rows := int(size) / hashBytes
@ -187,7 +187,7 @@ func fnvHash(mix []uint32, data []uint32) {
// generateDatasetItem combines data from 256 pseudorandomly selected cache nodes,
// and hashes that to compute a single dataset node.
func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte {
// Calculate the number of thoretical rows (we use one buffer nonetheless)
// Calculate the number of theoretical rows (we use one buffer nonetheless)
rows := uint32(len(cache) / hashWords)
// Initialize the mix
@ -287,7 +287,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
// hashimoto aggregates data from the full dataset in order to produce our final
// value for a particular header hash and nonce.
func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32) []uint32) ([]byte, []byte) {
// Calculate the number of thoretical rows (we use one buffer nonetheless)
// Calculate the number of theoretical rows (we use one buffer nonetheless)
rows := uint32(size / mixBytes)
// Combine header+nonce into a 64 byte seed


@ -147,12 +147,12 @@ func TestTransactionPriceNonceSort(t *testing.T) {
txset := NewTransactionsByPriceAndNonce(signer, groups)
txs := Transactions{}
for {
if tx := txset.Peek(); tx != nil {
txs = append(txs, tx)
txset.Shift()
}
break
for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
txs = append(txs, tx)
txset.Shift()
}
if len(txs) != 25*25 {
t.Errorf("expected %d transactions, found %d", 25*25, len(txs))
}
for i, txi := range txs {
fromi, _ := Sender(signer, txi)


@ -138,10 +138,10 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
pc = uint64(0) // program counter
cost uint64
// copies used by tracer
stackCopy = newstack() // stackCopy needed for Tracer since stack is mutated by 63/64 gas rule
pcCopy uint64 // needed for the deferred Tracer
gasCopy uint64 // for Tracer to log gas remaining before execution
logged bool // deferred Tracer should ignore already logged steps
stackCopy = newstack() // stackCopy needed for Tracer since stack is mutated by 63/64 gas rule
pcCopy uint64 // needed for the deferred Tracer
gasCopy uint64 // for Tracer to log gas remaining before execution
logged bool // deferred Tracer should ignore already logged steps
)
contract.Input = input
@ -169,22 +169,19 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
}
}
// get the operation from the jump table matching the opcode
// Get the operation from the jump table matching the opcode and validate the
// stack and make sure there enough stack items available to perform the operation
operation := in.cfg.JumpTable[op]
if err := in.enforceRestrictions(op, operation, stack); err != nil {
return nil, err
}
// if the op is invalid abort the process and return an error
if !operation.valid {
return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
}
// validate the stack and make sure there enough stack items available
// to perform the operation
if err := operation.validateStack(stack); err != nil {
return nil, err
}
// If the operation is valid, enforce and write restrictions
if err := in.enforceRestrictions(op, operation, stack); err != nil {
return nil, err
}
var memorySize uint64
// calculate the new memory size and expand the memory to fit


@ -327,7 +327,7 @@ func (s *Ethereum) StartMining(local bool) error {
wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
if wallet == nil || err != nil {
log.Error("Etherbase account unavailable locally", "err", err)
return fmt.Errorf("singer missing: %v", err)
return fmt.Errorf("signer missing: %v", err)
}
clique.Authorize(eb, wallet.SignHash)
}

eth/downloader/fakepeer.go (new file, 160 lines)

@ -0,0 +1,160 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
// FakePeer is a mock downloader peer that operates on a local database instance
// instead of being an actual live node. It's useful for testing and to implement
// sync commands from an xisting local database.
type FakePeer struct {
id string
db ethdb.Database
hc *core.HeaderChain
dl *Downloader
}
// NewFakePeer creates a new mock downloader peer with the given data sources.
func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer {
return &FakePeer{id: id, db: db, hc: hc, dl: dl}
}
// Head implements downloader.Peer, returning the current head hash and number
// of the best known header.
func (p *FakePeer) Head() (common.Hash, *big.Int) {
header := p.hc.CurrentHeader()
return header.Hash(), header.Number
}
// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
// defined by the origin hash and the associaed query parameters.
func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < amount {
origin := p.hc.GetHeaderByHash(hash)
if origin == nil {
break
}
number := origin.Number.Uint64()
headers = append(headers, origin)
if reverse {
for i := 0; i < int(skip)+1; i++ {
if header := p.hc.GetHeader(hash, number); header != nil {
hash = header.ParentHash
number--
} else {
unknown = true
break
}
}
} else {
var (
current = origin.Number.Uint64()
next = current + uint64(skip) + 1
)
if header := p.hc.GetHeaderByNumber(next); header != nil {
if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
}
}
p.dl.DeliverHeaders(p.id, headers)
return nil
}
// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
// defined by the origin number and the associaed query parameters.
func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < amount {
origin := p.hc.GetHeaderByNumber(number)
if origin == nil {
break
}
if reverse {
if number >= uint64(skip+1) {
number -= uint64(skip + 1)
} else {
unknown = true
}
} else {
number += uint64(skip + 1)
}
headers = append(headers, origin)
}
p.dl.DeliverHeaders(p.id, headers)
return nil
}
// RequestBodies implements downloader.Peer, returning a batch of block bodies
// corresponding to the specified block hashes.
func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
var (
txs [][]*types.Transaction
uncles [][]*types.Header
)
for _, hash := range hashes {
block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
txs = append(txs, block.Transactions())
uncles = append(uncles, block.Uncles())
}
p.dl.DeliverBodies(p.id, txs, uncles)
return nil
}
// RequestReceipts implements downloader.Peer, returning a batch of transaction
// receipts corresponding to the specified block hashes.
func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
var receipts [][]*types.Receipt
for _, hash := range hashes {
receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
}
p.dl.DeliverReceipts(p.id, receipts)
return nil
}
// RequestNodeData implements downloader.Peer, returning a batch of state trie
// nodes corresponding to the specified trie hashes.
func (p *FakePeer) RequestNodeData(hashes []common.Hash) error {
var data [][]byte
for _, hash := range hashes {
if entry, err := p.db.Get(hash.Bytes()); err == nil {
data = append(data, entry)
}
}
p.dl.DeliverNodeData(p.id, data)
return nil
}


@ -83,6 +83,7 @@ type announce struct {
// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
peer string // The source peer of block headers
headers []*types.Header // Collection of headers to filter
time time.Time // Arrival time of the headers
}
@ -90,6 +91,7 @@ type headerFilterTask struct {
// headerFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
peer string // The source peer of block bodies
transactions [][]*types.Transaction // Collection of transactions per block bodies
uncles [][]*types.Header // Collection of uncles per block bodies
time time.Time // Arrival time of the blocks' contents
@ -218,8 +220,8 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
log.Trace("Filtering headers", "headers", len(headers))
func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
// Send the filter channel to the fetcher
filter := make(chan *headerFilterTask)
@ -231,7 +233,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
}
// Request the filtering of the header list
select {
case filter <- &headerFilterTask{headers: headers, time: time}:
case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
case <-f.quit:
return nil
}
@ -246,8 +248,8 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
log.Trace("Filtering bodies", "txs", len(transactions), "uncles", len(uncles))
func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
// Send the filter channel to the fetcher
filter := make(chan *bodyFilterTask)
@ -259,7 +261,7 @@ func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*
}
// Request the filtering of the body list
select {
case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}:
case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
case <-f.quit:
return nil, nil
}
@ -444,7 +446,7 @@ func (f *Fetcher) loop() {
hash := header.Hash()
// Filter fetcher-requested headers from other synchronisation algorithms
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
// If the delivered header does not match the promised number, drop the announcer
if header.Number.Uint64() != announce.number {
log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
@ -523,7 +525,7 @@ func (f *Fetcher) loop() {
txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
uncleHash := types.CalcUncleHash(task.uncles[i])
if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash {
if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
// Mark the body matched, reassemble if still unknown
matched = true


@ -153,7 +153,7 @@ func (f *fetcherTester) dropPeer(peer string) {
}
// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
@ -166,14 +166,14 @@ func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, d
headers = append(headers, block.Header())
}
// Return on a new thread
go f.fetcher.FilterHeaders(headers, time.Now().Add(drift))
go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift))
return nil
}
}
// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
func (f *fetcherTester) makeBodyFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
closure[hash] = block
@ -191,7 +191,7 @@ func (f *fetcherTester) makeBodyFetcher(blocks map[common.Hash]*types.Block, dri
}
}
// Return on a new thread
go f.fetcher.FilterBodies(transactions, uncles, time.Now().Add(drift))
go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift))
return nil
}
@ -282,8 +282,8 @@ func testSequentialAnnouncements(t *testing.T, protocol int) {
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks until all are imported
imported := make(chan *types.Block)
@ -309,22 +309,28 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
// Assemble a tester with a built in counter for the requests
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack)
firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0)
secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack)
secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0)
counter := uint32(0)
headerWrapper := func(hash common.Hash) error {
firstHeaderWrapper := func(hash common.Hash) error {
atomic.AddUint32(&counter, 1)
return headerFetcher(hash)
return firstHeaderFetcher(hash)
}
secondHeaderWrapper := func(hash common.Hash) error {
atomic.AddUint32(&counter, 1)
return secondHeaderFetcher(hash)
}
// Iteratively announce blocks until all are imported
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), headerWrapper, bodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), headerWrapper, bodyFetcher)
tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher)
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), secondHeaderWrapper, secondBodyFetcher)
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
@ -347,8 +353,8 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks, but overlap them continuously
overlap := 16
@ -381,8 +387,8 @@ func testPendingDeduplication(t *testing.T, protocol int) {
// Assemble a tester with a built in counter and delayed fetcher
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0)
delay := 50 * time.Millisecond
counter := uint32(0)
@ -425,8 +431,8 @@ func testRandomArrivalImport(t *testing.T, protocol int) {
skip := targetBlocks / 2
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks, skipping one entry
imported := make(chan *types.Block, len(hashes)-1)
@ -456,8 +462,8 @@ func testQueueGapFill(t *testing.T, protocol int) {
skip := targetBlocks / 2
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks, skipping one entry
imported := make(chan *types.Block, len(hashes)-1)
@ -486,8 +492,8 @@ func testImportDeduplication(t *testing.T, protocol int) {
// Create the tester and wrap the importer with a counter
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
counter := uint32(0)
tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
@ -570,8 +576,8 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
tester.lock.Unlock()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("lower", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("lower", blocks, 0)
fetching := make(chan struct{}, 2)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }
@ -603,14 +609,14 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
hashes, blocks := makeChain(1, 0, genesis)
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack)
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
imported := make(chan *types.Block)
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
// Announce a block with a bad number, check for immediate drop
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher)
verifyImportEvent(t, imported, false)
tester.lock.RLock()
@ -620,8 +626,11 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
if !dropped {
t.Fatalf("peer with invalid numbered announcement not dropped")
}
goodHeaderFetcher := tester.makeHeaderFetcher("good", blocks, -gatherSlack)
goodBodyFetcher := tester.makeBodyFetcher("good", blocks, 0)
// Make sure a good announcement passes without a drop
tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), goodHeaderFetcher, goodBodyFetcher)
verifyImportEvent(t, imported, true)
tester.lock.RLock()
@ -645,8 +654,8 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
hashes, blocks := makeChain(32, 0, genesis)
tester := newTester()
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Add a monitoring hook for all internal events
fetching := make(chan []common.Hash)
@ -697,12 +706,12 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
// Create a valid chain and an infinite junk chain
targetBlocks := hashLimit + 2*maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
validHeaderFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
validBodyFetcher := tester.makeBodyFetcher(blocks, 0)
validHeaderFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
validBodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
attack, _ := makeChain(targetBlocks, 0, unknownBlock)
attackerHeaderFetcher := tester.makeHeaderFetcher(nil, -gatherSlack)
attackerBodyFetcher := tester.makeBodyFetcher(nil, 0)
attackerHeaderFetcher := tester.makeHeaderFetcher("attacker", nil, -gatherSlack)
attackerBodyFetcher := tester.makeBodyFetcher("attacker", nil, 0)
// Feed the tester a huge hashset from the attacker, and a limited from the valid peer
for i := 0; i < len(attack); i++ {


@ -450,7 +450,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return nil
}
// Irrelevant of the fork checks, send the header to the fetcher just in case
headers = pm.fetcher.FilterHeaders(headers, time.Now())
headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
@ -503,7 +503,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Filter out any explicitly requested bodies, deliver the rest to the downloader
filter := len(trasactions) > 0 || len(uncles) > 0
if filter {
trasactions, uncles = pm.fetcher.FilterBodies(trasactions, uncles, time.Now())
trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
}
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)


@ -56,7 +56,7 @@ func (mux *TypeMux) Subscribe(types ...interface{}) *TypeMuxSubscription {
defer mux.mutex.Unlock()
if mux.stopped {
// set the status to closed so that calling Unsubscribe after this
// call will short curuit
// call will short circuit.
sub.closed = true
close(sub.postC)
} else {

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large


@ -528,109 +528,6 @@ const Shh_JS = `
web3._extend({
property: 'shh',
methods: [
new web3._extend.Method({
name: 'setMaxMessageLength',
call: 'shh_setMaxMessageLength',
params: 1
}),
new web3._extend.Method({
name: 'setMinimumPoW',
call: 'shh_setMinimumPoW',
params: 1
}),
new web3._extend.Method({
name: 'markTrustedPeer',
call: 'shh_markTrustedPeer',
params: 1
}),
new web3._extend.Method({
name: 'hasKeyPair',
call: 'shh_hasKeyPair',
params: 1
}),
new web3._extend.Method({
name: 'deleteKeyPair',
call: 'shh_deleteKeyPair',
params: 1
}),
new web3._extend.Method({
name: 'newKeyPair',
call: 'shh_newKeyPair'
}),
new web3._extend.Method({
name: 'getPublicKey',
call: 'shh_getPublicKey',
params: 1
}),
new web3._extend.Method({
name: 'getPrivateKey',
call: 'shh_getPrivateKey',
params: 1
}),
new web3._extend.Method({
name: 'newSymKey',
call: 'shh_newSymKey',
}),
new web3._extend.Method({
name: 'addSymKey',
call: 'shh_addSymKey',
params: 1
}),
new web3._extend.Method({
name: 'generateSymKeyFromPassword',
call: 'shh_generateSymKeyFromPassword',
params: 1
}),
new web3._extend.Method({
name: 'hasSymKey',
call: 'shh_hasSymKey',
params: 1
}),
new web3._extend.Method({
name: 'getSymKey',
call: 'shh_getSymKey',
params: 1
}),
new web3._extend.Method({
name: 'deleteSymKey',
call: 'shh_deleteSymKey',
params: 1
}),
new web3._extend.Method({
name: 'subscribe',
call: 'shh_subscribe',
params: 2
}),
new web3._extend.Method({
name: 'unsubscribe',
call: 'shh_unsubscribe',
params: 1
}),
new web3._extend.Method({
name: 'post',
call: 'shh_post',
params: 1
}),
new web3._extend.Method({
name: 'publicKey',
call: 'shh_getPublicKey',
params: 1
}),
new web3._extend.Method({
name: 'getFilterMessages',
call: 'shh_getFilterMessages',
params: 1
}),
new web3._extend.Method({
name: 'deleteMessageFilter',
call: 'shh_deleteMessageFilter',
params: 1
}),
new web3._extend.Method({
name: 'newMessageFilter',
call: 'shh_newMessageFilter',
params: 1
}),
],
properties:
[

View File

@ -163,7 +163,7 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
}
// loop monitors mining events on the work and quit channels, updating the internal
// state of the rmeote miner until a termination is requested.
// state of the remote miner until a termination is requested.
//
// Note, the reason the work and quit channels are passed as parameters is because
// RemoteAgent.Start() constantly recreates these channels, so the loop code cannot

View File

@ -165,7 +165,7 @@ func (c *BoundContract) Call(opts *CallOpts, out *Interfaces, method string, arg
// Transact invokes the (paid) contract method with params as input values.
func (c *BoundContract) Transact(opts *TransactOpts, method string, args *Interfaces) (tx *Transaction, _ error) {
rawTx, err := c.contract.Transact(&opts.opts, method, args.objects)
rawTx, err := c.contract.Transact(&opts.opts, method, args.objects...)
if err != nil {
return nil, err
}
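The one-character change above (args.objects → args.objects...) is the standard Go fix for passing a slice to a variadic parameter: without the ellipsis the whole slice is wrapped as a single argument. A self-contained illustration:

package main

import "fmt"

func call(args ...interface{}) int { return len(args) }

func main() {
	objects := []interface{}{"to", "amount", "data"}

	fmt.Println(call(objects))    // 1: the slice itself is the only argument
	fmt.Println(call(objects...)) // 3: the slice is expanded into three arguments
}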

View File

@ -62,6 +62,7 @@ func (n *upnp) AddMapping(protocol string, extport, intport int, desc string, li
}
protocol = strings.ToUpper(protocol)
lifetimeS := uint32(lifetime / time.Second)
n.DeleteMapping(protocol, extport, intport)
return n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS)
}
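Deleting the mapping before re-adding it makes AddMapping idempotent: some gateways refuse AddPortMapping for a port that already has a lease, which is what the referenced issue hit. A hedged sketch of the pattern with a toy router type (not the goupnp client used by p2p/nat):

package main

import "fmt"

type router struct{ mappings map[string]bool }

func portKey(proto string, extport int) string { return fmt.Sprintf("%s/%d", proto, extport) }

func (r *router) addPortMapping(proto string, extport int) error {
	if r.mappings[portKey(proto, extport)] {
		return fmt.Errorf("ConflictInMappingEntry") // duplicate mapping refused
	}
	r.mappings[portKey(proto, extport)] = true
	return nil
}

func (r *router) deletePortMapping(proto string, extport int) {
	delete(r.mappings, portKey(proto, extport))
}

// addMapping mirrors upnp.AddMapping after the fix: clear any stale lease first
// so re-registering the same port always succeeds.
func (r *router) addMapping(proto string, extport int) error {
	r.deletePortMapping(proto, extport)
	return r.addPortMapping(proto, extport)
}

func main() {
	r := &router{mappings: map[string]bool{}}
	fmt.Println(r.addMapping("TCP", 30303)) // <nil>
	fmt.Println(r.addMapping("TCP", 30303)) // <nil>: no conflict on re-registration
}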

View File

@ -99,17 +99,18 @@ type ChainConfig struct {
ChainId *big.Int `json:"chainId"` // Chain id identifies the current chain and is used for replay protection
HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead)
DAOForkBlock *big.Int `json:"daoForkBlock,omitempty"` // TheDAO hard-fork switch block (nil = no fork)
DAOForkSupport bool `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork
// EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150)
EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork)
EIP150Hash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (fast sync aid)
EIP150Hash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (needed for header only clients as only gas pricing changed)
EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block
EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block
ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = alraedy on homestead)
ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"`
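The reworded EIP150Hash comment captures why the field exists at all: the EIP150 fork only changed gas pricing, which leaves no trace in block headers, so clients that validate headers alone (fast/light sync) pin the fork block's hash instead. A hedged sketch of filling in these fields for a private network; every value below is a placeholder, not the canonical mainnet configuration.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	cfg := &params.ChainConfig{
		ChainId:        big.NewInt(1337),
		HomesteadBlock: big.NewInt(0),
		EIP150Block:    big.NewInt(0),
		// EIP150 only changed gas pricing, which is invisible in block headers,
		// so header-only clients pin the fork block's hash to verify the switch.
		EIP150Hash:     common.HexToHash("0x00"),
		EIP155Block:    big.NewInt(0),
		EIP158Block:    big.NewInt(0),
		ByzantiumBlock: big.NewInt(0),
		Ethash:         new(params.EthashConfig),
	}
	fmt.Println(cfg.ChainId)
}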

View File

@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 7 // Minor version component of the current release
VersionPatch = 1 // Patch version component of the current release
VersionPatch = 2 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
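For reference, a hedged sketch of how such components combine into the version string geth reports; the helper below is illustrative, not the params package API.

package main

import "fmt"

const (
	VersionMajor = 1
	VersionMinor = 7
	VersionPatch = 2
	VersionMeta  = "stable"
)

// versionWithMeta joins the components in the familiar "major.minor.patch-meta" form.
func versionWithMeta() string {
	v := fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
	if VersionMeta != "" {
		v += "-" + VersionMeta
	}
	return v
}

func main() {
	fmt.Println(versionWithMeta()) // 1.7.2-stable
}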

View File

@ -144,9 +144,13 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe
if entry != nil {
key = common.Hex2Bytes(entry.Hash)
status = entry.Status
mimeType = entry.ContentType
log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType))
reader = self.dpa.Retrieve(key)
if status == http.StatusMultipleChoices {
return
} else {
mimeType = entry.ContentType
log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType))
reader = self.dpa.Retrieve(key)
}
} else {
status = http.StatusNotFound
err = fmt.Errorf("manifest entry for '%s' not found", path)

View File

@ -25,9 +25,11 @@ import (
"fmt"
"html/template"
"net/http"
"strings"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
)
//templateMap holds a mapping of an HTTP error code to a template
@ -51,12 +53,14 @@ func initErrHandling() {
//pages are saved as strings - get these strings
genErrPage := GetGenericErrorPage()
notFoundPage := GetNotFoundErrorPage()
multipleChoicesPage := GetMultipleChoicesErrorPage()
//map the codes to the available pages
tnames := map[int]string{
0: genErrPage, //default
400: genErrPage,
404: notFoundPage,
500: genErrPage,
0: genErrPage, //default
http.StatusBadRequest: genErrPage,
http.StatusNotFound: notFoundPage,
http.StatusMultipleChoices: multipleChoicesPage,
http.StatusInternalServerError: genErrPage,
}
templateMap = make(map[int]*template.Template)
for code, tname := range tnames {
@ -65,6 +69,40 @@ func initErrHandling() {
}
}
//ShowMultipleChoices is used when a user requests a resource in a manifest which results
//in ambiguous results. It returns an HTML page with clickable links to each of the entries
//in the manifest that fit the request URI ambiguity.
//For example, if the user requests bzz:/<hash>/read and that manifest contains the entries
//"readme.md" and "readinglist.txt", an HTML page is returned with these two links.
//This only applies if the manifest has no default entry.
func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) {
msg := ""
if list.Entries == nil {
ShowError(w, r, "Internal Server Error", http.StatusInternalServerError)
return
}
//make links relative
//requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt"
//to get clickable links, need to remove the ambiguous path, i.e. "read"
idx := strings.LastIndex(r.RequestURI, "/")
if idx == -1 {
ShowError(w, r, "Internal Server Error", http.StatusInternalServerError)
return
}
//remove ambiguous part
base := r.RequestURI[:idx+1]
for _, e := range list.Entries {
//create clickable link for each entry
msg += "<a href='" + base + e.Path + "'>" + e.Path + "</a><br/>"
}
respond(w, r, &ErrorParams{
Code: http.StatusMultipleChoices,
Details: template.HTML(msg),
Timestamp: time.Now().Format(time.RFC1123),
template: getTemplate(http.StatusMultipleChoices),
})
}
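A stand-alone sketch of the link building above, with stand-in types instead of the swarm api/http packages: the ambiguous final path element is stripped so each matching manifest entry becomes a relative, clickable link.

package main

import (
	"fmt"
	"strings"
)

func multipleChoicesHTML(requestURI string, entries []string) (string, error) {
	idx := strings.LastIndex(requestURI, "/")
	if idx == -1 {
		return "", fmt.Errorf("cannot derive base path from %q", requestURI)
	}
	base := requestURI[:idx+1] // e.g. "/bzz:/<hash>/" for a request of ".../read"
	msg := ""
	for _, e := range entries {
		msg += "<a href='" + base + e + "'>" + e + "</a><br/>"
	}
	return msg, nil
}

func main() {
	html, _ := multipleChoicesHTML("/bzz:/abc123/read", []string{"readme.md", "readinglist.txt"})
	fmt.Println(html)
}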
//ShowError is used to show an HTML error page to a client.
//If there is an `Accept` header of `application/json`, JSON will be returned instead
//The function just takes a string message which will be displayed in the error page.

File diff suppressed because one or more lines are too long

View File

@ -441,14 +441,37 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) {
return
}
walker, err := s.api.NewManifestWalker(key, nil)
list, err := s.getManifestList(key, r.uri.Path)
if err != nil {
s.Error(w, r, err)
return
}
var list api.ManifestList
prefix := r.uri.Path
// if the client wants HTML (e.g. a browser) then render the list as an
// HTML index with relative URLs
if strings.Contains(r.Header.Get("Accept"), "text/html") {
w.Header().Set("Content-Type", "text/html")
err := htmlListTemplate.Execute(w, &htmlListData{
URI: r.uri,
List: &list,
})
if err != nil {
s.logError("error rendering list HTML: %s", err)
}
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(&list)
}
func (s *Server) getManifestList(key storage.Key, prefix string) (list api.ManifestList, err error) {
walker, err := s.api.NewManifestWalker(key, nil)
if err != nil {
return
}
err = walker.Walk(func(entry *api.ManifestEntry) error {
// handle non-manifest files
if entry.ContentType != api.ManifestType {
@ -495,27 +518,8 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) {
// so just skip it
return api.SkipManifest
})
if err != nil {
s.Error(w, r, err)
return
}
// if the client wants HTML (e.g. a browser) then render the list as a
// HTML index with relative URLs
if strings.Contains(r.Header.Get("Accept"), "text/html") {
w.Header().Set("Content-Type", "text/html")
err := htmlListTemplate.Execute(w, &htmlListData{
URI: r.uri,
List: &list,
})
if err != nil {
s.logError("error rendering list HTML: %s", err)
}
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(&list)
return list, nil
}
// HandleGetFile handles a GET request to bzz://<manifest>/<path> and responds
@ -544,6 +548,22 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) {
return
}
//the request results in ambiguous files
//e.g. /read with readme.md and readinglist.txt available in manifest
if status == http.StatusMultipleChoices {
list, err := s.getManifestList(key, r.uri.Path)
if err != nil {
s.Error(w, r, err)
return
}
s.logDebug(fmt.Sprintf("Multiple choices! --> %v", list))
//show a nice page with links to the available entries
ShowMultipleChoices(w, &r.Request, list)
return
}
// check the root chunk exists by retrieving the file's size
if _, err := reader.Size(nil); err != nil {
s.NotFound(w, r, fmt.Errorf("File not found %s: %s", r.uri, err))

View File

@ -22,6 +22,8 @@ import (
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
@ -422,25 +424,47 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
return self.entries[256], 0
}
//see if first char is in manifest entries
b := byte(path[0])
entry = self.entries[b]
if entry == nil {
return self.entries[256], 0
}
epl := len(entry.Path)
log.Trace(fmt.Sprintf("path = %v entry.Path = %v epl = %v", path, entry.Path, epl))
if (len(path) >= epl) && (path[:epl] == entry.Path) {
if len(path) <= epl {
if entry.Path[:len(path)] == path {
if entry.ContentType == ManifestType {
entry.Status = http.StatusMultipleChoices
}
pos = len(path)
return
}
return nil, 0
}
if path[:epl] == entry.Path {
log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType))
if entry.ContentType == ManifestType {
//the subentry is a manifest, load subtrie
if entry.ContentType == ManifestType && (strings.Contains(entry.Path, path) || strings.Contains(path, entry.Path)) {
err := self.loadSubTrie(entry, quitC)
if err != nil {
return nil, 0
}
entry, pos = entry.subtrie.findPrefixOf(path[epl:], quitC)
if entry != nil {
sub, pos := entry.subtrie.findPrefixOf(path[epl:], quitC)
if sub != nil {
entry = sub
pos += epl
return sub, pos
} else if path == entry.Path {
entry.Status = http.StatusMultipleChoices
}
} else {
//entry is not a manifest, return it
if path != entry.Path {
return nil, 0
}
pos = epl
}
}

View File

@ -17,7 +17,6 @@
package api
import (
// "encoding/json"
"bytes"
"encoding/json"
"fmt"
@ -72,11 +71,21 @@ func TestGetEntry(t *testing.T) {
testGetEntry(t, "/a", "", "")
testGetEntry(t, "/a/b", "a/b", "a/b")
// longest/deepest math
testGetEntry(t, "a/b", "-", "a", "a/ba", "a/b/c")
testGetEntry(t, "read", "read", "readme.md", "readit.md")
testGetEntry(t, "rf", "-", "readme.md", "readit.md")
testGetEntry(t, "readme", "readme", "readme.md")
testGetEntry(t, "readme", "-", "readit.md")
testGetEntry(t, "readme.md", "readme.md", "readme.md")
testGetEntry(t, "readme.md", "-", "readit.md")
testGetEntry(t, "readmeAmd", "-", "readit.md")
testGetEntry(t, "readme.mdffff", "-", "readme.md")
testGetEntry(t, "ab", "ab", "ab/cefg", "ab/cedh", "ab/kkkkkk")
testGetEntry(t, "ab/ce", "ab/ce", "ab/cefg", "ab/cedh", "ab/ceuuuuuuuuuu")
testGetEntry(t, "abc", "abc", "abcd", "abczzzzef", "abc/def", "abc/e/g")
testGetEntry(t, "a/b", "a/b", "a", "a/bc", "a/ba", "a/b/c")
testGetEntry(t, "a/b", "a/b", "a", "a/b", "a/bb", "a/b/c")
testGetEntry(t, "//a//b//", "a/b", "a", "a/b", "a/bb", "a/b/c")
}
func TestDeleteEntry(t *testing.T) {
}

View File

@ -134,7 +134,7 @@ func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fu
if err != nil {
return err
}
resp.Size = int(sf.fileSize)
resp.Size = len(req.Data)
} else {
log.Warn("Invalid write request size(%v) : off(%v)", sf.fileSize, req.Offset)
return errInvalidOffset
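The fix above reports the number of bytes consumed from the request rather than the whole file size, which is what the FUSE write contract expects. A hedged sketch with stand-in types that mirror the signature shown in the diff (not the bazil.org/fuse API):

package main

import "fmt"

type WriteRequest struct {
	Data   []byte
	Offset int64
}

type WriteResponse struct {
	Size int // bytes accepted from this request
}

type file struct{ contents []byte }

func (f *file) Write(req *WriteRequest, resp *WriteResponse) error {
	end := req.Offset + int64(len(req.Data))
	if int64(len(f.contents)) < end {
		grown := make([]byte, end)
		copy(grown, f.contents)
		f.contents = grown
	}
	copy(f.contents[req.Offset:], req.Data)
	resp.Size = len(req.Data) // not len(f.contents): only this write's bytes
	return nil
}

func main() {
	f := &file{contents: []byte("hello world")}
	resp := new(WriteResponse)
	f.Write(&WriteRequest{Data: []byte("swarm"), Offset: 6}, resp)
	fmt.Println(resp.Size, string(f.contents)) // 5 hello swarm
}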

View File

@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"os"
"reflect"
@ -30,7 +31,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
)
func init() {
@ -505,8 +508,6 @@ func BenchmarkGet(b *testing.B) { benchGet(b, false) }
func BenchmarkGetDB(b *testing.B) { benchGet(b, true) }
func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) }
func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
func BenchmarkHashBE(b *testing.B) { benchHash(b, binary.BigEndian) }
func BenchmarkHashLE(b *testing.B) { benchHash(b, binary.LittleEndian) }
const benchElemCount = 20000
@ -549,18 +550,39 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
return trie
}
func benchHash(b *testing.B, e binary.ByteOrder) {
trie := newEmpty()
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
e.PutUint64(k, uint64(i))
trie.Update(k, k)
}
// Benchmarks the trie hashing. Since the trie caches the result of any operation,
// we cannot use b.N as the number of hashing rounds, since all rounds apart from
// the first one will be NOOPs. As such, we'll use b.N as the number of accounts to
// insert into the trie before measuring the hashing.
func BenchmarkHash(b *testing.B) {
// Make the random benchmark deterministic
random := rand.New(rand.NewSource(0))
b.ResetTimer()
for i := 0; i < b.N; i++ {
trie.Hash()
// Create a realistic account trie to hash
addresses := make([][20]byte, b.N)
for i := 0; i < len(addresses); i++ {
for j := 0; j < len(addresses[i]); j++ {
addresses[i][j] = byte(random.Intn(256))
}
}
accounts := make([][]byte, len(addresses))
for i := 0; i < len(accounts); i++ {
var (
nonce = uint64(random.Int63())
balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
root = emptyRoot
code = crypto.Keccak256(nil)
)
accounts[i], _ = rlp.EncodeToBytes([]interface{}{nonce, balance, root, code})
}
// Insert the accounts into the trie and hash it
trie := newEmpty()
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
b.ResetTimer()
b.ReportAllocs()
trie.Hash()
}
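The same "b.N as problem size" trick works for any cached computation: do all the setup before ResetTimer and time a single non-cached call. A hedged toy example of the pattern (place it in a _test.go file and run with go test -bench CachedHash):

package main

import (
	"crypto/sha256"
	"testing"
)

type cachedSet struct {
	items [][]byte
	sum   []byte // memoised digest; reused until items change
}

func (c *cachedSet) Hash() []byte {
	if c.sum != nil {
		return c.sum
	}
	h := sha256.New()
	for _, it := range c.items {
		h.Write(it)
	}
	c.sum = h.Sum(nil)
	return c.sum
}

func BenchmarkCachedHash(b *testing.B) {
	set := &cachedSet{}
	for i := 0; i < b.N; i++ { // b.N controls input size, not iteration count
		set.items = append(set.items, []byte{byte(i), byte(i >> 8), byte(i >> 16)})
	}
	b.ResetTimer()
	b.ReportAllocs()
	set.Hash() // only the first (uncached) hash is measured
}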
func tempDB() (string, Database) {

View File

@ -64,6 +64,11 @@ var ErrNotTerminalOutput = errors.New("standard output is not a terminal")
// be colour codes on some platforms).
var ErrInvalidPrompt = errors.New("invalid prompt")
// ErrInternal is returned when liner experiences an error that it cannot
// handle. For example, if the number of columns becomes zero during an
// active call to Prompt
var ErrInternal = errors.New("liner: internal error")
// KillRingMax is the max number of elements to save on the killring.
const KillRingMax = 60
@ -156,7 +161,7 @@ func (s *State) getHistoryByPrefix(prefix string) (ph []string) {
return
}
// Returns the history lines matching the inteligent search
// Returns the history lines matching the intelligent search
func (s *State) getHistoryByPattern(pattern string) (ph []string, pos []int) {
if pattern == "" {
return

View File

@ -113,7 +113,7 @@ func (s *State) nextPending(timeout <-chan time.Time) (rune, error) {
select {
case thing, ok := <-s.next:
if !ok {
return 0, errors.New("liner: internal error")
return 0, ErrInternal
}
if thing.err != nil {
return 0, thing.err
@ -137,7 +137,7 @@ func (s *State) readNext() (interface{}, error) {
select {
case thing, ok := <-s.next:
if !ok {
return 0, errors.New("liner: internal error")
return 0, ErrInternal
}
if thing.err != nil {
return nil, thing.err

View File

@ -3,10 +3,12 @@
package liner
import (
"bufio"
"container/ring"
"errors"
"fmt"
"io"
"os"
"strings"
"unicode"
"unicode/utf8"
@ -90,6 +92,10 @@ const (
)
func (s *State) refresh(prompt []rune, buf []rune, pos int) error {
if s.columns == 0 {
return ErrInternal
}
s.needRefresh = false
if s.multiLineMode {
return s.refreshMultiLine(prompt, buf, pos)
@ -351,8 +357,8 @@ func (s *State) tabComplete(p []rune, line []rune, pos int) ([]rune, int, interf
}
hl := utf8.RuneCountInString(head)
if len(list) == 1 {
s.refresh(p, []rune(head+list[0]+tail), hl+utf8.RuneCountInString(list[0]))
return []rune(head + list[0] + tail), hl + utf8.RuneCountInString(list[0]), rune(esc), nil
err := s.refresh(p, []rune(head+list[0]+tail), hl+utf8.RuneCountInString(list[0]))
return []rune(head + list[0] + tail), hl + utf8.RuneCountInString(list[0]), rune(esc), err
}
direction := tabForward
@ -366,7 +372,10 @@ func (s *State) tabComplete(p []rune, line []rune, pos int) ([]rune, int, interf
if err != nil {
return line, pos, rune(esc), err
}
s.refresh(p, []rune(head+pick+tail), hl+utf8.RuneCountInString(pick))
err = s.refresh(p, []rune(head+pick+tail), hl+utf8.RuneCountInString(pick))
if err != nil {
return line, pos, rune(esc), err
}
next, err := s.readNext()
if err != nil {
@ -392,7 +401,10 @@ func (s *State) tabComplete(p []rune, line []rune, pos int) ([]rune, int, interf
// reverse intelligent search, implements a bash-like history search.
func (s *State) reverseISearch(origLine []rune, origPos int) ([]rune, int, interface{}, error) {
p := "(reverse-i-search)`': "
s.refresh([]rune(p), origLine, origPos)
err := s.refresh([]rune(p), origLine, origPos)
if err != nil {
return origLine, origPos, rune(esc), err
}
line := []rune{}
pos := 0
@ -478,7 +490,10 @@ func (s *State) reverseISearch(origLine []rune, origPos int) ([]rune, int, inter
case action:
return []rune(foundLine), foundPos, next, err
}
s.refresh(getLine())
err = s.refresh(getLine())
if err != nil {
return []rune(foundLine), foundPos, rune(esc), err
}
}
}
@ -535,7 +550,10 @@ func (s *State) yank(p []rune, text []rune, pos int) ([]rune, int, interface{},
line = append(line, lineEnd...)
pos = len(lineStart) + len(value)
s.refresh(p, line, pos)
err := s.refresh(p, line, pos)
if err != nil {
return line, pos, 0, err
}
next, err := s.readNext()
if err != nil {
@ -577,6 +595,11 @@ func (s *State) PromptWithSuggestion(prompt string, text string, pos int) (strin
if s.inputRedirected || !s.terminalSupported {
return s.promptUnsupported(prompt)
}
p := []rune(prompt)
const minWorkingSpace = 10
if s.columns < countGlyphs(p)+minWorkingSpace {
return s.tooNarrow(prompt)
}
if s.outputRedirected {
return "", ErrNotTerminalOutput
}
@ -585,7 +608,6 @@ func (s *State) PromptWithSuggestion(prompt string, text string, pos int) (strin
defer s.historyMutex.RUnlock()
fmt.Print(prompt)
p := []rune(prompt)
var line = []rune(text)
historyEnd := ""
var historyPrefix []string
@ -600,7 +622,10 @@ func (s *State) PromptWithSuggestion(prompt string, text string, pos int) (strin
pos = len(text)
}
if len(line) > 0 {
s.refresh(p, line, pos)
err := s.refresh(p, line, pos)
if err != nil {
return "", err
}
}
restart:
@ -624,7 +649,10 @@ mainLoop:
switch v {
case cr, lf:
if s.needRefresh {
s.refresh(p, line, pos)
err := s.refresh(p, line, pos)
if err != nil {
return "", err
}
}
if s.multiLineMode {
s.resetMultiLine(p, line, pos)
@ -958,7 +986,10 @@ mainLoop:
s.needRefresh = true
}
if s.needRefresh && !s.inputWaiting() {
s.refresh(p, line, pos)
err := s.refresh(p, line, pos)
if err != nil {
return "", err
}
}
if !historyAction {
historyStale = true
@ -978,7 +1009,7 @@ func (s *State) PasswordPrompt(prompt string) (string, error) {
return "", ErrInvalidPrompt
}
}
if !s.terminalSupported {
if !s.terminalSupported || s.columns == 0 {
return "", errors.New("liner: function not supported in this terminal")
}
if s.inputRedirected {
@ -988,6 +1019,12 @@ func (s *State) PasswordPrompt(prompt string) (string, error) {
return "", ErrNotTerminalOutput
}
p := []rune(prompt)
const minWorkingSpace = 1
if s.columns < countGlyphs(p)+minWorkingSpace {
return s.tooNarrow(prompt)
}
defer s.stopPrompt()
restart:
@ -995,7 +1032,6 @@ restart:
s.getColumns()
fmt.Print(prompt)
p := []rune(prompt)
var line []rune
pos := 0
@ -1014,7 +1050,10 @@ mainLoop:
switch v {
case cr, lf:
if s.needRefresh {
s.refresh(p, line, pos)
err := s.refresh(p, line, pos)
if err != nil {
return "", err
}
}
if s.multiLineMode {
s.resetMultiLine(p, line, pos)
@ -1032,7 +1071,10 @@ mainLoop:
s.restartPrompt()
case ctrlL: // clear screen
s.eraseScreen()
s.refresh(p, []rune{}, 0)
err := s.refresh(p, []rune{}, 0)
if err != nil {
return "", err
}
case ctrlH, bs: // Backspace
if pos <= 0 {
fmt.Print(beep)
@ -1068,3 +1110,20 @@ mainLoop:
}
return string(line), nil
}
func (s *State) tooNarrow(prompt string) (string, error) {
// Docker, OpenWRT, etc. sometimes return 0 column width
// Reset mode temporarily. Restore baked mode in case the terminal
// is wide enough for the next Prompt attempt.
m, merr := TerminalMode()
s.origMode.ApplyMode()
if merr == nil {
defer m.ApplyMode()
}
if s.r == nil {
// Windows does not always set s.r
s.r = bufio.NewReader(os.Stdin)
defer func() { s.r = nil }()
}
return s.promptUnsupported(prompt)
}

vendor/vendor.json vendored
View File

@ -256,10 +256,10 @@
"revisionTime": "2017-01-12T15:04:04Z"
},
{
"checksumSHA1": "4SfJoLxQlfSSQjBIMqK+IgNxLMk=",
"checksumSHA1": "lSRg5clrIZUxq4aaExbpnpAgtWA=",
"path": "github.com/peterh/liner",
"revision": "bf27d3ba8e1d9899d45a457ffac16c953eb2d647",
"revisionTime": "2017-02-11T19:53:22Z"
"revision": "a37ad39843113264dae84a5d89fcee28f50b35c6",
"revisionTime": "2017-09-02T20:46:57Z"
},
{
"checksumSHA1": "WbbxCn2jUYIL5viqLo0BKXEdPrQ=",

View File

@ -104,7 +104,7 @@ func (api *PublicWhisperAPI) Info(ctx context.Context) Info {
stats := api.w.Stats()
return Info{
Memory: stats.memoryUsed,
Messages: len(api.w.messageQueue) + len(api.w.p2pMsgQueue),
Messages: len(api.w.messageQueue) + len(api.w.p2pMsgQueue),
MinPow: api.w.MinPow(),
MaxMessageSize: api.w.MaxMessageSize(),
}

View File

@ -25,5 +25,3 @@ var DefaultConfig = Config{
MaxMessageSize: DefaultMaxMessageSize,
MinimumAcceptedPOW: DefaultMinimumPoW,
}
var ()

View File

@ -22,6 +22,7 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
@ -68,6 +69,10 @@ func (fs *Filters) Install(watcher *Filter) (string, error) {
return "", fmt.Errorf("failed to generate unique ID")
}
if watcher.expectsSymmetricEncryption() {
watcher.SymKeyHash = crypto.Keccak256Hash(watcher.KeySym)
}
fs.watchers[id] = watcher
return id, err
}
@ -119,7 +124,9 @@ func (fs *Filters) NotifyWatchers(env *Envelope, p2pMessage bool) {
if match && msg != nil {
log.Trace("processing message: decrypted", "hash", env.Hash().Hex())
watcher.Trigger(msg)
if watcher.Src == nil || IsPubKeyEqual(msg.Src, watcher.Src) {
watcher.Trigger(msg)
}
}
}
}
@ -172,9 +179,6 @@ func (f *Filter) MatchMessage(msg *ReceivedMessage) bool {
if f.PoW > 0 && msg.PoW < f.PoW {
return false
}
if f.Src != nil && !IsPubKeyEqual(msg.Src, f.Src) {
return false
}
if f.expectsAsymmetricEncryption() && msg.isAsymmetricEncryption() {
return IsPubKeyEqual(&f.KeyAsym.PublicKey, msg.Dst) && f.MatchTopic(msg.Topic)
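Two behaviours change in this file: Install now derives SymKeyHash from the symmetric key so it can never be stale, and the sender check moves from the generic MatchMessage into the moment a watcher is triggered, so otherwise-identical filters that differ only in Src still match envelopes. A hedged, simplified sketch (stand-in types, not the whisperv5 package):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

type filter struct {
	KeySym     []byte
	SymKeyHash []byte
	Src        []byte // expected sender, nil = accept anyone
}

type message struct{ Src []byte }

// install mirrors the new behaviour: recompute the key hash on registration.
func install(f *filter) {
	if len(f.KeySym) > 0 {
		f.SymKeyHash = crypto.Keccak256(f.KeySym)
	}
}

// trigger mirrors the relocated check: match first, then filter on origin.
func trigger(f *filter, msg *message) bool {
	return f.Src == nil || bytes.Equal(f.Src, msg.Src)
}

func main() {
	f := &filter{KeySym: []byte("symmetric key material")}
	install(f)
	fmt.Printf("%x\n", f.SymKeyHash)
	fmt.Println(trigger(f, &message{Src: []byte("anyone")})) // true: no Src restriction
}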

View File

@ -132,6 +132,103 @@ func TestInstallFilters(t *testing.T) {
}
}
func TestInstallSymKeyGeneratesHash(t *testing.T) {
InitSingleTest()
w := New(&Config{})
filters := NewFilters(w)
filter, _ := generateFilter(t, true)
// save the current SymKeyHash for comparison
initialSymKeyHash := filter.SymKeyHash
// ensure the SymKeyHash is invalid, for Install to recreate it
var invalid common.Hash
filter.SymKeyHash = invalid
_, err := filters.Install(filter)
if err != nil {
t.Fatalf("Error installing the filter: %s", err)
}
for i, b := range filter.SymKeyHash {
if b != initialSymKeyHash[i] {
t.Fatalf("The filter's symmetric key hash was not properly generated by Install")
}
}
}
func TestInstallIdenticalFilters(t *testing.T) {
InitSingleTest()
w := New(&Config{})
filters := NewFilters(w)
filter1, _ := generateFilter(t, true)
// Copy the first filter since some of its fields
// are randomly generated.
filter2 := &Filter{
KeySym: filter1.KeySym,
Topics: filter1.Topics,
PoW: filter1.PoW,
AllowP2P: filter1.AllowP2P,
Messages: make(map[common.Hash]*ReceivedMessage),
}
_, err := filters.Install(filter1)
if err != nil {
t.Fatalf("Error installing the first filter with seed %d: %s", seed, err)
}
_, err = filters.Install(filter2)
if err != nil {
t.Fatalf("Error installing the second filter with seed %d: %s", seed, err)
}
params, err := generateMessageParams()
if err != nil {
t.Fatalf("Error generating message parameters with seed %d: %s", seed, err)
}
params.KeySym = filter1.KeySym
params.Topic = BytesToTopic(filter1.Topics[0])
filter1.Src = &params.Src.PublicKey
filter2.Src = &params.Src.PublicKey
sentMessage, err := NewSentMessage(params)
if err != nil {
t.Fatalf("failed to create new message with seed %d: %s.", seed, err)
}
env, err := sentMessage.Wrap(params)
if err != nil {
t.Fatalf("failed Wrap with seed %d: %s.", seed, err)
}
msg := env.Open(filter1)
if msg == nil {
t.Fatalf("failed to Open with filter1")
}
if !filter1.MatchEnvelope(env) {
t.Fatalf("failed matching with the first filter")
}
if !filter2.MatchEnvelope(env) {
t.Fatalf("failed matching with the first filter")
}
if !filter1.MatchMessage(msg) {
t.Fatalf("failed matching with the second filter")
}
if !filter2.MatchMessage(msg) {
t.Fatalf("failed matching with the second filter")
}
}
func TestComparePubKey(t *testing.T) {
InitSingleTest()
@ -345,11 +442,6 @@ func TestMatchMessageSym(t *testing.T) {
t.Fatalf("failed Open with seed %d.", seed)
}
// Src mismatch
if f.MatchMessage(msg) {
t.Fatalf("failed MatchMessage(src mismatch) with seed %d.", seed)
}
// Src: match
*f.Src.X = *params.Src.PublicKey.X
*f.Src.Y = *params.Src.PublicKey.Y
@ -443,11 +535,6 @@ func TestMatchMessageAsym(t *testing.T) {
t.Fatalf("failed to open with seed %d.", seed)
}
// Src mismatch
if f.MatchMessage(msg) {
t.Fatalf("failed MatchMessage(src mismatch) with seed %d.", seed)
}
// Src: match
*f.Src.X = *params.Src.PublicKey.X
*f.Src.Y = *params.Src.PublicKey.Y

View File

@ -22,6 +22,8 @@ import (
mrand "math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
)
func TestWhisperBasic(t *testing.T) {
@ -624,3 +626,226 @@ func TestCustomization(t *testing.T) {
t.Fatalf("failed to get whisper messages")
}
}
func TestSymmetricSendCycle(t *testing.T) {
InitSingleTest()
w := New(&DefaultConfig)
defer w.SetMinimumPoW(DefaultMinimumPoW)
defer w.SetMaxMessageSize(DefaultMaxMessageSize)
w.Start(nil)
defer w.Stop()
filter1, err := generateFilter(t, true)
if err != nil {
t.Fatalf("failed generateMessageParams with seed %d: %s.", seed, err)
}
filter1.PoW = DefaultMinimumPoW
// Copy the first filter since some of its fields
// are randomly generated.
filter2 := &Filter{
KeySym: filter1.KeySym,
Topics: filter1.Topics,
PoW: filter1.PoW,
AllowP2P: filter1.AllowP2P,
Messages: make(map[common.Hash]*ReceivedMessage),
}
params, err := generateMessageParams()
if err != nil {
t.Fatalf("failed generateMessageParams with seed %d: %s.", seed, err)
}
filter1.Src = &params.Src.PublicKey
filter2.Src = &params.Src.PublicKey
params.KeySym = filter1.KeySym
params.Topic = BytesToTopic(filter1.Topics[2])
params.PoW = filter1.PoW
params.WorkTime = 10
params.TTL = 50
msg, err := NewSentMessage(params)
if err != nil {
t.Fatalf("failed to create new message with seed %d: %s.", seed, err)
}
env, err := msg.Wrap(params)
if err != nil {
t.Fatalf("failed Wrap with seed %d: %s.", seed, err)
}
_, err = w.Subscribe(filter1)
if err != nil {
t.Fatalf("failed subscribe 1 with seed %d: %s.", seed, err)
}
_, err = w.Subscribe(filter2)
if err != nil {
t.Fatalf("failed subscribe 2 with seed %d: %s.", seed, err)
}
err = w.Send(env)
if err != nil {
t.Fatalf("Failed sending envelope with PoW %.06f (seed %d): %s", env.PoW(), seed, err)
}
// wait till received or timeout
var received bool
for j := 0; j < 200; j++ {
time.Sleep(10 * time.Millisecond)
if len(w.Envelopes()) > 0 {
received = true
break
}
}
if !received {
t.Fatalf("did not receive the sent envelope, seed: %d.", seed)
}
// check w.messages()
time.Sleep(5 * time.Millisecond)
mail1 := filter1.Retrieve()
mail2 := filter2.Retrieve()
if len(mail2) == 0 {
t.Fatalf("did not receive any email for filter 2")
}
if len(mail1) == 0 {
t.Fatalf("did not receive any email for filter 1")
}
}
func TestSymmetricSendWithoutAKey(t *testing.T) {
InitSingleTest()
w := New(&DefaultConfig)
defer w.SetMinimumPoW(DefaultMinimumPoW)
defer w.SetMaxMessageSize(DefaultMaxMessageSize)
w.Start(nil)
defer w.Stop()
filter, err := generateFilter(t, true)
if err != nil {
t.Fatalf("failed generateMessageParams with seed %d: %s.", seed, err)
}
filter.PoW = DefaultMinimumPoW
params, err := generateMessageParams()
if err != nil {
t.Fatalf("failed generateMessageParams with seed %d: %s.", seed, err)
}
filter.Src = nil
params.KeySym = filter.KeySym
params.Topic = BytesToTopic(filter.Topics[2])
params.PoW = filter.PoW
params.WorkTime = 10
params.TTL = 50
msg, err := NewSentMessage(params)
if err != nil {
t.Fatalf("failed to create new message with seed %d: %s.", seed, err)
}
env, err := msg.Wrap(params)
if err != nil {
t.Fatalf("failed Wrap with seed %d: %s.", seed, err)
}
_, err = w.Subscribe(filter)
if err != nil {
t.Fatalf("failed subscribe 1 with seed %d: %s.", seed, err)
}
err = w.Send(env)
if err != nil {
t.Fatalf("Failed sending envelope with PoW %.06f (seed %d): %s", env.PoW(), seed, err)
}
// wait till received or timeout
var received bool
for j := 0; j < 200; j++ {
time.Sleep(10 * time.Millisecond)
if len(w.Envelopes()) > 0 {
received = true
break
}
}
if !received {
t.Fatalf("did not receive the sent envelope, seed: %d.", seed)
}
// check w.messages()
time.Sleep(5 * time.Millisecond)
mail := filter.Retrieve()
if len(mail) == 0 {
t.Fatalf("did not receive message in spite of not setting a public key")
}
}
func TestSymmetricSendKeyMismatch(t *testing.T) {
InitSingleTest()
w := New(&DefaultConfig)
defer w.SetMinimumPoW(DefaultMinimumPoW)
defer w.SetMaxMessageSize(DefaultMaxMessageSize)
w.Start(nil)
defer w.Stop()
filter, err := generateFilter(t, true)
if err != nil {
t.Fatalf("failed generateMessageParams with seed %d: %s.", seed, err)
}
filter.PoW = DefaultMinimumPoW
params, err := generateMessageParams()
if err != nil {
t.Fatalf("failed generateMessageParams with seed %d: %s.", seed, err)
}
params.KeySym = filter.KeySym
params.Topic = BytesToTopic(filter.Topics[2])
params.PoW = filter.PoW
params.WorkTime = 10
params.TTL = 50
msg, err := NewSentMessage(params)
if err != nil {
t.Fatalf("failed to create new message with seed %d: %s.", seed, err)
}
env, err := msg.Wrap(params)
if err != nil {
t.Fatalf("failed Wrap with seed %d: %s.", seed, err)
}
_, err = w.Subscribe(filter)
if err != nil {
t.Fatalf("failed subscribe 1 with seed %d: %s.", seed, err)
}
err = w.Send(env)
if err != nil {
t.Fatalf("Failed sending envelope with PoW %.06f (seed %d): %s", env.PoW(), seed, err)
}
// wait till received or timeout
var received bool
for j := 0; j < 200; j++ {
time.Sleep(10 * time.Millisecond)
if len(w.Envelopes()) > 0 {
received = true
break
}
}
if !received {
t.Fatalf("did not receive the sent envelope, seed: %d.", seed)
}
// check w.messages()
time.Sleep(5 * time.Millisecond)
mail := filter.Retrieve()
if len(mail) > 0 {
t.Fatalf("received a message when keys weren't matching")
}
}